From 9365f702c2f5b55ecb0e6bbd931a112957b7dba8 Mon Sep 17 00:00:00 2001 From: Michael Montour Date: Thu, 26 Sep 2024 13:49:52 -0700 Subject: [PATCH 01/13] Squashed commit of the following: commit 3ff1330ae7cbba62c96c11f9f3806dce42d4ed36 Author: Michael Montour Date: Thu Sep 26 13:39:25 2024 -0700 Fix unit tests Changes to be committed: modified: crates/rpc/src/eth/api.rs modified: crates/sim/src/estimation/v0_6.rs modified: crates/sim/src/estimation/v0_7.rs modified: crates/types/src/hybrid_compute.rs commit 2510cde1216498c75934edb8d90957f266225fa4 Author: Michael Montour Date: Thu Sep 26 13:08:35 2024 -0700 Finish merging log msgs etc. Changes to be committed: modified: crates/builder/src/transaction_tracker.rs modified: crates/provider/src/ethers/provider.rs modified: crates/rpc/src/eth/api.rs modified: crates/rpc/src/eth/router.rs modified: crates/sim/src/simulation/v0_6/context.rs modified: hybrid-compute/offchain/userop.py commit 4603b1089a18d31f4527d074f0c6fbcce59df0c1 Author: Michael Montour Date: Wed Sep 25 16:28:12 2024 -0700 More merges. 
Changes to be committed: modified: ../crates/builder/src/bundle_proposer.rs modified: ../crates/rpc/src/eth/api.rs modified: ../crates/rpc/src/eth/events/common.rs modified: ../crates/sim/src/estimation/estimate_call_gas.rs modified: ../crates/sim/src/estimation/estimate_verification_gas.rs modified: ../crates/sim/src/simulation/v0_6/tracer.rs modified: runit.sh commit 2dba7c3b170d5e249d4509779d6ad3dddcaea782 Author: Michael Montour Date: Tue Sep 24 23:40:04 2024 -0700 WIP - merge Changes to be committed: modified: .github/workflows/docker-images.yaml modified: Cargo.lock modified: Cargo.toml modified: bin/rundler/chain_specs/boba_sepolia.toml modified: bin/rundler/chain_specs/optimism_devnet.toml modified: crates/builder/src/bundle_sender.rs modified: crates/builder/src/task.rs modified: crates/provider/src/ethers/entry_point/v0_6.rs modified: crates/rpc/Cargo.toml modified: crates/sim/src/gas/gas.rs modified: crates/types/contracts/src/v0_6/imports.sol modified: hybrid-compute/offchain/userop.py commit a488d7c12d926b137f1eedc751dcbc20cc3cabfd Author: Michael Montour Date: Tue Sep 24 22:55:41 2024 -0700 CORS support. Changes to be committed: modified: crates/rpc/src/task.rs commit 9963ae009e8e7159411e6884f150480ad50f1679 Author: Michael Montour Date: Tue Sep 24 15:43:37 2024 -0700 Merge .github/.gitignore Changes to be committed: modified: .github/workflows/ci.yaml modified: .github/workflows/deny.yaml new file: .github/workflows/docker-images.yaml modified: .github/workflows/unit.yaml modified: .gitignore commit 24c02ee20b9e34346aa35b0429a0c5ae8e3c8978 Author: Michael Montour Date: Tue Sep 24 15:30:25 2024 -0700 Add HC support to bundle_proposer Changes to be committed: modified: crates/builder/src/bundle_proposer.rs modified: crates/pool/src/mempool/pool.rs modified: crates/rpc/src/eth/api.rs commit 112888d5a3446bd8103d0967e724f733cbe8518f Author: Michael Montour Date: Mon Sep 23 15:35:37 2024 -0700 Update api.rs to new interface. 
Changes to be committed: modified: crates/provider/src/traits/entry_point.rs modified: crates/rpc/src/eth/api.rs modified: crates/rpc/src/eth/router.rs modified: crates/rpc/src/types/v0_6.rs modified: crates/sim/src/estimation/mod.rs modified: crates/sim/src/estimation/v0_6.rs modified: crates/sim/src/estimation/v0_7.rs commit d0401797e9c1002970e53d9527a196ccde669005 Author: Michael Montour Date: Fri Sep 20 22:30:55 2024 -0700 WIP checkin Changes to be committed: modified: bin/rundler/src/cli/mod.rs modified: crates/provider/src/ethers/entry_point/v0_6.rs modified: crates/provider/src/ethers/entry_point/v0_7.rs modified: crates/provider/src/traits/entry_point.rs modified: crates/provider/src/traits/test_utils.rs modified: crates/rpc/src/eth/api.rs modified: crates/rpc/src/eth/router.rs modified: crates/types/src/user_operation/mod.rs modified: crates/types/src/user_operation/v0_6.rs modified: crates/types/src/user_operation/v0_7.rs modified: hybrid-compute/offchain/userop.py commit a0f2239441ed1a30fc682ddbfa78449131e7302d Author: Michael Montour Date: Thu Sep 19 15:50:42 2024 -0700 Use standard EntryPoint address. Misc updates. Changes to be committed: modified: bin/rundler/src/cli/chain_spec.rs modified: crates/types/build.rs modified: crates/types/contracts/foundry.toml modified: hybrid-compute/deploy-local.py modified: hybrid-compute/local.env commit acdccbf823ba6e79d5746a85c30cdfdd94861425 Author: Michael Montour Date: Thu Sep 19 13:15:04 2024 -0700 Add chain IDs for Boba Sepolia and local devnet. Changes to be committed: new file: bin/rundler/chain_specs/boba_sepolia.toml new file: bin/rundler/chain_specs/optimism_devnet.toml commit 4376ef30d19a81b7a48060eebdd6a89fceff77b7 Author: Michael Montour Date: Thu Sep 19 12:52:15 2024 -0700 Update deployer to work with new contract paths. 
Changes to be committed: new file: crates/types/contracts/hc_scripts/DeployHybridAccount.sol new file: crates/types/contracts/hc_scripts/ExampleDeploy.s.sol new file: crates/types/contracts/hc_scripts/LocalDeploy.s.sol modified: hybrid-compute/deploy-local.py commit 503da92bc146bbda469c87f5731b2c6971e645b9 Author: Michael Montour Date: Tue Sep 17 22:46:21 2024 -0700 Update submodule. Changes to be committed: modified: .gitmodules commit 48e3e732036434fad2ca24747494cefa4b2ba8ee Author: Michael Montour Date: Tue Sep 17 22:40:32 2024 -0700 Change the runit.sh wrapper Changes to be committed: modified: hybrid-compute/runit.sh commit 3bd1db78bb19ecd4c61bcf89336dd026f54a92d6 Author: Michael Montour Date: Tue Sep 17 22:40:00 2024 -0700 Add the hybrid_compute.rs file, updated for new UserOperations. Changes to be committed: modified: Cargo.lock modified: crates/types/Cargo.toml new file: crates/types/src/hybrid_compute.rs modified: crates/types/src/lib.rs commit 867d9fe74dfc05ea29ab622dec3f58beff3e0517 Author: Michael Montour Date: Tue Sep 17 14:52:01 2024 -0700 Import the hybrid-compute directory. 
Changes to be committed: new file: hybrid-compute/Dockerfile.offchain-rpc new file: hybrid-compute/README.md new file: hybrid-compute/aa-client.py new file: hybrid-compute/aa_utils/__init__.py new file: hybrid-compute/deploy-local.py new file: hybrid-compute/docker-compose.yml new file: hybrid-compute/local.env new file: hybrid-compute/offchain/add_sub_2/add_sub_2_offchain.py new file: hybrid-compute/offchain/add_sub_2/add_sub_2_test.py new file: hybrid-compute/offchain/add_sub_2/readme.md new file: hybrid-compute/offchain/auction_system/auction_system_offchain.py new file: hybrid-compute/offchain/auction_system/auction_system_test.py new file: hybrid-compute/offchain/check_kyc/check_kyc_offchain.py new file: hybrid-compute/offchain/check_kyc/check_kyc_test.py new file: hybrid-compute/offchain/check_kyc/readme.md new file: hybrid-compute/offchain/get_token_price/get_token_price_offchain.py new file: hybrid-compute/offchain/get_token_price/get_token_price_test.py new file: hybrid-compute/offchain/get_token_price/readme.md new file: hybrid-compute/offchain/offchain.py new file: hybrid-compute/offchain/offchain_utils.py new file: hybrid-compute/offchain/rainfall_insurance/rainfall_insurance_offchain.py new file: hybrid-compute/offchain/rainfall_insurance/rainfall_insurance_test.py new file: hybrid-compute/offchain/rainfall_insurance/readme.md new file: hybrid-compute/offchain/ramble/ramble_offchain.py new file: hybrid-compute/offchain/ramble/ramble_test.py new file: hybrid-compute/offchain/ramble/readme.md new file: hybrid-compute/offchain/readme.md new file: hybrid-compute/offchain/sports_betting/sports_betting_offchain.py new file: hybrid-compute/offchain/sports_betting/sports_betting_test.py new file: hybrid-compute/offchain/userop.py new file: hybrid-compute/offchain/userop_utils.py new file: hybrid-compute/offchain/verify_captcha/__init__.py new file: hybrid-compute/offchain/verify_captcha/captcha_offchain.py new file: 
hybrid-compute/offchain/verify_captcha/captcha_test.py new file: hybrid-compute/offchain/verify_captcha/readme.md new file: hybrid-compute/runit.sh commit a88a4c77160d098dc1ea7ec665e9998c9304fb4c Author: dancoombs Date: Mon Jul 1 16:36:00 2024 -0500 feat: add support for avax chains - Add gas overheads to chain spec (#638) - Allow for retries in chain sync - Add avax and avas fuji chain specs commit 459dab6fab992433f931b9f0779b4835eb6695d2 Author: dancoombs Date: Tue Jul 2 14:19:45 2024 -0500 feat(sim): ban access to arbitrum stylus contracts during sim commit 111868391c95c05e74edece693964161dc282ed7 Author: dancoombs Date: Tue Jul 2 16:50:09 2024 -0500 fix(builder): improve replacement underpriced handling commit f7aa69448f36154537a7c34647759ff191d5afdb Author: dancoombs Date: Tue Jul 2 11:31:58 2024 -0500 fix(cli): error if recursive base config, remove goerli netowrks commit 307e8c2cbeab80518a56a2f309e994de3b1631f0 Author: JP <36560907+0xfourzerofour@users.noreply.github.com> Date: Fri Jun 28 14:56:29 2024 -0400 feat(tx): change transaction size limit (#749) commit 214e550b95fa7e82b83033a62959466a244c871c Author: dancoombs Date: Fri Jun 28 10:53:40 2024 -0500 fix: various v0.3-rc fixes commit b97292a6004460eeb3eb724f241c41edbe01de9b Author: dancoombs Date: Tue Jun 25 14:05:41 2024 -0500 chore: update to rust v1.79.0 commit 65ee1e0d02d64246500fd962bb358f917b29b50c Author: dancoombs Date: Thu Jun 27 08:02:14 2024 -0500 chore(amoy): raise amoy chain config min fee to 30gwei commit 9a84f6e2b2bec12d802c2f752bf34e7273dbc8f1 Author: dancoombs Date: Wed Jun 26 16:16:51 2024 -0500 fix(pool): fix crash when block height goes backwards commit 3f364569c30ee1a57ab628370be17681125e2307 Author: dancoombs Date: Tue Jun 25 14:05:41 2024 -0500 chore: fix docs and set v0.3 version number commit 7611b13e1495cffd83414059d130fd763a87d6e1 Author: dancoombs Date: Tue Jun 25 13:20:38 2024 -0500 fix(rpc): fix error message for unstaked entity mempool count commit 
2706f46fa0295c1cc377f9c0bccadbc4ad949c65 Author: dancoombs Date: Mon Jun 24 12:47:19 2024 -0500 fix(builder): fix builder state machine replacement tracking commit 85f1897c117712b2a9a77272a1b95ec9f1142b6e Author: dancoombs Date: Mon Jun 24 14:54:28 2024 -0500 feat(sim): check total gas limit after estimation commit 1dca183002b89c12f61750018ed77719451fcffb Author: dancoombs Date: Tue Jun 18 22:03:00 2024 -0500 chore(builder): add unit tests to bundle_sender commit f3a9c9849fe0cb69040eec07e14195518196742b Author: dancoombs Date: Fri Jun 14 10:40:34 2024 -0500 feat(builder): allow for multiple private keys commit f8c59e75450f6d3ed33a1d93781599f7e1b3e11f Author: dancoombs Date: Thu Jun 13 17:15:09 2024 -0500 feat(builder): reject ops if condition not met after failure commit 91ccf11f6e1f2e36c9d79b0cc383855f38f93240 Author: dancoombs Date: Wed Jun 12 16:55:55 2024 -0500 fix(builder): rework raw sender to support dropped/conditional/split-rpcs correctly commit 064d38c80b1c72aaaf70c216c3e95cb0b020bc88 Author: dancoombs Date: Tue Jun 11 14:59:56 2024 -0500 refactor(builder): large refactor of bundle sender state machine and metrics commit 11c47b280d23fbe9c887be93376a4ccf72199a31 Author: dancoombs Date: Tue Jun 11 11:56:55 2024 -0500 feat(builder): cancel transaction when replacement underpriced commit a72cdf3aeb214adcbd0f1bc01705c105a3f5fa6a Author: dancoombs Date: Wed Jun 5 15:41:38 2024 -0500 feat(builder): remove polling from transaction tracker, update state machine commit 39edc28c16ae7de57ac114d59f28164628a9d7a3 Author: dancoombs Date: Fri Jun 14 12:23:53 2024 -0500 feat(pool): allow staked senders to have multiple UOs in best_operations commit e41f1e55954dfdd6082b837b29dab892101a1785 Author: dancoombs Date: Tue Jun 18 16:02:51 2024 -0500 feat(pool): add time to mine tracking and metrics commit 80129e637c843c83d2b30370688fd50e617837be Author: dancoombs Date: Fri Jun 14 16:18:06 2024 -0500 fix(pool): fix race condition in paymaster tracking commit 
416f2636ebe123d587526d3ba2a4c221d27cb5af Author: JP <36560907+0xfourzerofour@users.noreply.github.com> Date: Tue Jun 18 18:49:04 2024 -0400 fix(tracer): set tracer timeout default back to 10s (#733) commit 0d1683f6a839989010f567977025c4527feb6db9 Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue Jun 18 20:17:59 2024 +0000 chore(deps): bump braces from 3.0.2 to 3.0.3 in /crates/sim/tracer Bumps [braces](https://github.com/micromatch/braces) from 3.0.2 to 3.0.3. - [Changelog](https://github.com/micromatch/braces/blob/master/CHANGELOG.md) - [Commits](https://github.com/micromatch/braces/compare/3.0.2...3.0.3) --- updated-dependencies: - dependency-name: braces dependency-type: indirect ... Signed-off-by: dependabot[bot] commit 23da1225ed87bb2677abc7a575cfa75c54f280f7 Author: JP <36560907+0xfourzerofour@users.noreply.github.com> Date: Tue Jun 18 16:17:14 2024 -0400 feat(tracer): add timeout of 10s to tracer (#730) commit 98f8e2ccd1da6513822e6f76097c3a6624cc6129 Author: JP <36560907+0xfourzerofour@users.noreply.github.com> Date: Fri Jun 14 14:38:20 2024 -0400 feat(ci): check if output file exists (#727) commit 6c96ed6fd7179df2cad0f526fc8ffd46a73b886b Author: dancoombs Date: Fri Jun 7 16:42:48 2024 -0500 chore: release v0.2.2 commit 1e8f5e206a73600935fcee062c7390eacf665251 Author: dancoombs Date: Fri Jun 7 16:19:36 2024 -0500 fix(cli): fix ep disable flags commit 0074fe020b1946f30d0e71da89776d691a44ba24 Author: dancoombs Date: Fri Jun 7 09:19:00 2024 -0500 chore: release 0.2.1, small doc fix commit 7990f85f4dd0f6ac22a9a76240f2eebaf023cc8e Author: dancoombs Date: Mon Jun 3 21:30:00 2024 -0500 fix(sim): handle v0.7 executeUserOp special call in call gas estimation commit 07f66997e164bf7bff7f43929404f1ab33811b2d Author: dancoombs Date: Mon Jun 3 14:06:10 2024 -0500 fix(cli): add missing global to many common parameters commit 3b4a4aa444762199fa245c6a0193ab3a41bc362c Author: dancoombs Date: Fri May 24 09:38:59 2024 -0500 chore: release 
v0.2.0 commit 78f15c962576cdb23948b658859ba696e1d88a8e Author: JP <36560907+0xfourzerofour@users.noreply.github.com> Date: Fri May 24 11:06:53 2024 -0400 feat(simulation): check whether the op fits within a valid time range (#712) commit 332d24102c0adc4c39cf59b820653fead05c8096 Author: dancoombs Date: Thu May 23 07:58:59 2024 -0500 fix(sim): update simulation for dencun opcodes commit 02af86989ec90045bbee7e2890062128665adf3b Author: dancoombs Date: Thu May 16 13:51:07 2024 -0400 fix(builder): filter fee before filtering by gas limit commit 7cc3f5f3a7430d36184f17cbf1c989de09cb8d95 Author: dancoombs Date: Fri May 10 14:40:13 2024 -0500 chore(docs): fix dead ERC-4337 links commit c1dc6a91015deea5fa018c08956ecfa673902da1 Author: dancoombs Date: Fri May 10 14:14:48 2024 -0500 fix(provider): use state overrides for gas used, not ctor method commit 58f31f425dade4af4e59cdfc0da9e42ea795999e Author: dancoombs Date: Thu May 2 15:43:30 2024 -0500 fix(chain): set maximum transaction size bytes to 128KB for ethereum chains commit 574038aa7313b322c62d3089cf524c8520ae66aa Author: dancoombs Date: Thu May 2 12:16:25 2024 -0500 chore(docs): add docs for entry point version support commit 5cdcc4686e428909ce788eae7fc055fecfe1e988 Author: dancoombs Date: Thu May 2 09:17:41 2024 -0500 fix(types): define entrypoint specific gas overheads commit d1a33937433587c232727f1facc4c66200690a66 Author: dancoombs Date: Tue Apr 30 12:36:34 2024 -0500 fix(types): fix unpack factory from initcode on v0.7 commit edab84ce8762eeec65a69142dd729bc77acadeb1 Author: dancoombs Date: Fri Apr 26 11:28:30 2024 -0500 feat(builder): support multiple flashbots builders commit fb32d1705bc9d750bd01301714b6c14eef556a1a Author: Alex Miao Date: Tue Feb 20 11:05:51 2024 -0800 fix: add flashbots signature header commit a32b6f75f9a9f327232dec63a71121839bdb02e0 Author: Alex Miao Date: Wed Feb 14 16:57:23 2024 -0800 refactor: use flashbots relay rpc commit 50d4b1679431d081e902edf69de16fb51b244764 Author: Alex Miao Date: Thu 
Feb 1 18:37:17 2024 -0800 feat(builder): add option to pass in additional builders to flashbots mev-share rpc commit c0d5235b2b8d4e7a407702ad5661622e41584a38 Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed Apr 24 18:29:04 2024 +0000 chore(deps): bump rustls from 0.22.2 to 0.22.4 Bumps [rustls](https://github.com/rustls/rustls) from 0.22.2 to 0.22.4. - [Release notes](https://github.com/rustls/rustls/releases) - [Changelog](https://github.com/rustls/rustls/blob/main/CHANGELOG.md) - [Commits](https://github.com/rustls/rustls/compare/v/0.22.2...v/0.22.4) --- updated-dependencies: - dependency-name: rustls dependency-type: direct:production ... Signed-off-by: dependabot[bot] commit 2cba170c94df87c4499e9bf5465adf2ce9235045 Author: 0xfourzerofour Date: Wed Apr 24 11:44:15 2024 -0400 feat(entrypoint): use checksummed address with correct casing commit 2231bacda7842dc728d5f6df45ea9a43d40a1f65 Author: dancoombs Date: Wed Apr 24 09:33:31 2024 -0500 fix(sim): handle explicit zero fees in 0.7 pre-verification gas commit a42c74839de321e1b44ba4821425b5a29f213de5 Author: David Philipson Date: Tue Apr 23 22:05:11 2024 -0700 fix(sim): handle explicit zero fees in 0.6 pre-verification gas Treat explicitly passed zero values the same as absent values for gas fees in `preVerificationGas` estimation in v0.6, as is consistent with estimating gas limits. commit 9ba06103b7fe7c2aed5a80fee5e959b6e7117d65 Author: dancoombs Date: Wed Apr 24 09:30:37 2024 -0500 chore(sim): move bytecode to contracts crate, use hex constant commit 75180322e7a6dd90a20fc942549220cf48d78c3a Author: David Philipson Date: Tue Apr 23 16:31:28 2024 -0700 fix(simulation): don't use abi-generated EntryPoint bytecode This reverts commit 275414d9268997fa9519cdf77d56f88a202e523d. We need the v0.6 entry point bytecode so we can use it in state overrides during call gas estimation. 
In a previous commit, we started using bytecode generated by abigen to avoid hardcoding the entry point bytecode or fetching it from on-chain on every run. For unknown reasons, the abigen bytecode is faulty and causes reverts with no data, so switch back to using hardcoded bytecode. commit 8eb4cb4c93ffb513a45522ceaa999fda108fff6f Author: dancoombs Date: Tue Apr 23 10:12:52 2024 -0500 fix(cli): enforce num keys greater than or equal to num builders commit f2f3dd87f00e426943db4a6a4bb272236acc0ff5 Author: dancoombs Date: Thu Apr 18 20:32:47 2024 -0500 fix(rpc): make the validationRevert error message more clear commit 4ef403c0ebfc15284a12e073b0c2de9ec5fc0dce Author: dancoombs Date: Thu Apr 18 15:23:08 2024 -0500 feat(rpc): return internal errors and increment metric on panic commit 49a9ccb59bb2dc7c92e82ed8a8c891bcb30c09e9 Author: dancoombs Date: Thu Apr 18 08:51:39 2024 -0500 feat(rpc): add distinct types for EP version gas estimations commit b266807479390e5045672fb5a9849dec6d3b4034 Author: dancoombs Date: Tue Apr 16 20:10:05 2024 -0500 feat(sim): skip estimation if gas field is provided in op commit 71242888ff2472c65cb90087c4f71d523d51faae Author: David Philipson Date: Tue Apr 16 15:07:07 2024 -0700 chore(docs): Update commands in makefile, readme, and help messages commit 26a8fa2f681dd1026e4c0c0267ffdc1197b19fa9 Author: dancoombs Date: Tue Apr 16 18:44:45 2024 -0500 fix(rpc): include inner error message in revert message commit 32557bd0124e1310a73059cc5079fbb5f183ea3c Author: dancoombs Date: Tue Apr 16 17:35:42 2024 -0500 fix: add abi_encoded_size back to UserOperation commit e70538a1eb20f522ac3628e98f39fc01bd920b4a Author: David Philipson Date: Mon Apr 15 17:27:57 2024 -0700 refactor(simulation): use abi-generated EntryPoint bytecode Rather than hardcoding v0.6 entry point bytecode in two places, one with the init code and one with the deployed code, use abigen to generate Rust code for the v0.6 `EntryPoint.sol` and use the bytecode that was generated. 
This makes v0.6 `IEntryPoint.sol` unnecessary, to remove it and convert all occurrences of `v0_6::i_entry_point::IEntryPoint` to `v0_6::entry_point::EntryPoint`. commit 3b6bf73a8490078f33d133f1e3df2ee2eb315cf1 Author: David Philipson Date: Wed Apr 3 00:19:11 2024 -0700 feat(simulation): v0.7 call gas estimation Factors out the call gas estimation logic, which performs a binary search using the `target` and `targetData` arguments to `simulateHandleOp` along with some state override trickery, into a new trait, `CallGasEstimator`, then provides implementations for v0.6 and v0.7. commit 7f4804ecc941ad5447fd8223f7a0194b2d928e51 Author: David Philipson Date: Fri Apr 12 00:11:34 2024 -0700 feat(rpc): return validation revert data from rpc When validation fails in 0.7 in either simulation or gas estimation, the entry point now surfaces the inner revert which caused validation to fail. Surface this in the error response in the rpc, using the `data` field to return an object with the fields: - `entry_point_reason`: the entry point string like "AA99 some failure". This is the only field populated in v0.6. - `inner_revert_bytes`: the raw bytes of the inner revert message. - `inner_revert_reason`: if the inner revert is caused by a Solidity `require` or `assert`, then the string message provided to it. commit 9e88be2599e1204c426dd05f9266afd64a504c41 Author: David Philipson Date: Fri Apr 12 00:37:27 2024 -0700 chore(simulation): remove postop gas estimates commit b10a43926839941363bc6d3ceb01a1afecbf80b0 Author: David Philipson Date: Sun Mar 31 19:21:54 2024 -0700 feat(simulation): v0.7 verification gas estimation Factors out the verification gas estimation logic, which performs a binary search by making many `eth_call`s in a row, into a new trait, `VerificationGasEstimator`, then provide implementations for v0.6 and v0.7. Some notable fixes along the way: * Change the Rundler settings options around gas limits. 
There are now separate gas limits for all four things requiring gas in v0.7. * Fixed some logic around v0.7's `simulateHandleOp`, which, unlike its counterpart in v0.6, returns its results normally rather than by reverting. * Add some more functionality in and around `UserOperationBuilder` to facilitate updating fields in an existing `UserOperation`. commit e311a8cf34ed9718e012a47fd1925ac96d6c9d1a Author: dancoombs Date: Fri Apr 12 06:37:34 2024 -0500 chore(ci): fix remote tests for v0.6 and v0.7 commit 14b818ed383ebbed8137d6ee4623018350a2ec72 Author: dancoombs Date: Fri Apr 12 11:12:53 2024 -0500 fix(sim): use same method for expected storage in v0.7 from v0.6 tracer commit 2e4188637d3eda68296b16f40a17b723b2093518 Author: dancoombs Date: Thu Apr 11 20:33:23 2024 -0500 feat(sim): sto-022 factory or entity needs stake, with cleanups commit 50d632667750ea4bf267142eefe057646a0f6d90 Author: dancoombs Date: Thu Apr 11 18:56:35 2024 -0500 feat: add entry point to some logs and metrics commit 0de2be4913a8837affd92dc33d90156cc6a6fac0 Author: dancoombs Date: Wed Apr 10 19:32:46 2024 -0500 fix(sim): use H256 instead of U256 for expected storage commit 2b82bb5045cb76bab4f3f4dee1459af99c76219e Author: dancoombs Date: Thu Apr 4 16:13:57 2024 -0400 feat(sim): correctly attribute OP-054 and OP-061 to the correct phase commit 531b6b6e74e7a43ebfb92b40007c3af3ce4ca957 Author: dancoombs Date: Thu Apr 4 14:35:24 2024 -0400 feat(sim): cleanups for v0.7 sim commit 6e91e82d6dd174d70bc3af26cc87bbe8a10ff1e1 Author: dancoombs Date: Wed Apr 3 14:22:04 2024 -0500 feat(sim): implement simulation for v0.7 entrypoint using reference tracer commit f99c68c2582e1c06e3abe115d6119eb1085f3942 Author: dancoombs Date: Tue Apr 2 09:04:15 2024 -0500 refactor: create context interface for simulation commit 97d257558ad7500d07bc983d9b5c923e23991a10 Author: David Philipson Date: Mon Apr 1 17:57:59 2024 -0700 fix(mempool): fix configuration deserialization Correctly deserialize the `entryPoint` field, and 
display a more descriptive error when deserialization fails. commit 0b65b981dfc61093d13165a6718321b0e9a5daed Author: dancoombs Date: Sun Mar 31 20:31:23 2024 -0500 feat: cleanups for entry point v0.7 support commit b7753661624ec03850496858dab138a947238678 Author: dancoombs Date: Sun Mar 31 09:36:25 2024 -0500 chore: fixing PR comments commit 89f527bcb4daf70a4a954b4889f624be6a2f8d55 Author: dancoombs Date: Sat Mar 30 13:26:17 2024 -0500 chore: fixing PR comments commit 3e2b0eb356a58d143bedaef0267dfaf7796f8c62 Author: dancoombs Date: Sat Mar 30 13:18:03 2024 -0500 fix: remove extra box from dyn objects commit 03790598cb3c61da4d792f86e97df1a94d57bb42 Author: dancoombs Date: Sat Mar 30 12:07:28 2024 -0500 chore: fixing PR comments commit 0983c536141a9a7357e11ffec8381a7fdd8fbc9c Author: Dan Coombs Date: Fri Mar 29 16:55:23 2024 -0500 feat: end to end entry point routing (#649) commit d49e9ade706456ac463c3007f6964d20627f1bbe Author: Dan Coombs Date: Fri Mar 29 16:48:46 2024 -0500 feat: unsafe mode for EP v0.6 (#648) commit 2353c281950190a953f379ad53a5f27073b3ccf1 Author: dancoombs Date: Mon Mar 25 16:20:10 2024 -0400 feat(provider): add an entry point v0.7 provider commit b3b16df65daf84070476b6338e25d6478a503f6d Author: Dan Coombs Date: Fri Mar 29 16:49:26 2024 -0500 feat(builder): start support for multiple entry points in builder (#645) commit 25d9bdf8ba1c95aedb8736fe92b8041406779856 Author: dancoombs Date: Thu Mar 21 20:21:36 2024 -0400 feat(pool): Add entry point routing to pool commit ff93640c5083d6b9c4039cafd785167f8e2e7a3c Author: dancoombs Date: Thu Mar 14 18:48:14 2024 -0400 refactor: move builder and pool traits to types commit 7496d732dcd06def7f55e58df24fd041206e51a1 Author: dancoombs Date: Thu Mar 14 13:53:42 2024 -0400 feat(rpc): introduce entry point routing commit 552ee6c9e94245af48c0d0d9aac42a2267f93868 Author: dancoombs Date: Tue Mar 12 06:59:12 2024 -0400 feat: add and use a user operation trait commit 532739f4417ab636eb271803e9708e5e566c31c1 Author: 
dancoombs Date: Tue Mar 5 17:01:43 2024 -0500 feat(types): add entrypoint 0.7 contract types commit de9e409edff3edf0bec6aa685363356b1bd32ff1 Author: dancoombs Date: Tue Mar 12 06:59:12 2024 -0400 feat: add and use a user operation trait commit 9ecced13a1713a3e36ff1e04e09e38666ad622a0 Author: dancoombs Date: Tue Mar 5 17:01:43 2024 -0500 feat(types): add entrypoint 0.7 contract types commit 979f15b7ba7fe6ad250c11c1539fa6027b9e178d Author: dancoombs Date: Fri Apr 19 12:29:34 2024 -0500 chore: start v0.2.0-rc0 commit b3c71ffea3713fb917ccd65bb4d46b0eb8fd5dc5 Author: dancoombs Date: Fri Apr 19 12:12:05 2024 -0500 chore: fix v0.1.0 version commit b12f683d5565a49a56feea0756058bc82ef52c4d Author: dancoombs Date: Fri Apr 19 07:37:00 2024 -0500 fix(builder): add builder index to tracker metrics commit 29409d75241d553863196758cf26a7331d555f10 Author: dancoombs Date: Thu Apr 18 11:17:39 2024 -0500 fix(bin): update metrics-process version to fix metrics commit cfff5a378ccc63cc77067320f83d652bb610ca19 Author: dancoombs Date: Thu Apr 18 09:25:40 2024 -0500 fix(builder): add missing builder index to metrics commit 68e6eaba172f2a274137dda0262290105740c5b7 Author: JP <36560907+0xfourzerofour@users.noreply.github.com> Date: Tue Apr 16 15:37:24 2024 -0400 feat(proposer): add field in chainspec to limit size of bundle in bytes (#681) commit 45f39d8e28a89466488b10384359351e8bc2ab15 Author: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri Apr 5 16:15:51 2024 +0000 chore(deps): bump h2 from 0.3.24 to 0.3.26 Bumps [h2](https://github.com/hyperium/h2) from 0.3.24 to 0.3.26. - [Release notes](https://github.com/hyperium/h2/releases) - [Changelog](https://github.com/hyperium/h2/blob/v0.3.26/CHANGELOG.md) - [Commits](https://github.com/hyperium/h2/compare/v0.3.24...v0.3.26) --- updated-dependencies: - dependency-name: h2 dependency-type: indirect ... 
Signed-off-by: dependabot[bot] commit 4042160b046698b8032f8e98f6f0e1875fee2124 Author: clonefetch Date: Tue Apr 9 10:48:32 2024 +0800 chore: remove repetitive words Signed-off-by: clonefetch commit fc79aa49a3b7cd1bf5ecde296aad4d2664181a74 Author: dancoombs Date: Thu Apr 11 16:47:06 2024 -0500 fix(estimation): handle zero fee fields in pvg estimation commit 0054d2b5029515e2f1a2a252bf716c8a0d2f6752 Author: Dan Coombs Date: Thu Mar 28 13:32:49 2024 -0500 fix(sim): fix sto-022 (#655) commit dc8f6e2a912f59c0941742389310b81c0d171f38 Author: Dan Coombs Date: Thu Mar 28 07:24:05 2024 -0500 feat(rpc): use zero fees in no paymaster estimation case (#651) commit 4e69bd3421f1d190220759e5fd34771456013d9c Author: JP <36560907+0xfourzerofour@users.noreply.github.com> Date: Wed Mar 27 18:42:37 2024 -0400 fix(sim): update storage access rules to return correct entity (#647) commit 3d9ac77eaf36ea1dcd90283685e6cc7a4ec9e257 Author: JP <36560907+0xfourzerofour@users.noreply.github.com> Date: Wed Mar 27 15:55:20 2024 -0400 feat(spec): add compliance back (#631) commit adebb49be112adb34b3d5562793397706bccc2e8 Author: Dan Coombs Date: Mon Mar 25 13:51:54 2024 -0400 fix(sim): account for native transfer overhead in estimation (#643) commit e7cbc3c3a3ac89c8a221cb2fb7843c47f5de1993 Author: Leo Antelyes Date: Mon Mar 25 22:56:15 2024 +0900 fix(docs): add missing command to full node docker-compose.yaml example (#632) commit 7dd8f3676bae1e610b3d494cfa48916f2990517b Author: Dan Coombs Date: Wed Mar 20 19:42:10 2024 -0400 fix: fix the error code for execution failed in RPC metrics (#642) commit 8bb5886f4ea41271885dde636539c626ac97d863 Author: Dan Coombs Date: Wed Mar 20 16:15:30 2024 -0400 fix(sim): handle aa51 as OOG in estimation (#641) commit 13fd5fd4621c224c905e3966a990dd984351bdbd Author: Dan Coombs Date: Wed Mar 13 12:33:47 2024 -0400 feat(sim): add support for op-080 (#637) commit 69ccf3c80f7386a298d467d97862c7eb38a9d347 Author: dependabot[bot] 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon Mar 4 18:38:36 2024 -0500 chore(deps): bump mio from 0.8.10 to 0.8.11 (#627) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> commit 725b11562eef64da3359b5a8807327e3ed9c4a3d Author: Dan Coombs Date: Mon Mar 4 18:18:16 2024 -0500 chore(ci): fix remote spec tests (#629) commit 555d230b0ae010990bb86c1331ffd2d1d780651a Author: Dan Coombs Date: Mon Mar 4 17:45:45 2024 -0500 fix(rpc): add a buffer to call gas limit estimation (#628) commit 8702543af007d6504bf7c41bd20208bdf5bd952e Author: JP <36560907+0xfourzerofour@users.noreply.github.com> Date: Mon Mar 4 17:30:26 2024 -0500 feat(release): update version (#622) commit fc82b4d368b43f6e62bb95feb0b77bf030f01e5b Author: Dan Coombs Date: Fri Feb 23 11:18:22 2024 -0500 chore: change version to 0.2.0-alpha.0 (#621) commit 899794ecada8890267e4b2bfb7696736e74815ad Author: JP <36560907+0xfourzerofour@users.noreply.github.com> Date: Fri Feb 23 11:07:29 2024 -0500 feat(cross-arch): add cross arch release pipeline (#487) commit a5cde2d80a2c61a28db2ac011c1e73d9f071d52b Author: JP <36560907+0xfourzerofour@users.noreply.github.com> Date: Thu Feb 22 21:46:03 2024 -0500 feat(metrics): add internal rpc metrics (#618) commit 475b47d05c77e288ddd58051df06f8d73a73e511 Author: Dan Coombs Date: Thu Feb 22 19:07:08 2024 -0500 chore(docs): add docs for debug_bundler_getStakeStatus (#616) commit ded162c8685a842c29817953379f00716fd391b8 Author: Dan Coombs Date: Thu Feb 22 19:02:54 2024 -0500 chore: update deps (#615) commit 1b6cf2436842960591fa67ec03ca8341b1cd84d0 Author: Dan Coombs Date: Thu Feb 22 18:52:31 2024 -0500 feat(pool): add paymaster lru cache (#614) commit da6cc016e20ee636fcdac828f601d8b805c1893d Author: Dan Coombs Date: Thu Feb 22 18:47:43 2024 -0500 refactor: modify how balances are batch read (#613) commit ffec1410457580b77a17cf273cc7cf4f24ab52e7 Author: Dan Coombs Date: Thu Feb 22 16:34:29 2024 -0500 
fix(pool): do chain updates async (#619) commit 44fae8a9c8d1910f200329b00bc6cc65262a6f56 Author: Dan Coombs Date: Wed Feb 21 15:08:34 2024 -0500 feat: add drop local user operation endpoint (#610) commit 20b725fea994490a1c3d716c3c51137b8e57ff54 Author: JP <36560907+0xfourzerofour@users.noreply.github.com> Date: Wed Feb 21 11:37:55 2024 -0500 feat(paymaster): refactor paymaster and reputation structure (#577) commit 21797d59513d532928ef2b221b8fd1539d87639b Author: Dan Coombs Date: Wed Feb 21 10:32:27 2024 -0500 feat: add debug rpc method to dump paymaster balances from pool (#609) commit 06b76f2dba5a6e51d10b89e91f8b4c7497842d58 Author: Dan Coombs Date: Tue Feb 20 19:36:09 2024 -0500 chore(bin): remove base goerli chain spec (#612) commit abe73edd567782c4f0f0a00a88fe9eb7bf06255a Author: Dan Coombs Date: Tue Feb 20 19:16:10 2024 -0500 fix(builder): move timer interval creation outside of loop (#611) commit 402528fde1cb76e1ec0683867b7229b3c8a88d1e Author: Dan Coombs Date: Tue Feb 20 15:46:20 2024 -0500 feat(builder): add a timer based trigger to bundle building (#607) commit 88bdb7317d54b2ade99700af32fbe95e66ef7f9d Author: Dan Coombs Date: Fri Feb 16 17:11:29 2024 -0800 chore(docs): add chain spec docs (#606) commit 4cb00d6ce3d6c6e2838118f718b46b2a6ba334eb Author: Dan Coombs Date: Thu Feb 15 15:17:46 2024 -0800 fix(rpc): enforce min in priority fee estimation (#605) commit 8c251fd18f6f4171ad80597ea4add5e394c51cbb Author: Dan Coombs Date: Thu Feb 15 11:36:02 2024 -0800 feat: add chain spec, use it for gas, add configs (#549) commit 7e51896db9bd48c43afa7171cba738b65cb196ad Author: Dan Coombs Date: Thu Feb 15 08:34:30 2024 -0800 feat(sim): reject UOs that don't have a buffer on verification gas limit (#599) commit 30674eef8f191ecb3dd9e4369a19b7cf713f8154 Author: Dan Coombs Date: Wed Feb 14 18:40:55 2024 -0800 chore: update spec tests after rep fix (#602) commit 69854a73df8e2b22d1e506e2a4889de94cc35321 Author: Dan Coombs Date: Wed Feb 14 18:38:04 2024 -0800 fix(pool): fix 
remote pool error conversion (#601) commit ff6889c52fbde927853cab9787c4ca3dd014a9d7 Author: Dan Coombs Date: Wed Feb 14 17:52:06 2024 -0800 fix(pool): fix proto reputation status conversion (#600) commit bb6e28adaf4bda904d28cbc298cce0250cd9ab76 Author: Dan Coombs Date: Wed Feb 14 10:51:03 2024 -0800 chore(spec): update spec tests to v0.6 (#594) commit e73e2e5a373bd7282d4708f22d42997d9468e2e6 Author: Dan Coombs Date: Tue Feb 13 15:52:42 2024 -0800 fix(rpc): handle double postop revert data parse (#592) commit cda0c81c8457ae495d619ca29f8d2dc04dc3a242 Author: Dan Coombs Date: Fri Feb 9 14:56:12 2024 -0500 feat(builder): only require bundle fees when replacing (#590) commit 0b2aa762432d35236839df6d6a50a7b26cec16a7 Author: JP <36560907+0xfourzerofour@users.noreply.github.com> Date: Tue Feb 6 13:14:53 2024 -0500 feat(tracer): add RIP-7212 precompile (#586) commit bdf15108afafbc4f44c7b774a47b276b20e682e8 Author: JP <36560907+0xfourzerofour@users.noreply.github.com> Date: Tue Feb 6 09:33:38 2024 -0500 feat(chains): add polygon amoy and use alloy-rs (#583) commit a670f081593d8362bf74f94bef36cf0ca9b3513c Author: JP <36560907+0xfourzerofour@users.noreply.github.com> Date: Fri Feb 2 16:27:14 2024 -0500 feat(paymaster): add paymaster withdrawal logic to tracker (#572) commit 96bd95bf065eec05e9e8ef636e699dbd39b80cdb Author: JP <36560907+0xfourzerofour@users.noreply.github.com> Date: Fri Feb 2 13:20:27 2024 -0500 feat(admin): create admin namespace and add paymaster clear / toggle for reputation (#570) Co-authored-by: Alex Miao --- .github/scripts/compliance_parser.sh | 23 + .github/workflows/compliance.yaml | 37 + .github/workflows/docker-images.yaml | 85 + .github/workflows/docker-release.yaml | 62 + .github/workflows/release.yaml | 219 ++ .gitignore | 10 +- .gitmodules | 28 +- Cargo.lock | 1166 ++++++----- Cargo.toml | 29 +- Cross.toml | 2 + Dockerfile | 6 +- Dockerfile.build | 22 + Dockerfile.cross | 15 + Makefile | 63 +- README.md | 25 +- bin/rundler/Cargo.toml | 19 +- 
bin/rundler/chain_specs/arbitrum.toml | 9 + bin/rundler/chain_specs/arbitrum_sepolia.toml | 4 + bin/rundler/chain_specs/avax.toml | 7 + bin/rundler/chain_specs/avax_fuji.toml | 4 + bin/rundler/chain_specs/base.toml | 13 + bin/rundler/chain_specs/base_sepolia.toml | 7 + bin/rundler/chain_specs/boba_sepolia.toml | 6 + bin/rundler/chain_specs/dev.toml | 6 + bin/rundler/chain_specs/ethereum.toml | 10 + bin/rundler/chain_specs/ethereum_sepolia.toml | 9 + bin/rundler/chain_specs/optimism.toml | 11 + bin/rundler/chain_specs/optimism_devnet.toml | 6 + bin/rundler/chain_specs/optimism_sepolia.toml | 4 + bin/rundler/chain_specs/polygon.toml | 7 + bin/rundler/chain_specs/polygon_amoy.toml | 4 + bin/rundler/src/cli/builder.rs | 286 ++- bin/rundler/src/cli/chain_spec.rs | 128 ++ bin/rundler/src/cli/metrics.rs | 223 +- bin/rundler/src/cli/mod.rs | 185 +- bin/rundler/src/cli/node/events.rs | 7 - bin/rundler/src/cli/node/mod.rs | 17 +- bin/rundler/src/cli/pool.rs | 172 +- bin/rundler/src/cli/rpc.rs | 29 +- bin/tools/src/bin/get_example_ops.rs | 38 - crates/builder/Cargo.toml | 6 +- crates/builder/src/bundle_proposer.rs | 877 +++++--- crates/builder/src/bundle_sender.rs | 1851 ++++++++++++++--- crates/builder/src/emit.rs | 11 + crates/builder/src/lib.rs | 12 +- crates/builder/src/sender/bloxroute.rs | 85 +- crates/builder/src/sender/conditional.rs | 103 - crates/builder/src/sender/flashbots.rs | 386 ++-- crates/builder/src/sender/mod.rs | 232 ++- crates/builder/src/sender/raw.rs | 107 +- crates/builder/src/server/local.rs | 60 +- crates/builder/src/server/mod.rs | 55 +- crates/builder/src/server/remote/client.rs | 20 +- crates/builder/src/server/remote/error.rs | 31 +- crates/builder/src/server/remote/protos.rs | 15 +- crates/builder/src/server/remote/server.rs | 28 +- crates/builder/src/signer/mod.rs | 3 +- crates/builder/src/task.rs | 333 ++- crates/builder/src/transaction_tracker.rs | 660 +++--- crates/dev/src/lib.rs | 34 +- crates/pool/Cargo.toml | 9 +- 
crates/pool/proto/op_pool/op_pool.proto | 212 +- crates/pool/src/chain.rs | 880 ++++++-- crates/pool/src/emit.rs | 21 +- crates/pool/src/lib.rs | 8 +- crates/pool/src/mempool/error.rs | 111 - crates/pool/src/mempool/mod.rs | 191 +- crates/pool/src/mempool/paymaster.rs | 789 +++++-- crates/pool/src/mempool/pool.rs | 453 ++-- crates/pool/src/mempool/reputation.rs | 344 ++- crates/pool/src/mempool/uo_pool.rs | 1054 ++++++---- crates/pool/src/server/local.rs | 375 +++- crates/pool/src/server/mod.rs | 111 +- crates/pool/src/server/remote/client.rs | 222 +- crates/pool/src/server/remote/error.rs | 387 +++- crates/pool/src/server/remote/protos.rs | 251 ++- crates/pool/src/server/remote/server.rs | 148 +- crates/pool/src/task.rs | 250 ++- crates/provider/Cargo.toml | 8 + crates/provider/src/ethers/entry_point.rs | 211 -- crates/provider/src/ethers/entry_point/mod.rs | 115 + .../provider/src/ethers/entry_point/v0_6.rs | 449 ++++ .../provider/src/ethers/entry_point/v0_7.rs | 488 +++++ .../provider/src/ethers/metrics_middleware.rs | 137 ++ crates/provider/src/ethers/mod.rs | 7 +- .../provider/src/ethers/paymaster_helper.rs | 36 - crates/provider/src/ethers/provider.rs | 250 ++- crates/provider/src/lib.rs | 13 +- crates/provider/src/traits/entry_point.rs | 213 +- crates/provider/src/traits/mod.rs | 21 +- crates/provider/src/traits/provider.rs | 90 +- crates/provider/src/traits/test_utils.rs | 214 ++ crates/rpc/Cargo.toml | 5 +- crates/rpc/src/admin.rs | 110 + crates/rpc/src/debug.rs | 204 +- crates/rpc/src/eth/api.rs | 1014 +++------ crates/rpc/src/eth/error.rs | 178 +- crates/rpc/src/eth/events/common.rs | 257 +++ crates/rpc/src/eth/events/mod.rs | 222 ++ crates/rpc/src/eth/events/v0_6.rs | 105 + crates/rpc/src/eth/events/v0_7.rs | 109 + crates/rpc/src/eth/mod.rs | 23 +- crates/rpc/src/eth/router.rs | 365 ++++ crates/rpc/src/eth/server.rs | 70 +- crates/rpc/src/lib.rs | 7 +- crates/rpc/src/metrics.rs | 10 +- crates/rpc/src/rundler.rs | 140 +- crates/rpc/src/task.rs | 203 +- 
crates/rpc/src/{types.rs => types/mod.rs} | 195 +- crates/rpc/src/types/v0_6.rs | 127 ++ crates/rpc/src/types/v0_7.rs | 194 ++ crates/rpc/src/utils.rs | 58 + crates/sim/build.rs | 3 +- .../sim/src/estimation/estimate_call_gas.rs | 249 +++ .../estimation/estimate_verification_gas.rs | 235 +++ crates/sim/src/estimation/estimation.rs | 1342 ------------ crates/sim/src/estimation/mod.rs | 108 +- crates/sim/src/estimation/types.rs | 4 +- crates/sim/src/estimation/v0_6.rs | 1508 ++++++++++++++ crates/sim/src/estimation/v0_7.rs | 890 ++++++++ crates/sim/src/gas/gas.rs | 292 +-- crates/sim/src/gas/oracle.rs | 29 +- crates/sim/src/lib.rs | 19 +- crates/sim/src/precheck.rs | 328 ++- crates/sim/src/simulation/context.rs | 180 ++ crates/sim/src/simulation/mempool.rs | 55 +- crates/sim/src/simulation/mod.rs | 194 +- crates/sim/src/simulation/simulation.rs | 1369 ------------ crates/sim/src/simulation/simulator.rs | 1214 +++++++++++ crates/sim/src/simulation/tracer.rs | 202 -- crates/sim/src/simulation/unsafe_sim.rs | 186 ++ crates/sim/src/simulation/v0_6/context.rs | 362 ++++ .../src/simulation/v0_6/mod.rs} | 20 +- crates/sim/src/simulation/v0_6/tracer.rs | 124 ++ crates/sim/src/simulation/v0_7/context.rs | 501 +++++ crates/sim/src/simulation/v0_7/mod.rs | 17 + crates/sim/src/simulation/v0_7/tracer.rs | 191 ++ .../sim/src/simulation/validation_results.rs | 142 -- crates/sim/src/types.rs | 15 +- crates/sim/src/utils.rs | 97 +- crates/sim/tracer/package.json | 2 +- ...ationTracer.ts => validationTracerV0_6.ts} | 27 +- crates/sim/tracer/src/validationTracerV0_7.ts | 444 ++++ crates/task/src/grpc/metrics.rs | 10 +- crates/task/src/grpc/protos.rs | 60 +- crates/types/.gitignore | 6 +- crates/types/Cargo.toml | 17 +- crates/types/build.rs | 146 +- ...DCD49DcA30c7CF57E578a026d2789_deployed.txt | 1 + crates/types/contracts/foundry.toml | 10 +- .../hc_scripts/DeployHybridAccount.sol | 8 +- .../contracts/hc_scripts/ExampleDeploy.s.sol | 16 +- .../contracts/hc_scripts/LocalDeploy.s.sol | 
8 +- .../v0_6} | 0 .../lib/account-abstraction-versions/v0_7 | 1 + .../contracts/lib/openzeppelin-contracts | 1 - .../lib/openzeppelin-contracts-versions/v4_9 | 1 + .../lib/openzeppelin-contracts-versions/v5_0 | 1 + crates/types/contracts/src/GetGasUsed.sol | 24 - .../types/contracts/src/PaymasterHelper.sol | 26 - crates/types/contracts/src/imports.sol | 12 - .../src/utils/CallGasEstimationProxyTypes.sol | 15 + .../src/{ => utils}/GetCodeHashes.sol | 0 .../types/contracts/src/utils/GetGasUsed.sol | 33 + .../contracts/src/utils/StorageLoader.sol | 17 + .../src/{ => v0_6}/CallGasEstimationProxy.sol | 100 +- .../types/contracts/src/v0_6/GetBalances.sol | 23 + .../src/{ => v0_6}/PrecompileAccount.sol | 2 +- crates/types/contracts/src/v0_6/imports.sol | 12 + .../src/v0_7/CallGasEstimationProxy.sol | 198 ++ .../types/contracts/src/v0_7/GetBalances.sol | 23 + crates/types/contracts/src/v0_7/imports.sol | 12 + .../contracts/test/PrecompileAccountTest.sol | 33 - .../src/server => types/src/builder}/error.rs | 24 +- crates/types/src/builder/mod.rs | 23 + .../src/builder/traits.rs} | 26 +- .../src/builder/types.rs} | 27 +- crates/types/src/chain.rs | 204 +- crates/types/src/contracts/mod.rs | 39 + crates/types/src/entity.rs | 131 +- crates/types/src/hybrid_compute.rs | 29 +- crates/types/src/lib.rs | 22 +- crates/types/src/opcode.rs | 426 ++++ crates/types/src/pool/error.rs | 250 +++ crates/types/src/pool/mod.rs | 36 + crates/types/src/pool/traits.rs | 124 ++ crates/types/src/pool/types.rs | 156 ++ crates/types/src/timestamp.rs | 17 + crates/types/src/user_operation.rs | 332 --- crates/types/src/user_operation/mod.rs | 460 ++++ crates/types/src/user_operation/v0_6.rs | 549 +++++ crates/types/src/user_operation/v0_7.rs | 994 +++++++++ crates/types/src/validation_results.rs | 453 ++++ crates/utils/Cargo.toml | 3 + crates/utils/src/cache.rs | 67 + crates/utils/src/eth.rs | 54 +- crates/utils/src/lib.rs | 1 + deny.toml | 1 + docs/README.md | 2 + docs/architecture/builder.md | 76 
+- docs/architecture/chain_spec.md | 39 + docs/architecture/entry_point.md | 65 + docs/architecture/pool.md | 8 +- docs/architecture/rpc.md | 321 ++- docs/cli.md | 103 +- docs/docker.md | 50 + docs/release.md | 39 + hybrid-compute/deploy-local.py | 48 +- hybrid-compute/local.env | 1 + hybrid-compute/runit.sh | 4 +- test/.env.default | 2 +- test/spec-tests/bundler-spec-tests | 1 - .../rundler-launcher/docker-compose.yml | 5 +- test/spec-tests/local/.env | 6 +- test/spec-tests/local/docker-compose.yml | 38 +- test/spec-tests/local/launcher.sh | 12 +- test/spec-tests/local/run-spec-tests-v0_6.sh | 17 + test/spec-tests/local/run-spec-tests-v0_7.sh | 17 + test/spec-tests/local/run-spec-tests.sh | 15 - test/spec-tests/remote/docker-compose.yml | 63 +- test/spec-tests/remote/launcher.sh | 26 +- ...n-spec-tests.sh => run-spec-tests-v0_6.sh} | 4 +- test/spec-tests/remote/run-spec-tests-v0_7.sh | 15 + test/spec-tests/v0_6/bundler-spec-tests | 1 + test/spec-tests/v0_7/bundler-spec-tests | 1 + 225 files changed, 25587 insertions(+), 10581 deletions(-) create mode 100755 .github/scripts/compliance_parser.sh create mode 100644 .github/workflows/compliance.yaml create mode 100644 .github/workflows/docker-images.yaml create mode 100644 .github/workflows/docker-release.yaml create mode 100644 .github/workflows/release.yaml create mode 100644 Cross.toml create mode 100644 Dockerfile.build create mode 100644 Dockerfile.cross create mode 100644 bin/rundler/chain_specs/arbitrum.toml create mode 100644 bin/rundler/chain_specs/arbitrum_sepolia.toml create mode 100644 bin/rundler/chain_specs/avax.toml create mode 100644 bin/rundler/chain_specs/avax_fuji.toml create mode 100644 bin/rundler/chain_specs/base.toml create mode 100644 bin/rundler/chain_specs/base_sepolia.toml create mode 100644 bin/rundler/chain_specs/boba_sepolia.toml create mode 100644 bin/rundler/chain_specs/dev.toml create mode 100644 bin/rundler/chain_specs/ethereum.toml create mode 100644 
bin/rundler/chain_specs/ethereum_sepolia.toml create mode 100644 bin/rundler/chain_specs/optimism.toml create mode 100644 bin/rundler/chain_specs/optimism_devnet.toml create mode 100644 bin/rundler/chain_specs/optimism_sepolia.toml create mode 100644 bin/rundler/chain_specs/polygon.toml create mode 100644 bin/rundler/chain_specs/polygon_amoy.toml create mode 100644 bin/rundler/src/cli/chain_spec.rs delete mode 100644 bin/tools/src/bin/get_example_ops.rs delete mode 100644 crates/builder/src/sender/conditional.rs delete mode 100644 crates/pool/src/mempool/error.rs delete mode 100644 crates/provider/src/ethers/entry_point.rs create mode 100644 crates/provider/src/ethers/entry_point/mod.rs create mode 100644 crates/provider/src/ethers/entry_point/v0_6.rs create mode 100644 crates/provider/src/ethers/entry_point/v0_7.rs create mode 100644 crates/provider/src/ethers/metrics_middleware.rs delete mode 100644 crates/provider/src/ethers/paymaster_helper.rs create mode 100644 crates/provider/src/traits/test_utils.rs create mode 100644 crates/rpc/src/admin.rs create mode 100644 crates/rpc/src/eth/events/common.rs create mode 100644 crates/rpc/src/eth/events/mod.rs create mode 100644 crates/rpc/src/eth/events/v0_6.rs create mode 100644 crates/rpc/src/eth/events/v0_7.rs create mode 100644 crates/rpc/src/eth/router.rs rename crates/rpc/src/{types.rs => types/mod.rs} (51%) create mode 100644 crates/rpc/src/types/v0_6.rs create mode 100644 crates/rpc/src/types/v0_7.rs create mode 100644 crates/rpc/src/utils.rs create mode 100644 crates/sim/src/estimation/estimate_call_gas.rs create mode 100644 crates/sim/src/estimation/estimate_verification_gas.rs delete mode 100644 crates/sim/src/estimation/estimation.rs create mode 100644 crates/sim/src/estimation/v0_6.rs create mode 100644 crates/sim/src/estimation/v0_7.rs create mode 100644 crates/sim/src/simulation/context.rs delete mode 100644 crates/sim/src/simulation/simulation.rs create mode 100644 crates/sim/src/simulation/simulator.rs 
delete mode 100644 crates/sim/src/simulation/tracer.rs create mode 100644 crates/sim/src/simulation/unsafe_sim.rs create mode 100644 crates/sim/src/simulation/v0_6/context.rs rename crates/{provider/src/ethers/stake_manager.rs => sim/src/simulation/v0_6/mod.rs} (60%) create mode 100644 crates/sim/src/simulation/v0_6/tracer.rs create mode 100644 crates/sim/src/simulation/v0_7/context.rs create mode 100644 crates/sim/src/simulation/v0_7/mod.rs create mode 100644 crates/sim/src/simulation/v0_7/tracer.rs delete mode 100644 crates/sim/src/simulation/validation_results.rs rename crates/sim/tracer/src/{validationTracer.ts => validationTracerV0_6.ts} (95%) create mode 100644 crates/sim/tracer/src/validationTracerV0_7.ts create mode 100644 crates/types/contracts/bytecode/entrypoint/0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789_deployed.txt rename crates/types/contracts/lib/{account-abstraction => account-abstraction-versions/v0_6} (100%) create mode 160000 crates/types/contracts/lib/account-abstraction-versions/v0_7 delete mode 160000 crates/types/contracts/lib/openzeppelin-contracts create mode 160000 crates/types/contracts/lib/openzeppelin-contracts-versions/v4_9 create mode 160000 crates/types/contracts/lib/openzeppelin-contracts-versions/v5_0 delete mode 100644 crates/types/contracts/src/GetGasUsed.sol delete mode 100644 crates/types/contracts/src/PaymasterHelper.sol delete mode 100644 crates/types/contracts/src/imports.sol create mode 100644 crates/types/contracts/src/utils/CallGasEstimationProxyTypes.sol rename crates/types/contracts/src/{ => utils}/GetCodeHashes.sol (100%) create mode 100644 crates/types/contracts/src/utils/GetGasUsed.sol create mode 100644 crates/types/contracts/src/utils/StorageLoader.sol rename crates/types/contracts/src/{ => v0_6}/CallGasEstimationProxy.sol (72%) create mode 100644 crates/types/contracts/src/v0_6/GetBalances.sol rename crates/types/contracts/src/{ => v0_6}/PrecompileAccount.sol (94%) create mode 100644 
crates/types/contracts/src/v0_6/imports.sol create mode 100644 crates/types/contracts/src/v0_7/CallGasEstimationProxy.sol create mode 100644 crates/types/contracts/src/v0_7/GetBalances.sol create mode 100644 crates/types/contracts/src/v0_7/imports.sol delete mode 100644 crates/types/contracts/test/PrecompileAccountTest.sol rename crates/{pool/src/server => types/src/builder}/error.rs (59%) create mode 100644 crates/types/src/builder/mod.rs rename crates/{provider/src/traits/stake_manager.rs => types/src/builder/traits.rs} (53%) rename crates/{provider/src/traits/paymaster_helper.rs => types/src/builder/types.rs} (54%) create mode 100644 crates/types/src/contracts/mod.rs create mode 100644 crates/types/src/opcode.rs create mode 100644 crates/types/src/pool/error.rs create mode 100644 crates/types/src/pool/mod.rs create mode 100644 crates/types/src/pool/traits.rs create mode 100644 crates/types/src/pool/types.rs delete mode 100644 crates/types/src/user_operation.rs create mode 100644 crates/types/src/user_operation/mod.rs create mode 100644 crates/types/src/user_operation/v0_6.rs create mode 100644 crates/types/src/user_operation/v0_7.rs create mode 100644 crates/types/src/validation_results.rs create mode 100644 crates/utils/src/cache.rs create mode 100644 docs/architecture/chain_spec.md create mode 100644 docs/architecture/entry_point.md create mode 100644 docs/release.md delete mode 160000 test/spec-tests/bundler-spec-tests create mode 100755 test/spec-tests/local/run-spec-tests-v0_6.sh create mode 100755 test/spec-tests/local/run-spec-tests-v0_7.sh delete mode 100755 test/spec-tests/local/run-spec-tests.sh rename test/spec-tests/remote/{run-spec-tests.sh => run-spec-tests-v0_6.sh} (53%) create mode 100755 test/spec-tests/remote/run-spec-tests-v0_7.sh create mode 160000 test/spec-tests/v0_6/bundler-spec-tests create mode 160000 test/spec-tests/v0_7/bundler-spec-tests diff --git a/.github/scripts/compliance_parser.sh b/.github/scripts/compliance_parser.sh new file 
mode 100755 index 00000000..c6af8315 --- /dev/null +++ b/.github/scripts/compliance_parser.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Check for xml output file +if [ -f "$1" ]; then + echo "$1 exists." +else + echo "$1 does not exist." + exit 1 +fi +# Parse the XML data +errors=$(xmllint --xpath 'string(//testsuite/@errors)' "$1") +failures=$(xmllint --xpath 'string(//testsuite/@failures)' "$1") +test_count=$(xmllint --xpath 'string(//testsuite/@tests)' "$1") + +# Check if there are any errors or failures +if [[ $errors -gt 0 || $failures -gt 0 ]]; then + echo "ERROR: Tests failed!" + echo "$failures tests failed, $errors errors. out of $test_count tests" + exit 1 +else + echo "SUCCESS: All tests passed!" + echo "$test_count tests passed." +fi diff --git a/.github/workflows/compliance.yaml b/.github/workflows/compliance.yaml new file mode 100644 index 00000000..f0e58712 --- /dev/null +++ b/.github/workflows/compliance.yaml @@ -0,0 +1,37 @@ +on: + push: + branches: + - main + pull_request: + +name: compliance +jobs: + compliance: + runs-on: ubuntu-latest + steps: + - run: curl -sSL https://raw.githubusercontent.com/pdm-project/pdm/main/install-pdm.py | python3 - + - run: pip install jq yq + - run: sudo apt install -y libxml2-utils + + + - name: Checkout Rundler + uses: actions/checkout@v4 + with: + path: rundler + submodules: recursive + + - name: Clone Test Executor + uses: actions/checkout@v4 + with: + path: bundler-test-executors + repository: alchemyplatform/bundler-test-executor + ref: releases/v0.6 + + - name: Build rundler image locally + run: docker buildx build ./rundler -t alchemyplatform/rundler:latest + + - name: Run bundle executor + run: ./bundler-test-executors/runall.sh local ./bundler-test-executors/bundlers/rundler/rundler.yml + + - name: Parse spec results + run: ./rundler/.github/scripts/compliance_parser.sh ./bundler-test-executors/build/out/rundler.xml diff --git a/.github/workflows/docker-images.yaml b/.github/workflows/docker-images.yaml new file 
mode 100644 index 00000000..c1cff811 --- /dev/null +++ b/.github/workflows/docker-images.yaml @@ -0,0 +1,85 @@ +name: Docker Images + +on: + workflow_dispatch: + push: + tags: + - '*' + +jobs: + publish-images: + name: Publish Images + runs-on: ubuntu-latest + outputs: + rundler-hc: ${{ steps.packages.outputs.rundler-hc }} + offchain-rpc: ${{ steps.packages.outputs.offchain-rpc }} + + steps: + - name: Check out source code + uses: actions/checkout/@v4 + with: + fetch-depth: 0 + + rundler-hc: + name: Publish rundler-hc + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: DockerHub login + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }} + password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }} + + - name: Metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: bobanetwork/rundler-hc + tags: | + type=ref,event=tag + type=sha + + - name: Build and push + uses: docker/build-push-action@v5 + with: + context: . 
+ file: Dockerfile + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + + offchain-rpc: + name: Publish offchain-rpc + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: DockerHub login + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }} + password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }} + + - name: Metadata + id: meta + uses: docker/metadata-action@v5 + with: + images: bobanetwork/offchain-rpc + tags: | + type=ref,event=tag + type=sha + + - name: Build and push + uses: docker/build-push-action@v5 + with: + context: ./hybrid-compute + file: hybrid-compute/Dockerfile.offchain-rpc + push: ${{ github.event_name != 'pull_request' }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/docker-release.yaml b/.github/workflows/docker-release.yaml new file mode 100644 index 00000000..99aed36d --- /dev/null +++ b/.github/workflows/docker-release.yaml @@ -0,0 +1,62 @@ +name: Docker release + +on: + workflow_dispatch: + inputs: + version: + required: true + type: string + push: + tags: + - v*.*.* + +env: + CARGO_TERM_COLOR: always + DOCKER_IMAGE_NAME: alchemyplatform/rundler + +jobs: + build: + name: build and push + runs-on: ubuntu-22.04 + permissions: + packages: write + contents: read + steps: + - name: Checkout sources + uses: actions/checkout@v3 + with: + submodules: recursive + + - name: Install toolchain + uses: dtolnay/rust-toolchain@stable + with: + components: clippy + toolchain: 1.79.0 + + - name: Install toolchain (nightly) + run: rustup toolchain add nightly --component rustfmt --profile minimal + + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + + - uses: taiki-e/install-action@cross + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: alchemyplatform + password: ${{ 
secrets.DOCKERHUB_TOKEN }} + + - name: Set up Docker builder + run: | + docker run --privileged --rm tonistiigi/binfmt --install arm64,amd64 + docker buildx create --use --name cross-builder + + - name: Build and push image + run: | + if [ -n "${{ github.event.inputs.version }}" ]; then + sudo -E env "PATH=$PATH" make GIT_TAG="${{ github.event.inputs.version }}" docker-build + else + sudo -E env "PATH=$PATH" make docker-build-latest + fi diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 00000000..da7626c7 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,219 @@ +## This release action is inspired from https://githug.com/paradigmxyz/reth + +name: Release + +on: + workflow_dispatch: + inputs: + version: + required: true + type: string + push: + tags: + - v*.*.* + +jobs: + extract-version: + name: extract version + runs-on: ubuntu-latest + steps: + - name: Extract version + run: | + if [ -n "${{ github.event.inputs.version }}" ]; then + echo "VERSION=${{ github.event.inputs.version }}" >> $GITHUB_OUTPUT + else + echo "VERSION=$(echo ${GITHUB_REF#refs/tags/})" >> $GITHUB_OUTPUT + fi + id: extract_version + outputs: + VERSION: ${{ steps.extract_version.outputs.VERSION }} + + build: + name: build release + runs-on: ${{ matrix.os }} + needs: extract-version + strategy: + matrix: + include: + - target: aarch64-unknown-linux-gnu + os: ubuntu-20.04 + profile: release + - target: x86_64-unknown-linux-gnu + os: ubuntu-20.04 + profile: release + - target: x86_64-apple-darwin + os: macos-latest + profile: release + - target: aarch64-apple-darwin + os: macos-latest + profile: release + - target: x86_64-pc-windows-gnu + os: ubuntu-20.04 + profile: release + + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - uses: dtolnay/rust-toolchain@stable + with: + target: ${{ matrix.target }} + - uses: taiki-e/install-action@cross + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + + - 
name: Foundry install + if: contains( ${{ matrix.target }}, 'apple') + uses: foundry-rs/foundry-toolchain@v1 + - name: Install protobuf + if: contains( ${{ matrix.target }}, 'apple') + uses: arduino/setup-protoc@v3 + + # ============================== + # Apple Silicon SDK setup + # ============================== + + - name: Apple Silicon setup + if: matrix.target == 'aarch64-apple-darwin' + run: | + echo "SDKROOT=$(xcrun -sdk macosx --show-sdk-path)" >> $GITHUB_ENV + echo "MACOSX_DEPLOYMENT_TARGET=$(xcrun -sdk macosx --show-sdk-platform-version)" >> $GITHUB_ENV + + # ============================== + # Builds + # ============================== + + - name: Build rundler for ${{ matrix.target }} + run: | + sudo -E env "PATH=$PATH" make build-${{ matrix.target }} + + - name: Move cross-compiled binary + run: | + mkdir artifacts + [[ "${{ matrix.target }}" == *windows* ]] && ext=".exe" + sudo mv "target/${{ matrix.target }}/${{ matrix.profile }}/rundler${ext}" ./artifacts + + # ============================== + # Signing + # ============================== + + - name: Configure GPG and create artifacts + env: + GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }} + GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} + run: | + export GPG_TTY=$(tty) + echo -n "$GPG_SIGNING_KEY" | base64 --decode | gpg --batch --import + cd artifacts + tar -czf rundler-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz rundler* + echo "$GPG_PASSPHRASE" | gpg --passphrase-fd 0 --pinentry-mode loopback --batch -ab rundler-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz + mv *tar.gz* .. 
+ shell: bash + + # ======================================================================= + # Upload artifacts + # This is required to share artifacts between different jobs + # ======================================================================= + - name: Upload artifact + uses: actions/upload-artifact@v3 + with: + name: rundler-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz + path: rundler-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz + + - name: Upload signature + uses: actions/upload-artifact@v3 + with: + name: rundler-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz.asc + path: rundler-${{ needs.extract-version.outputs.VERSION }}-${{ matrix.target }}.tar.gz.asc + + draft-release: + name: draft release + needs: [build, extract-version] + runs-on: ubuntu-latest + env: + VERSION: ${{ needs.extract-version.outputs.VERSION }} + permissions: + # Required to post the release + contents: write + steps: + # This is necessary for generating the changelog. It has to come before "Download Artifacts" or else it deletes the artifacts. 
+ - name: Checkout sources + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + # ============================== + # Download artifacts + # ============================== + - name: Download artifacts + uses: actions/download-artifact@v3 + + # ============================== + # Create release draft + # ============================== + - name: Generate full changelog + id: changelog + run: | + echo "CHANGELOG<> $GITHUB_OUTPUT + echo "$(git log --pretty=format:"- %s" $(git describe --tags --abbrev=0 ${{ env.VERSION }}^)..${{ env.VERSION }})" >> $GITHUB_OUTPUT + echo "EOF" >> $GITHUB_OUTPUT + + - name: Create release draft + env: + GITHUB_USER: ${{ github.repository_owner }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + # The formatting here is borrowed from Reth & Lighthouse (which is borrowed from OpenEthereum): + # https://github.com/openethereum/openethereum/blob/main/.github/workflows/build.yml + run: | + body=$(cat <<- "ENDBODY" + + + ## Testing Checklist (DELETE ME) + + - [ ] Run on testnet for 1-3 days. + - [ ] Ensure all CI checks pass. + + ## Release Checklist (DELETE ME) + + - [ ] Ensure all crates have had their versions bumped. + - [ ] Write the summary. + - [ ] Ensure all binaries have been added. + - [ ] Prepare release posts (Twitter, ...). 
+ + ## Summary + + Add a summary, including: + + - Critical bug fixes + - New features + - Any breaking changes (and what to expect) + + ## All Changes + + ${{ steps.changelog.outputs.CHANGELOG }} + + ## Binaries + + The binaries are signed with the PGP key: `85C5 DEF0 37D3 FDE4 FC17 94B1 475B 35EA 9352 EB2` + + | System | Architecture | Binary | PGP Signature | + |:---:|:---:|:---:|:---| + | | x86_64 | [rundler-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/rundler-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/rundler-${{ env.VERSION }}-x86_64-unknown-linux-gnu.tar.gz.asc) | + | | aarch64 | [rundler-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/rundler-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/rundler-${{ env.VERSION }}-aarch64-unknown-linux-gnu.tar.gz.asc) | + | | x86_64 | [rundler-${{ env.VERSION }}-x86_64-pc-windows-gnu.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/rundler-${{ env.VERSION }}-x86_64-pc-windows-gnu.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/rundler-${{ env.VERSION }}-x86_64-pc-windows-gnu.tar.gz.asc) | + | | x86_64 | [rundler-${{ env.VERSION }}-x86_64-apple-darwin.tar.gz](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/rundler-${{ env.VERSION }}-x86_64-apple-darwin.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/rundler-${{ env.VERSION }}-x86_64-apple-darwin.tar.gz.asc) | + | | aarch64 | [rundler-${{ env.VERSION }}-aarch64-apple-darwin.tar.gz](https://github.com/${{ env.REPO_NAME 
}}/releases/download/${{ env.VERSION }}/rundler-${{ env.VERSION }}-aarch64-apple-darwin.tar.gz) | [PGP Signature](https://github.com/${{ env.REPO_NAME }}/releases/download/${{ env.VERSION }}/rundler-${{ env.VERSION }}-aarch64-apple-darwin.tar.gz.asc) | + | | | | | + | **System** | **Option** | - | **Resource** | + | | Docker | [${{ env.VERSION }}](https://hub.docker.com/r/alchemyplatform/rundler) | + + ENDBODY + ) + assets=() + for asset in ./rundler-*.tar.gz*; do + assets+=("$asset/$asset") + done + tag_name="${{ env.VERSION }}" + echo "$body" | gh release create --draft -t "Rundler $tag_name" -F "-" "$tag_name" "${assets[@]}" diff --git a/.gitignore b/.gitignore index 2faf0456..bf28f410 100644 --- a/.gitignore +++ b/.gitignore @@ -17,11 +17,11 @@ .DS_Store .helix -/venv +# Release artifacts +dist/ +# Hybrid Compute __pycache__ -hybrid-compute/__pycache__/ -hybrid-compute/addresses.txt -hybrid-compute/contracts.json hybrid-compute/.env.old -crates/types/contracts/broadcast +hybrid-compute/contracts.json +crates/types/contracts/broadcast/ diff --git a/.gitmodules b/.gitmodules index 119e2dfb..d130ed7e 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,16 +1,28 @@ -[submodule "crates/types/contracts/lib/account-abstraction"] - path = crates/types/contracts/lib/account-abstraction +[submodule "crates/types/contracts/lib/account-abstraction-versions/v0_7"] + path = crates/types/contracts/lib/account-abstraction-versions/v0_7 + url = https://github.com/eth-infinitism/account-abstraction + branch = releases/v0.7 +[submodule "crates/types/contracts/lib/account-abstraction-versions/v0_6"] + path = crates/types/contracts/lib/account-abstraction-versions/v0_6 url = https://github.com/bobanetwork/account-abstraction-hc branch = hc-dev [submodule "crates/types/contracts/lib/forge-std"] path = crates/types/contracts/lib/forge-std url = https://github.com/foundry-rs/forge-std branch = chore/v1.5.0 -[submodule "crates/types/contracts/lib/openzeppelin-contracts"] - path = 
crates/types/contracts/lib/openzeppelin-contracts +[submodule "crates/types/contracts/lib/openzeppelin-contracts-versions/v5_0"] + path = crates/types/contracts/lib/openzeppelin-contracts-versions/v5_0 url = https://github.com/OpenZeppelin/openzeppelin-contracts - branch = release-v4.8 -[submodule "test/spec-tests/bundler-spec-tests"] - path = test/spec-tests/bundler-spec-tests - url = https://github.com/alchemyplatform/bundler-spec-tests.git + branch = release-v5.0 +[submodule "crates/types/contracts/lib/openzeppelin-contracts-versions/v4_9"] + path = crates/types/contracts/lib/openzeppelin-contracts-versions/v4_9 + url = https://github.com/OpenZeppelin/openzeppelin-contracts + branch = release-v4.9 +[submodule "test/spec-tests/v0_6/bundler-spec-tests"] + path = test/spec-tests/v0_6/bundler-spec-tests + url = git@github.com:alchemyplatform/bundler-spec-tests.git + ignore = dirty +[submodule "test/spec-tests/v0_7/bundler-spec-tests"] + path = test/spec-tests/v0_7/bundler-spec-tests + url = git@github.com:alchemyplatform/bundler-spec-tests.git ignore = dirty diff --git a/Cargo.lock b/Cargo.lock index 0362be57..efd139e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -52,13 +52,15 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.3" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" dependencies = [ "cfg-if", + "getrandom", "once_cell", "version_check", + "zerocopy", ] [[package]] @@ -85,12 +87,6 @@ dependencies = [ "alloc-no-stdlib", ] -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - [[package]] name = "android_system_properties" version = "0.1.5" @@ -239,7 +235,7 @@ dependencies = [ "log", "parking", "polling", - 
"rustix 0.37.25", + "rustix 0.37.3", "slab", "socket2 0.4.9", "waker-fn", @@ -299,7 +295,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.50", ] [[package]] @@ -316,7 +312,7 @@ checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.50", ] [[package]] @@ -338,14 +334,13 @@ checksum = "1181e1e0d1fce796a03db1ae795d67167da795f9cf4a39c37589e85ef57f26d3" [[package]] name = "auto_impl" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" +checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ - "proc-macro-error", "proc-macro2", "quote", - "syn 1.0.107", + "syn 2.0.50", ] [[package]] @@ -464,22 +459,22 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.64.0" +version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4243e6031260db77ede97ad86c27e501d646a27ab57b59a574f725d98ab1fb4" +checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.1", "cexpr", "clang-sys", + "itertools 0.12.1", "lazy_static", "lazycell", - "peeking_take_while", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 1.0.107", + "syn 2.0.50", ] [[package]] @@ -505,9 +500,12 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.5.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" +dependencies = [ + "serde", +] [[package]] name = "bitvec" @@ -614,9 +612,9 
@@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.6.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" dependencies = [ "serde", ] @@ -676,13 +674,11 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.99" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96c51067fd44124faa7f870b4b1c969379ad32b2ba805aa959430ceaa384f695" +checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" dependencies = [ "jobserver", - "libc", - "once_cell", ] [[package]] @@ -702,17 +698,18 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.30" +version = "0.4.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defd4e7873dbddba6c7c91e199c7fcb946abc4a6a4ac3195400bcfb01b5de877" +checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b" dependencies = [ - "android-tzdata", "iana-time-zone", "js-sys", + "num-integer", "num-traits", "serde", + "time 0.1.45", "wasm-bindgen", - "windows-targets 0.48.5", + "winapi", ] [[package]] @@ -776,7 +773,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.50", ] [[package]] @@ -873,18 +870,38 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "2.5.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c" dependencies = [ "crossbeam-utils", ] +[[package]] +name = "config" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7328b20597b53c2454f0b1919720c25c7339051c02b72b7e05409e00b14132be" +dependencies = [ + "async-trait", + "convert_case 0.6.0", + "json5", + "lazy_static", + "nom", + "pathdiff", + "ron", + "rust-ini", + "serde", + "serde_json", + "toml 0.8.10", + "yaml-rust", +] + [[package]] name = "const-hex" -version = "1.10.0" +version = "1.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5104de16b218eddf8e34ffe2f86f74bfa4e61e95a1b89732fccf6325efd0557" +checksum = "5ba00838774b4ab0233e355d26710fbfc8327a05c017f6dc4873f876d1f79f78" dependencies = [ "cfg-if", "cpufeatures", @@ -899,6 +916,26 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" +[[package]] +name = "const-random" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aaf16c9c2c612020bcfd042e170f6e32de9b9d75adb5277cdbbd2e2c8c8299a" +dependencies = [ + "const-random-macro", +] + +[[package]] +name = "const-random-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" +dependencies = [ + "getrandom", + "once_cell", + "tiny-keccak", +] + [[package]] name = "const_format" version = "0.2.31" @@ -925,6 +962,18 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "constcat" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d5cd0c57ef83705837b1cb872c973eff82b070846d3e23668322b2c0f8246d0" + +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + [[package]] name = "convert_case" version = "0.6.0" @@ 
-936,9 +985,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.9.4" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" dependencies = [ "core-foundation-sys", "libc", @@ -946,9 +995,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" @@ -1133,7 +1182,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.32", + "syn 2.0.50", ] [[package]] @@ -1144,7 +1193,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core", "quote", - "syn 2.0.32", + "syn 2.0.50", ] [[package]] @@ -1169,8 +1218,10 @@ version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ + "convert_case 0.4.0", "proc-macro2", "quote", + "rustc_version", "syn 1.0.107", ] @@ -1180,12 +1231,6 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" -[[package]] -name = "difflib" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" - [[package]] name = "digest" version = "0.9.0" @@ -1248,6 +1293,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "dlv-list" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"442039f5147480ba31067cb00ada1adae6892028e40e45fc5de7b7df6dcc1b5f" +dependencies = [ + "const-random", +] + [[package]] name = "dotenv" version = "0.15.0" @@ -1354,7 +1408,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.50", ] [[package]] @@ -1363,17 +1417,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" -[[package]] -name = "errno" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" -dependencies = [ - "errno-dragonfly", - "libc", - "winapi", -] - [[package]] name = "errno" version = "0.3.6" @@ -1384,16 +1427,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "errno-dragonfly" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "eth-keystore" version = "0.5.0" @@ -1530,8 +1563,8 @@ dependencies = [ "reqwest", "serde", "serde_json", - "syn 2.0.32", - "toml", + "syn 2.0.50", + "toml 0.7.3", "walkdir", ] @@ -1548,7 +1581,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.32", + "syn 2.0.50", ] [[package]] @@ -1573,8 +1606,8 @@ dependencies = [ "rlp", "serde", "serde_json", - "strum", - "syn 2.0.32", + "strum 0.25.0", + "syn 2.0.50", "tempfile", "thiserror", "tiny-keccak", @@ -1783,15 +1816,6 @@ dependencies = [ "miniz_oxide 0.6.2", ] -[[package]] -name = "float-cmp" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" -dependencies = [ - "num-traits", -] - [[package]] name = "fnv" version = "1.0.7" @@ -1831,9 +1855,9 @@ checksum = 
"e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -1846,9 +1870,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -1856,15 +1880,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -1873,9 +1897,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-lite" @@ -1904,26 +1928,26 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.30" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.50", ] [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-timer" @@ -1937,9 +1961,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -1975,14 +1999,14 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if", "js-sys", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] @@ -2044,6 +2068,12 @@ dependencies = [ "web-sys", ] +[[package]] +name = "go-parse-duration" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"558b88954871f5e5b2af0e62e2e176c8bde7a6c2c4ed41b13d138d96da2e2cbd" + [[package]] name = "group" version = "0.13.0" @@ -2067,7 +2097,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.0.0", + "indexmap 2.2.3", "slab", "tokio", "tokio-util", @@ -2082,18 +2112,18 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038" -dependencies = [ - "ahash", -] +checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" [[package]] name = "hashbrown" -version = "0.14.0" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +dependencies = [ + "ahash", +] [[package]] name = "hashers" @@ -2104,16 +2134,6 @@ dependencies = [ "fxhash", ] -[[package]] -name = "hdrhistogram" -version = "7.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" -dependencies = [ - "byteorder", - "num-traits", -] - [[package]] name = "heck" version = "0.4.1" @@ -2248,7 +2268,7 @@ dependencies = [ "http", "hyper", "log", - "rustls 0.21.11", + "rustls 0.21.7", "rustls-native-certs", "tokio", "tokio-rustls 0.24.1", @@ -2363,12 +2383,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.0" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown 0.14.3", "serde", ] @@ -2392,13 +2412,12 @@ 
dependencies = [ [[package]] name = "io-lifetimes" -version = "1.0.11" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" +checksum = "1abeb7a0dd0f8181267ff8adc397075586500b81b28a73e8a0208b00fc170fb3" dependencies = [ - "hermit-abi", "libc", - "windows-sys 0.48.0", + "windows-sys 0.45.0", ] [[package]] @@ -2409,9 +2428,9 @@ checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" [[package]] name = "iri-string" -version = "0.7.2" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f5f6c2df22c009ac44f6f1499308e7a3ac7ba42cd2378475cc691510e1eef1b" +checksum = "9c25163201be6ded9e686703e85532f8f852ea1f92ba625cb3c51f7fe6d07a4a" dependencies = [ "memchr", "serde", @@ -2425,7 +2444,7 @@ checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f" dependencies = [ "hermit-abi", "io-lifetimes", - "rustix 0.37.25", + "rustix 0.37.3", "windows-sys 0.48.0", ] @@ -2447,6 +2466,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.5" @@ -2455,9 +2483,9 @@ checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" [[package]] name = "jobserver" -version = "0.1.31" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" +checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" dependencies = [ "libc", ] @@ -2471,6 +2499,17 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "json5" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" +dependencies = [ + "pest", + "pest_derive", + "serde", +] + [[package]] name = "jsonrpsee" version = "0.20.3" @@ -2565,7 +2604,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29110019693a4fa2dbda04876499d098fa16d70eba06b1e6e2b3f1b251419515" dependencies = [ "heck", - "proc-macro-crate 1.3.0", + "proc-macro-crate", "proc-macro2", "quote", "syn 1.0.107", @@ -2720,9 +2759,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.155" +version = "0.2.150" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" [[package]] name = "libloading" @@ -2742,12 +2781,12 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libproc" -version = "0.13.0" +version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b18cbf29f8ff3542ba22bdce9ac610fcb75d74bb4e2b306b2a2762242025b4f" +checksum = "8eb6497078a4c9c2aca63df56d8dce6eb4381d53a960f781a3a748f7ea97436d" dependencies = [ "bindgen", - "errno 0.2.8", + "errno", "libc", ] @@ -2768,15 +2807,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" - -[[package]] -name = "linux-raw-sys" -version = "0.3.8" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" +checksum = "d59d8c75012853d2e872fb56bc8a2e53718e2cafe1a4c823143141c6d90c322f" [[package]] name = "linux-raw-sys" @@ -2850,9 +2883,9 @@ 
dependencies = [ [[package]] name = "memchr" -version = "2.5.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" [[package]] name = "memoffset" @@ -2865,49 +2898,36 @@ dependencies = [ [[package]] name = "metrics" -version = "0.21.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa8ebbd1a9e57bbab77b9facae7f5136aea44c356943bf9a198f647da64285d6" +checksum = "cd71d9db2e4287c3407fa04378b8c2ee570aebe0854431562cdd89ca091854f4" dependencies = [ "ahash", - "metrics-macros", "portable-atomic", ] [[package]] name = "metrics-exporter-prometheus" -version = "0.12.1" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" +checksum = "9bf4e7146e30ad172c42c39b3246864bd2d3c6396780711a1baf749cfe423e21" dependencies = [ "base64 0.21.0", "hyper", - "indexmap 1.9.3", + "indexmap 2.2.3", "ipnet", "metrics", "metrics-util", "quanta", "thiserror", "tokio", - "tracing", -] - -[[package]] -name = "metrics-macros" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.32", ] [[package]] name = "metrics-process" -version = "1.0.12" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c93f6ad342d3f7bc14724147e2dbc6eb6fdbe5a832ace16ea23b73618e8cc17" +checksum = "f69c2a98ebe047f74b834c7eeaad0db5a9fd3604e129721d212e0ef9442e238a" dependencies = [ "libproc", "mach2", @@ -2920,15 +2940,15 @@ dependencies = [ [[package]] name = "metrics-util" -version = "0.15.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "4de2ed6e491ed114b40b732e4d1659a9d53992ebd87490c44a6ffe23739d973e" +checksum = "ece71ab046dcf45604e573329966ec1db5ff4b81cfa170a924ff4c959ab5451a" dependencies = [ "aho-corasick", "crossbeam-epoch", "crossbeam-utils", - "hashbrown 0.13.1", - "indexmap 1.9.3", + "hashbrown 0.14.3", + "indexmap 2.2.3", "metrics", "num_cpus", "ordered-float", @@ -2945,9 +2965,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" -version = "2.0.4" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" dependencies = [ "mime", "unicase", @@ -2984,15 +3004,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.48.0", ] [[package]] name = "mockall" -version = "0.11.4" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" +checksum = "43766c2b5203b10de348ffe19f7e54564b64f3d6018ff7648d1e2d6d3a0f0a48" dependencies = [ "cfg-if", "downcast", @@ -3005,14 +3025,14 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.11.4" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" +checksum = "af7cbce79ec385a1d4f54baa90a76401eb15d9cab93685f62e7e9f942aa00ae2" dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 1.0.107", + "syn 2.0.50", ] [[package]] @@ -3055,12 +3075,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "normalize-line-endings" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" - [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -3114,23 +3128,23 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683751d591e6d81200c39fb0d1032608b77724f34114db54f571ff1317b337c0" +checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" dependencies = [ "num_enum_derive", ] [[package]] name = "num_enum_derive" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e" +checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" dependencies = [ - "proc-macro-crate 2.0.2", + "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.50", ] [[package]] @@ -3193,13 +3207,23 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "ordered-float" -version = "3.9.1" +version = "4.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a54938017eacd63036332b4ae5c8a49fc8c0c1d6d629893057e4f13609edd06" +checksum = "a76df7075c7d4d01fdcb46c912dd17fba5b60c78ea480b475f2b6ab6f666584e" dependencies = [ "num-traits", ] +[[package]] +name = "ordered-multimap" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ed8acf08e98e744e5384c8bc63ceb0364e68a6854187221c18df61c4797690e" +dependencies = [ + "dlv-list", + "hashbrown 0.13.2", +] + [[package]] name = "overload" version = "0.1.1" @@ -3226,7 +3250,7 @@ version = "3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b26a931f824dd4eca30b3e43bb4f31cd5f0d3a403c5f5ff27106b805bfde7b" dependencies = [ - "proc-macro-crate 1.3.0", + "proc-macro-crate", "proc-macro2", "quote", "syn 1.0.107", @@ -3263,28 +3287,27 @@ 
dependencies = [ [[package]] name = "parse-display" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f96cc033d72896bb9a2c239a14e1141c3e2eae6d649e7c10ef4e598d66bc86c" +checksum = "06af5f9333eb47bd9ba8462d612e37a8328a5cb80b13f0af4de4c3b89f52dee5" dependencies = [ - "once_cell", "parse-display-derive", "regex", + "regex-syntax 0.8.2", ] [[package]] name = "parse-display-derive" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5587062be441f3d868f7c4c9d13c67f286b03aa679d7f8176ef80bf2ee79e5d" +checksum = "dc9252f259500ee570c75adcc4e317fa6f57a1e47747d622e0bf838002a7b790" dependencies = [ - "once_cell", "proc-macro2", "quote", "regex", - "regex-syntax 0.6.28", + "regex-syntax 0.8.2", "structmeta", - "syn 1.0.107", + "syn 2.0.50", ] [[package]] @@ -3298,12 +3321,24 @@ dependencies = [ "subtle", ] +[[package]] +name = "paste" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" + [[package]] name = "path-slash" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e91099d4268b0e11973f036e885d652fb0b21fedcf69738c627f94db6a44f42" +[[package]] +name = "pathdiff" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" + [[package]] name = "pbkdf2" version = "0.11.0" @@ -3326,12 +3361,6 @@ dependencies = [ "hmac 0.12.1", ] -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "pem" version = "1.1.1" @@ -3347,6 +3376,51 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +[[package]] +name = "pest" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f200d8d83c44a45b21764d1916299752ca035d15ecd46faca3e9a2a2bf6ad06" +dependencies = [ + "memchr", + "thiserror", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcd6ab1236bbdb3a49027e920e693192ebfe8913f6d60e294de57463a493cfde" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a31940305ffc96863a735bef7c7994a00b325a7138fdbc5bda0f1a0476d3275" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn 2.0.50", +] + +[[package]] +name = "pest_meta" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7ff62f5259e53b78d1af898941cdcdccfae7385cf7d793a6e55de5d05bb4b7d" +dependencies = [ + "once_cell", + "pest", + "sha2 0.10.8", +] + [[package]] name = "petgraph" version = "0.6.3" @@ -3435,7 +3509,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.50", ] [[package]] @@ -3502,16 +3576,12 @@ checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" [[package]] name = "predicates" -version = "2.1.5" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" +checksum = "68b87bfd4605926cdfefc1c3b5f8fe560e3feca9d5552cf68c466d3d8236c7e8" dependencies = [ - "difflib", - "float-cmp", - "itertools 0.10.5", - "normalize-line-endings", + "anstyle", "predicates-core", - "regex", ] [[package]] @@ -3537,7 +3607,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.32", + "syn 2.0.50", ] [[package]] @@ -3565,59 +3635,35 @@ dependencies = [ ] [[package]] -name = "proc-macro-crate" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b00f26d3400549137f92511a46ac1cd8ce37cb5598a96d382381458b992a5d24" -dependencies = [ - "toml_datetime 0.6.3", - "toml_edit 0.20.2", -] - -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn 1.0.107", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" +name = "proc-macro2" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" dependencies = [ - "proc-macro2", - "quote", - "version_check", + "unicode-ident", ] [[package]] -name = "proc-macro2" -version = "1.0.66" +name = "procfs" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9" +checksum = "731e0d9356b0c25f16f33b5be79b1c57b562f141ebfcdb0ad8ac2c13a24293b4" dependencies = [ - "unicode-ident", + "bitflags 2.4.1", + "hex", + "lazy_static", + "procfs-core", + "rustix 0.38.21", ] [[package]] -name = "procfs" -version = "0.15.1" +name = "procfs-core" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "943ca7f9f29bab5844ecd8fdb3992c5969b6622bb9609b9502fef9b4310e3f1f" +checksum = "2d3554923a69f4ce04c4a754260c338f505ce22642d3830e049a399fc2059a29" dependencies = [ - "bitflags 1.3.2", - 
"byteorder", + "bitflags 2.4.1", "hex", - "lazy_static", - "rustix 0.36.16", ] [[package]] @@ -3626,7 +3672,7 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ - "bitflags 2.5.0", + "bitflags 2.4.1", "lazy_static", "num-traits", "rand", @@ -3663,7 +3709,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.32", + "syn 2.0.50", "tempfile", "which", ] @@ -3678,7 +3724,7 @@ dependencies = [ "itertools 0.11.0", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.50", ] [[package]] @@ -3692,25 +3738,24 @@ dependencies = [ [[package]] name = "quanta" -version = "0.11.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" +checksum = "9ca0b7bac0b97248c40bb77288fc52029cf1459c0461ea1b05ee32ccf011de2c" dependencies = [ "crossbeam-utils", "libc", - "mach2", "once_cell", "raw-cpuid", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "web-sys", "winapi", ] [[package]] name = "quote" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -3778,11 +3823,11 @@ dependencies = [ [[package]] name = "raw-cpuid" -version = "10.7.0" +version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" +checksum = "9d86a7c4638d42c44551f4791a20e687dbb4c3de1f33c43dd71e355cd429def1" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.1", ] [[package]] @@ -3809,9 +3854,9 @@ dependencies = [ [[package]] name = "redis" -version = "0.21.7" +version = "0.24.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "152f3863635cbb76b73bc247845781098302c6c9ad2060e1a9a7de56840346b6" +checksum = "c580d9cbbe1d1b479e8d67cf9daf6a62c957e6846048408b80b43ac3f6af84cd" dependencies = [ "async-std", "async-trait", @@ -3822,7 +3867,8 @@ dependencies = [ "percent-encoding", "pin-project-lite", "ryu", - "sha1 0.6.1", + "sha1_smol", + "socket2 0.4.9", "tokio", "tokio-util", "url", @@ -3859,14 +3905,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.3" +version = "1.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" +checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.6", - "regex-syntax 0.7.4", + "regex-automata 0.4.5", + "regex-syntax 0.8.2", ] [[package]] @@ -3880,13 +3926,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.6" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" +checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.4", + "regex-syntax 0.8.2", ] [[package]] @@ -3930,7 +3976,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.21.11", + "rustls 0.21.7", "rustls-pemfile", "serde", "serde_json", @@ -3974,17 +4020,16 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.8" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +checksum = "9babe80d5c16becf6594aa32ad2be8fe08498e7ae60b77de8df700e67f191d7e" dependencies = [ "cc", - "cfg-if", "getrandom", "libc", "spin 0.9.8", "untrusted 0.9.0", - "windows-sys 0.52.0", + "windows-sys 0.48.0", ] 
[[package]] @@ -4027,6 +4072,18 @@ dependencies = [ "syn 1.0.107", ] +[[package]] +name = "ron" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b91f7eff05f748767f183df4320a63d6936e9c6107d97c9e6bdd9784f4289c94" +dependencies = [ + "base64 0.21.0", + "bitflags 2.4.1", + "serde", + "serde_derive", +] + [[package]] name = "route-recognizer" version = "0.3.1" @@ -4035,9 +4092,9 @@ checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" [[package]] name = "rslock" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f28fe01fb772d16186b3484f8ab45cefb258fd49d13c2baeddad999211ccc6d" +checksum = "069fed2ed397dcba536df6105c2b11ded413297746b451876e60297a87939900" dependencies = [ "futures", "rand", @@ -4047,18 +4104,20 @@ dependencies = [ [[package]] name = "rundler" -version = "0.1.0-beta" +version = "0.3.0" dependencies = [ "anyhow", "clap", + "config", "dotenv", "ethers", - "itertools 0.11.0", + "go-parse-duration", + "itertools 0.12.1", "metrics", "metrics-exporter-prometheus", "metrics-process", "metrics-util", - "once_cell", + "paste", "rundler-builder", "rundler-pool", "rundler-provider", @@ -4074,17 +4133,17 @@ dependencies = [ "sscanf", "tokio", "tokio-metrics", - "tokio-rustls 0.24.1", + "tokio-rustls 0.25.0", "tokio-util", "tracing", "tracing-appender", - "tracing-log", + "tracing-log 0.2.0", "tracing-subscriber", ] [[package]] name = "rundler-builder" -version = "0.1.0-beta" +version = "0.3.0" dependencies = [ "anyhow", "async-trait", @@ -4103,7 +4162,6 @@ dependencies = [ "prost", "reqwest", "rslock", - "rundler-pool", "rundler-provider", "rundler-sim", "rundler-task", @@ -4113,6 +4171,7 @@ dependencies = [ "rusoto_kms", "serde", "serde_json", + "strum 0.26.1", "thiserror", "tokio", "tokio-util", @@ -4125,7 +4184,7 @@ dependencies = [ [[package]] name = "rundler-dev" -version = "0.1.0-beta" +version = "0.3.0" dependencies = [ 
"anyhow", "ethers", @@ -4134,7 +4193,7 @@ dependencies = [ [[package]] name = "rundler-pool" -version = "0.1.0-beta" +version = "0.3.0" dependencies = [ "anyhow", "async-stream", @@ -4142,7 +4201,7 @@ dependencies = [ "ethers", "futures", "futures-util", - "itertools 0.11.0", + "itertools 0.12.1", "metrics", "mockall", "parking_lot", @@ -4153,7 +4212,7 @@ dependencies = [ "rundler-types", "rundler-utils", "serde", - "strum", + "strum 0.26.1", "thiserror", "tokio", "tokio-stream", @@ -4168,22 +4227,28 @@ dependencies = [ [[package]] name = "rundler-provider" -version = "0.1.0-beta" +version = "0.3.0" dependencies = [ "anyhow", "async-trait", + "auto_impl", "ethers", + "metrics", "mockall", + "parse-display", + "reqwest", + "rundler-provider", "rundler-types", "rundler-utils", "serde", "thiserror", "tokio", + "tracing", ] [[package]] name = "rundler-rpc" -version = "0.1.0-beta" +version = "0.3.0" dependencies = [ "anyhow", "async-trait", @@ -4193,15 +4258,13 @@ dependencies = [ "jsonrpsee", "metrics", "mockall", - "rundler-builder", - "rundler-pool", "rundler-provider", "rundler-sim", "rundler-task", "rundler-types", "rundler-utils", "serde", - "strum", + "strum 0.26.1", "thiserror", "tokio", "tokio-util", @@ -4214,14 +4277,14 @@ dependencies = [ [[package]] name = "rundler-sim" -version = "0.1.0-beta" +version = "0.3.0" dependencies = [ "anyhow", "arrayvec", "async-trait", "ethers", "futures-util", - "indexmap 2.0.0", + "indexmap 2.2.3", "mockall", "parse-display", "rand", @@ -4232,7 +4295,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "strum", + "strum 0.26.1", "thiserror", "tokio", "tracing", @@ -4241,7 +4304,7 @@ dependencies = [ [[package]] name = "rundler-task" -version = "0.1.0-beta" +version = "0.3.0" dependencies = [ "anyhow", "async-trait", @@ -4261,7 +4324,7 @@ dependencies = [ [[package]] name = "rundler-tools" -version = "0.1.0-beta" +version = "0.3.0" dependencies = [ "anyhow", "clap", @@ -4278,28 +4341,40 @@ dependencies = [ [[package]] 
name = "rundler-types" -version = "0.1.0-beta" +version = "0.3.0" dependencies = [ "anyhow", + "async-trait", "chrono", + "const-hex", + "constcat", "ethers", + "futures-util", + "mockall", + "num_enum", "once_cell", "parse-display", + "rand", + "rundler-types", "rundler-utils", "serde", "serde_json", - "strum", + "strum 0.26.1", + "thiserror", ] [[package]] name = "rundler-utils" -version = "0.1.0-beta" +version = "0.3.0" dependencies = [ "anyhow", + "derive_more", "ethers", "futures", + "itertools 0.12.1", "rand", "reqwest", + "schnellru", "tokio", "tracing", "url", @@ -4401,6 +4476,16 @@ dependencies = [ "tokio", ] +[[package]] +name = "rust-ini" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e2a3bcec1f113553ef1c88aae6c020a369d03d55b58de9869a0908930385091" +dependencies = [ + "cfg-if", + "ordered-multimap", +] + [[package]] name = "rustc-demangle" version = "0.1.23" @@ -4430,40 +4515,26 @@ dependencies = [ [[package]] name = "rustix" -version = "0.36.16" +version = "0.37.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6da3636faa25820d8648e0e31c5d519bbb01f72fdf57131f0f5f7da5fed36eab" +checksum = "62b24138615de35e32031d041a09032ef3487a616d901ca4db224e7d557efae2" dependencies = [ "bitflags 1.3.2", - "errno 0.3.6", + "errno", "io-lifetimes", "libc", - "linux-raw-sys 0.1.4", + "linux-raw-sys 0.3.1", "windows-sys 0.45.0", ] -[[package]] -name = "rustix" -version = "0.37.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4eb579851244c2c03e7c24f501c3432bed80b8f720af1d6e5b0e0f01555a035" -dependencies = [ - "bitflags 1.3.2", - "errno 0.3.6", - "io-lifetimes", - "libc", - "linux-raw-sys 0.3.8", - "windows-sys 0.48.0", -] - [[package]] name = "rustix" version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ - "bitflags 2.5.0", - "errno 
0.3.6", + "bitflags 2.4.1", + "errno", "libc", "linux-raw-sys 0.4.11", "windows-sys 0.48.0", @@ -4483,16 +4554,30 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.11" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" +checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" dependencies = [ "log", - "ring 0.17.8", - "rustls-webpki", + "ring 0.16.20", + "rustls-webpki 0.101.4", "sct", ] +[[package]] +name = "rustls" +version = "0.22.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" +dependencies = [ + "log", + "ring 0.17.3", + "rustls-pki-types", + "rustls-webpki 0.102.2", + "subtle", + "zeroize", +] + [[package]] name = "rustls-native-certs" version = "0.6.2" @@ -4514,13 +4599,30 @@ dependencies = [ "base64 0.21.0", ] +[[package]] +name = "rustls-pki-types" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "048a63e5b3ac996d78d402940b5fa47973d2d080c6c6fffa1d0f19c4445310b7" + [[package]] name = "rustls-webpki" -version = "0.101.7" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" dependencies = [ - "ring 0.17.8", + "ring 0.16.20", + "untrusted 0.7.1", +] + +[[package]] +name = "rustls-webpki" +version = "0.102.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +dependencies = [ + "ring 0.17.3", + "rustls-pki-types", "untrusted 0.9.0", ] @@ -4572,7 +4674,7 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"303959cf613a6f6efd19ed4b4ad5bf79966a13352716299ad532cfb115f4205c" dependencies = [ - "proc-macro-crate 1.3.0", + "proc-macro-crate", "proc-macro2", "quote", "syn 1.0.107", @@ -4587,6 +4689,17 @@ dependencies = [ "windows-sys 0.42.0", ] +[[package]] +name = "schnellru" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "772575a524feeb803e5b0fcbc6dd9f367e579488197c94c6e4023aad2305774d" +dependencies = [ + "ahash", + "cfg-if", + "hashbrown 0.13.2", +] + [[package]] name = "scopeguard" version = "1.1.0" @@ -4637,11 +4750,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.11.0" +version = "2.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" +checksum = "a332be01508d814fed64bf28f798a146d73792121129962fdf335bb3c49a4254" dependencies = [ - "bitflags 2.5.0", + "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", @@ -4650,9 +4763,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" +checksum = "31c9bb296072e961fcbd8853511dd39c2d8be2deb1e17c6860b1d30732b323b4" dependencies = [ "core-foundation-sys", "libc", @@ -4696,7 +4809,7 @@ checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.50", ] [[package]] @@ -4712,9 +4825,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.1" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4" +checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" dependencies = [ "serde", ] @@ -4741,11 +4854,11 @@ dependencies = [ "chrono", 
"hex", "indexmap 1.9.3", - "indexmap 2.0.0", + "indexmap 2.2.3", "serde", "serde_json", "serde_with_macros", - "time", + "time 0.3.20", ] [[package]] @@ -4757,7 +4870,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.50", ] [[package]] @@ -4773,15 +4886,6 @@ dependencies = [ "opaque-debug", ] -[[package]] -name = "sha1" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" -dependencies = [ - "sha1_smol", -] - [[package]] name = "sha1" version = "0.10.5" @@ -4876,7 +4980,7 @@ dependencies = [ "num-bigint", "num-traits", "thiserror", - "time", + "time 0.3.20", ] [[package]] @@ -4902,9 +5006,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.2" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" @@ -4918,9 +5022,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" +checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", "windows-sys 0.48.0", @@ -4996,12 +5100,12 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84955aa74a157e5834d58a07be11af7f0ab923f0194a0bb2ea6b3db8b5d1611d" dependencies = [ - "convert_case", + "convert_case 0.6.0", "proc-macro2", "quote", "regex-syntax 0.6.28", "strsim", - "syn 2.0.32", + "syn 2.0.50", "unicode-width", ] @@ -5032,25 +5136,25 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "structmeta" -version = "0.1.5" +version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bd9c2155aa89fb2c2cb87d99a610c689e7c47099b3e9f1c8a8f53faf4e3d2e3" +checksum = "2e1575d8d40908d70f6fd05537266b90ae71b15dbbe7a8b7dffa2b759306d329" dependencies = [ "proc-macro2", "quote", "structmeta-derive", - "syn 1.0.107", + "syn 2.0.50", ] [[package]] name = "structmeta-derive" -version = "0.1.5" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bafede0d0a2f21910f36d47b1558caae3076ed80f6f3ad0fc85a91e6ba7e5938" +checksum = "152a0b65a590ff6c3da95cabe2353ee04e6167c896b28e3b14478c2636c922fc" dependencies = [ "proc-macro2", "quote", - "syn 1.0.107", + "syn 2.0.50", ] [[package]] @@ -5059,7 +5163,16 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" dependencies = [ - "strum_macros", + "strum_macros 0.25.2", +] + +[[package]] +name = "strum" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "723b93e8addf9aa965ebe2d11da6d7540fa2283fcea14b3371ff055f7ba13f5f" +dependencies = [ + "strum_macros 0.26.1", ] [[package]] @@ -5072,7 +5185,20 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.32", + "syn 2.0.50", +] + +[[package]] +name = "strum_macros" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a3417fc93d76740d974a01654a09777cb500428cc874ca9f45edfe0c4d4cd18" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.50", ] [[package]] @@ -5114,9 +5240,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.32" +version = "2.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239814284fd6f1a4ffe4ca893952cdd93c224b6a1571c9a9eadd670295c0c9e2" +checksum = "74f1bdc9872430ce9b75da68329d1c1746faf50ffac5f19e02b71e37ff881ffb" dependencies = [ "proc-macro2", "quote", @@ -5212,7 
+5338,7 @@ checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.50", ] [[package]] @@ -5225,6 +5351,17 @@ dependencies = [ "once_cell", ] +[[package]] +name = "time" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" +dependencies = [ + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", + "winapi", +] + [[package]] name = "time" version = "0.3.20" @@ -5278,9 +5415,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.33.0" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f38200e3ef7995e5ef13baec2f432a6da0aa9ac495b2c0e8f3b7eec2c92d653" +checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" dependencies = [ "backtrace", "bytes", @@ -5289,7 +5426,7 @@ dependencies = [ "num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.4", + "socket2 0.5.5", "tokio-macros", "windows-sys 0.48.0", ] @@ -5306,13 +5443,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 2.0.50", ] [[package]] @@ -5344,7 +5481,18 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.11", + "rustls 0.21.7", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.4", + "rustls-pki-types", "tokio", ] @@ -5368,7 +5516,7 @@ checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" dependencies = [ "futures-util", "log", - "rustls 0.21.11", + "rustls 0.21.7", "tokio", "tokio-rustls 0.24.1", "tungstenite", @@ -5398,10 +5546,22 @@ checksum = "b403acf6f2bb0859c93c7f0d967cb4a75a7ac552100f9322faf64dc047669b21" dependencies = [ "serde", "serde_spanned", - "toml_datetime 0.6.3", + "toml_datetime 0.6.5", "toml_edit 0.19.8", ] +[[package]] +name = "toml" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a9aad4a3066010876e8dcf5a8a06e70a558751117a145c6ce2b82c2e2054290" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime 0.6.5", + "toml_edit 0.22.5", +] + [[package]] name = "toml_datetime" version = "0.5.1" @@ -5410,9 +5570,9 @@ checksum = "4553f467ac8e3d374bc9a177a26801e5d0f9b211aa1673fb137a403afd1c9cf5" [[package]] name = "toml_datetime" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" dependencies = [ "serde", ] @@ -5437,26 +5597,28 @@ dependencies = [ "indexmap 1.9.3", "serde", "serde_spanned", - "toml_datetime 0.6.3", + "toml_datetime 0.6.5", "winnow 0.4.1", ] [[package]] name = "toml_edit" -version = "0.20.2" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" +checksum = "99e68c159e8f5ba8a28c4eb7b0c0c190d77bb479047ca713270048145a9ad28a" dependencies = [ - "indexmap 2.0.0", - "toml_datetime 0.6.3", - "winnow 0.5.40", + "indexmap 2.2.3", + "serde", + "serde_spanned", + "toml_datetime 0.6.5", + "winnow 0.6.0", ] [[package]] name = "tonic" 
-version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5469afaf78a11265c343a88969045c1568aa8ecc6c787dbf756e92e70f199861" +checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13" dependencies = [ "async-stream", "async-trait", @@ -5481,22 +5643,22 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b477abbe1d18c0b08f56cd01d1bc288668c5b5cfd19b2ae1886bbf599c546f1" +checksum = "be4ef6dd70a610078cb4e338a0f79d06bc759ff1b22d2120c2ff02ae264ba9c2" dependencies = [ "prettyplease", "proc-macro2", "prost-build", "quote", - "syn 2.0.32", + "syn 2.0.50", ] [[package]] name = "tonic-health" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d387f8ad56024274c8d0350f0997dd2dc80f501bcfd27f1df8fd7e558109894c" +checksum = "2cef6e24bc96871001a7e48e820ab240b3de2201e59b517cf52835df2f1d2350" dependencies = [ "async-stream", "prost", @@ -5507,9 +5669,9 @@ dependencies = [ [[package]] name = "tonic-reflection" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e61add39c1426d5f21eae2cc196e97e1f5a5ea7bcf491df3885797992a86eb" +checksum = "548c227bd5c0fae5925812c4ec6c66ffcfced23ea370cb823f4d18f0fc1cb6a7" dependencies = [ "prost", "prost-types", @@ -5526,7 +5688,6 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", - "hdrhistogram", "indexmap 1.9.3", "pin-project", "pin-project-lite", @@ -5547,7 +5708,7 @@ checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ "async-compression", "base64 0.21.0", - "bitflags 2.5.0", + "bitflags 2.4.1", "bytes", "futures-core", "futures-util", @@ -5566,7 +5727,7 @@ dependencies = [ "tower-layer", 
"tower-service", "tracing", - "uuid 1.8.0", + "uuid 1.10.0", ] [[package]] @@ -5601,7 +5762,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09d48f71a791638519505cefafe162606f706c25592e4bde4d97600c0195312e" dependencies = [ "crossbeam-channel", - "time", + "time 0.3.20", "tracing-subscriber", ] @@ -5647,6 +5808,17 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + [[package]] name = "tracing-serde" version = "0.1.3" @@ -5674,7 +5846,7 @@ dependencies = [ "thread_local", "tracing", "tracing-core", - "tracing-log", + "tracing-log 0.1.3", "tracing-serde", ] @@ -5697,8 +5869,8 @@ dependencies = [ "httparse", "log", "rand", - "rustls 0.21.11", - "sha1 0.10.5", + "rustls 0.21.7", + "sha1", "thiserror", "url", "utf-8", @@ -5710,6 +5882,12 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" +[[package]] +name = "ucd-trie" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" + [[package]] name = "uint" version = "0.9.5" @@ -5823,9 +6001,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ "getrandom", ] @@ -5879,6 +6057,12 @@ dependencies = [ "try-lock", ] +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -5887,9 +6071,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "31f8dcbc21f30d9b8f2ea926ecb58f6b91192c17e9d33594b3df58b2007ca53b" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -5897,16 +6081,16 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "95ce90fd5bcc06af55a641a86428ee4229e44e07033963a2290a8e241607ccb9" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.32", + "syn 1.0.107", "wasm-bindgen-shared", ] @@ -5924,9 +6108,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "4c21f77c0bedc37fd5dc21f897894a5ca01e7bb159884559461862ae90c0b4c5" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5934,22 +6118,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "2aff81306fcac3c7515ad4e177f521b5c9a15f2b08f4e32d823066102f35a5f6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.32", + "syn 1.0.107", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" 
+version = "0.2.84" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "0046fef7e28c3804e5e38bfa31ea2a0f73905319b677e57ebe37e49358989b5d" [[package]] name = "web-sys" @@ -6021,21 +6205,31 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.51.1" +version = "0.54.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" +checksum = "9252e5725dbed82865af151df558e754e4a3c2c30818359eb17465f1346a1b49" dependencies = [ "windows-core", - "windows-targets 0.48.5", + "windows-targets 0.52.5", ] [[package]] name = "windows-core" -version = "0.51.1" +version = "0.54.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +checksum = "12661b9c89351d684a50a8a643ce5f608e20243b9fb84687800163429f161d65" dependencies = [ - "windows-targets 0.48.5", + "windows-result", + "windows-targets 0.52.5", +] + +[[package]] +name = "windows-result" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "749f0da9cc72d82e600d8d2e44cadd0b9eedb9038f71a1c58556ac1c5791813b" +dependencies = [ + "windows-targets 0.52.5", ] [[package]] @@ -6071,15 +6265,6 @@ dependencies = [ "windows-targets 0.48.5", ] -[[package]] -name = "windows-sys" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" -dependencies = [ - "windows-targets 0.52.6", -] - [[package]] name = "windows-targets" version = "0.42.1" @@ -6112,18 +6297,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -6140,9 +6325,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -6158,9 +6343,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -6176,15 +6361,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" [[package]] name = "windows_i686_gnullvm" -version 
= "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -6200,9 +6385,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -6218,9 +6403,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -6236,9 +6421,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -6254,9 +6439,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +checksum = 
"bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winnow" @@ -6269,9 +6454,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.5.40" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +checksum = "6b1dbce9e90e5404c5a52ed82b1d13fc8cfbdad85033b6f57546ffd1265f8451" dependencies = [ "memchr", ] @@ -6320,17 +6505,46 @@ version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52839dc911083a8ef63efa4d039d1f58b5e409f923e44c80828f206f66e5541c" +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "yansi" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +[[package]] +name = "zerocopy" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.50", +] + [[package]] name = "zeroize" -version = "1.8.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" [[package]] name = "zip" @@ -6347,8 +6561,8 @@ dependencies = [ "flate2", "hmac 0.12.1", "pbkdf2 0.11.0", - "sha1 0.10.5", - 
"time", + "sha1", + "time 0.3.20", "zstd 0.11.2+zstd.1.5.2", ] diff --git a/Cargo.toml b/Cargo.toml index afd66e79..533f1e67 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,9 +7,9 @@ default-members = ["bin/rundler"] resolver = "2" [workspace.package] -version = "0.1.0-beta" +version = "0.3.0" edition = "2021" -rust-version = "1.75" +rust-version = "1.79" license = "MIT OR Apache-2.0" repository = "https://github.com/alchemyplatform/rundler" @@ -20,28 +20,29 @@ cargo-husky = { version = "1", default-features = false, features = ["user-hooks ethers = "2.0.10" futures = "0.3.28" futures-util = "0.3.28" +itertools = "0.12.1" jsonrpsee = "0.20.3" -metrics = "0.21.0" -mockall = "0.11.4" -parse-display = "0.8.0" +metrics = "0.22.1" +mockall = "0.12.1" +parse-display = "0.9.0" pin-project = "1.0.12" prost = "0.12.0" serde = "1.0.160" serde_json = "1.0.64" rand = "0.8.5" reqwest = { version = "0.11.18", default-features = false, features = ["rustls-tls"] } -rustls = "0.21.7" +rustls = "0.22.4" thiserror = "1.0.40" tokio = { version = "1.27.0", default-features = false } tokio-util = "0.7.8" -tonic = "0.10.0" -tonic-build = "0.10.0" -tonic-health = "0.10.0" -tonic-reflection = "0.10.0" -tonic-types = "0.10.0" -tower = { version = "0.4.13", features = ["full"] } +tonic = "0.11.0" +tonic-build = "0.11.0" +tonic-health = "0.11.0" +tonic-reflection = "0.11.0" +tonic-types = "0.11.0" +tower = "0.4.13" tracing = "0.1.37" -strum = "0.25.0" +strum = { version = "0.26.1", features = ["derive"] } url = "2.3.1" tower-http = { version = "0.4.0", features = ["full"] } -hyper = "0.14.20" \ No newline at end of file +hyper = "0.14.20" diff --git a/Cross.toml b/Cross.toml new file mode 100644 index 00000000..a69f5eca --- /dev/null +++ b/Cross.toml @@ -0,0 +1,2 @@ +[build] +dockerfile = "./Dockerfile.build" diff --git a/Dockerfile b/Dockerfile index 177fb5e2..f257e999 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ # Adapted from 
https://github.com/paradigmxyz/reth/blob/main/Dockerfile # syntax=docker/dockerfile:1.4 -FROM rust:1.75.0 AS chef-builder +FROM --platform=$TARGETPLATFORM rust:1.79.0 AS chef-builder # Install system dependencies RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - && echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list @@ -50,8 +50,8 @@ WORKDIR /app # install curl for healthcheck RUN apt-get -y update; apt-get -y install curl -# Copy reth over from the build stage +# Copy rundler over from the build stage COPY --from=builder /app/target/release/rundler /usr/local/bin EXPOSE 3000 8080 -CMD ["/usr/local/bin/rundler", "node"] +ENTRYPOINT ["/usr/local/bin/rundler"] diff --git a/Dockerfile.build b/Dockerfile.build new file mode 100644 index 00000000..2324e2af --- /dev/null +++ b/Dockerfile.build @@ -0,0 +1,22 @@ +ARG CROSS_BASE_IMAGE + +FROM ghcr.io/foundry-rs/foundry:nightly-ac802618e15039b31e464ae6d1fe3ee39f87cefd as foundry + +FROM $CROSS_BASE_IMAGE +COPY --from=foundry /usr/local/bin/forge /usr/local/bin/forge + +# Install Node.js 14.x and Yarn +RUN apt-get update \ + && apt-get install -y curl \ + && curl -fsSL https://deb.nodesource.com/setup_14.x | bash - \ + && apt-get install -y nodejs \ + && npm install -g yarn \ + && apt-get clean + +RUN apt-get update && apt-get install -y unzip + +# Install Protobuf compiler v3 +RUN curl -LO https://github.com/protocolbuffers/protobuf/releases/download/v3.15.8/protoc-3.15.8-linux-x86_64.zip \ + && unzip protoc-3.15.8-linux-x86_64.zip -d /usr/local \ + && chmod +x /usr/local/bin/protoc + diff --git a/Dockerfile.cross b/Dockerfile.cross new file mode 100644 index 00000000..1b2aa672 --- /dev/null +++ b/Dockerfile.cross @@ -0,0 +1,15 @@ +# This image is meant to enable cross-architecture builds. 
+# It assumes the rundler binary has already been compiled for `$TARGETPLATFORM` and is +# locatable in `./dist/bin/$TARGETARCH` +FROM --platform=$TARGETPLATFORM ubuntu:22.04 + +LABEL org.opencontainers.image.source=https://github.com/alchemyplatform/rundler +LABEL org.opencontainers.image.licenses="GNU Lesser General Public License v3.0" + +# Filled by docker buildx +ARG TARGETARCH + +COPY ./dist/bin/$TARGETARCH/rundler /usr/local/bin/rundler + +EXPOSE 3000 8080 +ENTRYPOINT ["/usr/local/bin/rundler"] diff --git a/Makefile b/Makefile index 75357566..bb08aa7c 100644 --- a/Makefile +++ b/Makefile @@ -3,6 +3,11 @@ ##@ Test UNIT_TEST_ARGS := --locked --workspace --all-features +PROFILE ?= release +DOCKER_IMAGE_NAME ?= alchemyplatform/rundler +BIN_DIR = "dist/bin" +BUILD_PATH = "target" +GIT_TAG ?= $(shell git describe --tags --abbrev=0) .PHONY: build build: ## Build the project. @@ -23,13 +28,67 @@ test-unit: ## Run unit tests. .PHONY: test-spec-integrated test-spec-integrated: ## Run spec tests in integrated mode - test/spec-tests/local/run-spec-tests.sh + $(MAKE) test-spec-integrated-v0_6 + $(MAKE) test-spec-integrated-v0_7 + +.PHONY: test-spec-integrated-v0_6 +test-spec-integrated-v0_6: ## Run v0.6 spec tests in integrated mode + test/spec-tests/local/run-spec-tests-v0_6.sh + +.PHONY: test-spec-integrated-v0_7 +test-spec-integrated-v0_7: ## Run v0.7 spec tests in integrated mode + test/spec-tests/local/run-spec-tests-v0_7.sh .PHONY: test-spec-modular test-spec-modular: ## Run spec tests in modular mode - test/spec-tests/remote/run-spec-tests.sh + $(MAKE) test-spec-modular-v0_6 + $(MAKE) test-spec-modular-v0_7 + +.PHONY: test-spec-modular-v0_6 +test-spec-modular-v0_6: ## Run v0.6 spec tests in modular mode + test/spec-tests/remote/run-spec-tests-v0_6.sh + +.PHONY: test-spec-modular-v0_7 +test-spec-modular-v0_7: ## Run v0.7 spec tests in modular mode + test/spec-tests/remote/run-spec-tests-v0_7.sh + +.PHONY: submodule-update +submodule-update: ## Update git submodules 
+ git submodule update + +build-%: + cross build --target $* --profile "$(PROFILE)" .PHONY: fmt fmt: ## format code with nightly rust cargo +nightly fmt +# Note: This requires a buildx builder with emulation support. For example: +# +# `docker run --privileged --rm tonistiigi/binfmt --install amd64,arm64` +# `docker buildx create --use --driver docker-container --name cross-builder` +.PHONY: docker-build-latest +docker-build-latest: ## Build and push a cross-arch Docker image tagged with the latest git tag and `latest`. + $(call build_docker_image,$(GIT_TAG),latest) + +.PHONY: docker-build +docker-build: ## Build and push a cross-arch Docker image + $(call build_docker_image,$(GIT_TAG)) + +# Create a cross-arch Docker image with the given tags and push it +define build_docker_image + $(MAKE) build-aarch64-unknown-linux-gnu + mkdir -p $(BIN_DIR)/arm64 + cp $(BUILD_PATH)/aarch64-unknown-linux-gnu/$(PROFILE)/rundler $(BIN_DIR)/arm64/rundler + + $(MAKE) build-x86_64-unknown-linux-gnu + mkdir -p $(BIN_DIR)/amd64 + cp $(BUILD_PATH)/x86_64-unknown-linux-gnu/$(PROFILE)/rundler $(BIN_DIR)/amd64/rundler + + docker buildx build --file ./Dockerfile.cross . \ + --platform linux/arm64,linux/amd64 \ + --tag $(DOCKER_IMAGE_NAME):$(1) \ + $(if $(2),--tag $(DOCKER_IMAGE_NAME):$(2)) \ + --provenance=false --push +endef + diff --git a/README.md b/README.md index beb0abe3..ada04ed3 100644 --- a/README.md +++ b/README.md @@ -41,18 +41,25 @@ The documentation is work in progress, and we are working to improve it. Please **Use in production at your own risk.** -### ERC-4337 Support +### ERC-4337 Entry Point Version Support -Rundler currently supports the [v0.6 release](https://github.com/eth-infinitism/account-abstraction/tree/v0.6.0) of Entry Point contract. 
+Rundler currently supports the following Entry Point versions: + * [v0.6.0](https://github.com/eth-infinitism/account-abstraction/tree/v0.6.0) + * [v0.7.0](https://github.com/eth-infinitism/account-abstraction/tree/v0.7.0) + +See more on Entry Point support [here](docs/architecture/entry_point.md). ### Chain Support -Rundler currently supports and has been tested on the following networks and their testnets: +Rundler has been tested on the following networks and their testnets: * Ethereum -* Optimism -* Base -* Arbitrum One +* OP Stack + * Generally any OP stack chain should work. + * Rundler has been explicitly tested on Optimism, Base, Zora, and Frax. +* Arbitrum Orbit + * Generally any Arbitrum Orbit chain should work. + * Rundler has been explicitly tested on Arbitrum One. * Polygon POS ## Developers @@ -85,7 +92,11 @@ make test-unit Run ERC-4337 spec tests: ``` -cd test/spec-tests/bundler-spec-tests && pdm install && pdm run update-deps +# Only need to run once to install testing frameworks +cd test/spec-tests/v0_6/bundler-spec-tests && pdm install && pdm run update-deps +cd test/spec-tests/v0_7/bundler-spec-tests && pdm install && pdm run update-deps + +# Run the v0.6 and v0.7 tests make test-spec-integrated ``` diff --git a/bin/rundler/Cargo.toml b/bin/rundler/Cargo.toml index db6ca3c5..5fc29f88 100644 --- a/bin/rundler/Cargo.toml +++ b/bin/rundler/Cargo.toml @@ -21,14 +21,17 @@ rundler-utils = { path = "../../crates/utils" } # CLI dependencies anyhow.workspace = true +config = "0.14.0" clap = { version = "4.4.4", features = ["derive", "env"] } dotenv = "0.15.0" ethers.workspace = true -itertools = "0.11.0" -metrics = "0.21.0" -metrics-exporter-prometheus = "0.12.0" -metrics-process = "1.0.10" -metrics-util = "0.15.0" +itertools = "0.12.1" +metrics = "0.22.1" +go-parse-duration = "0.1" +metrics-exporter-prometheus = { version = "0.13.1", default-features = false, features = ["http-listener"] } +metrics-process = "1.2.1" +metrics-util = "0.16.2" +paste = 
"1.0" rusoto_core = { version = "0.48.0", default-features = false, features = ["rustls"] } rusoto_s3 = { version = "0.48.0", default-features = false, features = ["rustls"] } serde.workspace = true @@ -36,10 +39,10 @@ serde_json.workspace = true sscanf = "0.4.0" tokio = { workspace = true, features = ["macros", "rt-multi-thread", "signal", "sync"] } tokio-metrics = "0.3.1" -tokio-rustls = "0.24.1" +tokio-rustls = "0.25.0" tokio-util = "0.7.8" tracing.workspace = true tracing-appender = "0.2.2" -tracing-log = "0.1.3" +tracing-log = "0.2.0" tracing-subscriber = { version = "0.3.16", features = ["env-filter", "fmt", "json"] } -once_cell = "1.19.0" + diff --git a/bin/rundler/chain_specs/arbitrum.toml b/bin/rundler/chain_specs/arbitrum.toml new file mode 100644 index 00000000..116d07fe --- /dev/null +++ b/bin/rundler/chain_specs/arbitrum.toml @@ -0,0 +1,9 @@ +name = "Arbitrum" +id = 42161 + +calldata_pre_verification_gas = true +l1_gas_oracle_contract_type = "ARBITRUM_NITRO" +l1_gas_oracle_contract_address = "0x00000000000000000000000000000000000000C8" + +supports_eip1559 = false +max_transaction_size_bytes = 95000 diff --git a/bin/rundler/chain_specs/arbitrum_sepolia.toml b/bin/rundler/chain_specs/arbitrum_sepolia.toml new file mode 100644 index 00000000..91a51acb --- /dev/null +++ b/bin/rundler/chain_specs/arbitrum_sepolia.toml @@ -0,0 +1,4 @@ +base = "arbitrum" + +name = "Arbitrum Sepolia" +id = 421614 diff --git a/bin/rundler/chain_specs/avax.toml b/bin/rundler/chain_specs/avax.toml new file mode 100644 index 00000000..f8b1001a --- /dev/null +++ b/bin/rundler/chain_specs/avax.toml @@ -0,0 +1,7 @@ +name = "Avax" +id = 43114 + +# Intrinsic cost + overhead of non-reentry storage without refund +transaction_intrinsic_gas = "0x5DC0" # 24_000 +# Extra cost of a deploy without refunds +per_user_op_deploy_overhead_gas = "0x4E20" # 20_000 diff --git a/bin/rundler/chain_specs/avax_fuji.toml b/bin/rundler/chain_specs/avax_fuji.toml new file mode 100644 index 
00000000..ac1a0c81 --- /dev/null +++ b/bin/rundler/chain_specs/avax_fuji.toml @@ -0,0 +1,4 @@ +base = "avax" + +name = "Avax-Fuji" +id = 43113 diff --git a/bin/rundler/chain_specs/base.toml b/bin/rundler/chain_specs/base.toml new file mode 100644 index 00000000..1b0d673f --- /dev/null +++ b/bin/rundler/chain_specs/base.toml @@ -0,0 +1,13 @@ +name = "Base" +id = 8453 + +calldata_pre_verification_gas = true +l1_gas_oracle_contract_type = "OPTIMISM_BEDROCK" +l1_gas_oracle_contract_address = "0x420000000000000000000000000000000000000F" +include_l1_gas_in_gas_limit = false + +max_transaction_size_bytes = 130000 + +priority_fee_oracle_type = "USAGE_BASED" +min_max_priority_fee_per_gas = "0x0F4240" # 1_000_000 +congestion_trigger_usage_ratio_threshold = 0.25 diff --git a/bin/rundler/chain_specs/base_sepolia.toml b/bin/rundler/chain_specs/base_sepolia.toml new file mode 100644 index 00000000..0e8d2daa --- /dev/null +++ b/bin/rundler/chain_specs/base_sepolia.toml @@ -0,0 +1,7 @@ +base = "base" + +name = "Base Sepolia" +id = 84532 + +min_max_priority_fee_per_gas = "0x0186A0" # 100_000 +congestion_trigger_usage_ratio_threshold = 0.55 diff --git a/bin/rundler/chain_specs/boba_sepolia.toml b/bin/rundler/chain_specs/boba_sepolia.toml new file mode 100644 index 00000000..5837061d --- /dev/null +++ b/bin/rundler/chain_specs/boba_sepolia.toml @@ -0,0 +1,6 @@ +base = "optimism" + +name = "Boba Sepolia" +id = 28882 + +min_max_priority_fee_per_gas = "0x0F4240" # 1_000_000 diff --git a/bin/rundler/chain_specs/dev.toml b/bin/rundler/chain_specs/dev.toml new file mode 100644 index 00000000..fb9bdb59 --- /dev/null +++ b/bin/rundler/chain_specs/dev.toml @@ -0,0 +1,6 @@ +base = "ethereum" + +name = "Ethereum Devnet" +id = 1337 + +bundle_max_send_interval_millis = 250 diff --git a/bin/rundler/chain_specs/ethereum.toml b/bin/rundler/chain_specs/ethereum.toml new file mode 100644 index 00000000..1ebb4f23 --- /dev/null +++ b/bin/rundler/chain_specs/ethereum.toml @@ -0,0 +1,10 @@ +name = 
"Ethereum" +id = 1 + +flashbots_enabled = true +flashbots_relay_url = "https://relay.flashbots.net" +flashbots_status_url = "https://protect.flashbots.net/tx/" + +max_bundle_size_bytes = 131072 + +min_max_priority_fee_per_gas = "0x2FAF080" # 0.05 GWEI diff --git a/bin/rundler/chain_specs/ethereum_sepolia.toml b/bin/rundler/chain_specs/ethereum_sepolia.toml new file mode 100644 index 00000000..85373c53 --- /dev/null +++ b/bin/rundler/chain_specs/ethereum_sepolia.toml @@ -0,0 +1,9 @@ +base = "ethereum" + +name = "Ethereum Sepolia" +id = 11155111 + +flashbots_relay_url = "https://relay-sepolia.flashbots.net" +flashbots_status_url = "https://protect-sepolia.flashbots.net/tx/" + +min_max_priority_fee_per_gas = "0x3B9ACA00" # 0.1 GWEI diff --git a/bin/rundler/chain_specs/optimism.toml b/bin/rundler/chain_specs/optimism.toml new file mode 100644 index 00000000..23a9608c --- /dev/null +++ b/bin/rundler/chain_specs/optimism.toml @@ -0,0 +1,11 @@ +name = "Optimism" +id = 10 + +calldata_pre_verification_gas = true +l1_gas_oracle_contract_type = "OPTIMISM_BEDROCK" +l1_gas_oracle_contract_address = "0x420000000000000000000000000000000000000F" +include_l1_gas_in_gas_limit = false + +priority_fee_oracle_type = "USAGE_BASED" +min_max_priority_fee_per_gas = "0x0186A0" # 100_000 +max_transaction_size_bytes = 90000 diff --git a/bin/rundler/chain_specs/optimism_devnet.toml b/bin/rundler/chain_specs/optimism_devnet.toml new file mode 100644 index 00000000..5eb3b1dc --- /dev/null +++ b/bin/rundler/chain_specs/optimism_devnet.toml @@ -0,0 +1,6 @@ +base = "optimism" + +name = "Boba/Optimism Local Devnet" +id = 901 + +min_max_priority_fee_per_gas = "0x0F4240" # 1_000_000 diff --git a/bin/rundler/chain_specs/optimism_sepolia.toml b/bin/rundler/chain_specs/optimism_sepolia.toml new file mode 100644 index 00000000..d806657a --- /dev/null +++ b/bin/rundler/chain_specs/optimism_sepolia.toml @@ -0,0 +1,4 @@ +base = "optimism" + +name = "Optimism Sepolia" +id = 11155420 diff --git 
a/bin/rundler/chain_specs/polygon.toml b/bin/rundler/chain_specs/polygon.toml new file mode 100644 index 00000000..84c63610 --- /dev/null +++ b/bin/rundler/chain_specs/polygon.toml @@ -0,0 +1,7 @@ +name = "Polygon" +id = 137 + +priority_fee_oracle_type = "USAGE_BASED" +min_max_priority_fee_per_gas = "0x06FC23AC00" # 30_000_000_000 +bloxroute_enabled = true +max_transaction_size_bytes = 130000 diff --git a/bin/rundler/chain_specs/polygon_amoy.toml b/bin/rundler/chain_specs/polygon_amoy.toml new file mode 100644 index 00000000..f56d019e --- /dev/null +++ b/bin/rundler/chain_specs/polygon_amoy.toml @@ -0,0 +1,4 @@ +base = "polygon" + +name = "Polygon Amoy" +id = 80002 diff --git a/bin/rundler/src/cli/builder.rs b/bin/rundler/src/cli/builder.rs index 542ddc8f..e743399b 100644 --- a/bin/rundler/src/cli/builder.rs +++ b/bin/rundler/src/cli/builder.rs @@ -11,21 +11,22 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. 
-use std::{collections::HashMap, net::SocketAddr, time::Duration}; +use std::net::SocketAddr; -use anyhow::Context; +use anyhow::{bail, Context}; use clap::Args; -use ethers::types::H256; use rundler_builder::{ - self, BuilderEvent, BuilderEventKind, BuilderTask, BuilderTaskArgs, LocalBuilderBuilder, - TransactionSenderType, + self, BloxrouteSenderArgs, BuilderEvent, BuilderEventKind, BuilderTask, BuilderTaskArgs, + EntryPointBuilderSettings, FlashbotsSenderArgs, LocalBuilderBuilder, RawSenderArgs, + TransactionSenderArgs, TransactionSenderKind, }; use rundler_pool::RemotePoolClient; -use rundler_sim::{MempoolConfig, PriorityFeeMode}; +use rundler_sim::{MempoolConfigs, PriorityFeeMode}; use rundler_task::{ server::{connect_with_retries_shutdown, format_socket_addr}, spawn_tasks_with_shutdown, }; +use rundler_types::{chain::ChainSpec, EntryPointVersion}; use rundler_utils::emit::{self, WithEntryPoint, EVENT_CHANNEL_CAPACITY}; use tokio::sync::broadcast; @@ -56,6 +57,10 @@ pub struct BuilderArgs { host: String, /// Private key to use for signing transactions + /// DEPRECATED: Use `builder.private_keys` instead + /// + /// If both `builder.private_key` and `builder.private_keys` are set, `builder.private_key` is appended + /// to `builder.private_keys`. Keys must be unique. #[arg( long = "builder.private_key", name = "builder.private_key", @@ -63,6 +68,17 @@ pub struct BuilderArgs { )] private_key: Option, + /// Private keys to use for signing transactions + /// + /// Cannot use both `builder.private_keys` and `builder.aws_kms_key_ids` at the same time. + #[arg( + long = "builder.private_keys", + name = "builder.private_keys", + env = "BUILDER_PRIVATE_KEYS", + value_delimiter = ',' + )] + private_keys: Vec, + /// AWS KMS key IDs to use for signing transactions #[arg( long = "builder.aws_kms_key_ids", @@ -99,8 +115,22 @@ pub struct BuilderArgs { )] max_bundle_size: u64, + /// Choice of what sender type to to use for transaction submission. 
+ /// Defaults to the value of `raw`. Other options include `flashbots`, + /// `conditional` and `bloxroute` + #[arg( + long = "builder.sender", + name = "builder.sender", + env = "BUILDER_SENDER", + value_enum, + default_value = "raw" + )] + pub sender_type: TransactionSenderKind, + /// If present, the url of the ETH provider that will be used to send /// transactions. Defaults to the value of `node_http`. + /// + /// Only used when BUILDER_SENDER is "raw" #[arg( long = "builder.submit_url", name = "builder.submit_url", @@ -108,17 +138,71 @@ pub struct BuilderArgs { )] pub submit_url: Option, - /// Choice of what sender type to to use for transaction submission. - /// Defaults to the value of `raw`. Other options inclue `flashbots`, - /// `conditional` and `polygon_bloxroute` + /// If true, use the submit endpoint for transaction status checks. + /// + /// Only used when BUILDER_SENDER is "raw" #[arg( - long = "builder.sender", - name = "builder.sender", - env = "BUILDER_SENDER", - value_enum, - default_value = "raw" + long = "builder.use_submit_for_status", + name = "builder.use_submit_for_status", + env = "BUILDER_USE_SUBMIT_FOR_STATUS", + default_value = "false" + )] + pub use_submit_for_status: bool, + + /// Use the conditional RPC endpoint for transaction submission. + /// + /// Only used when BUILDER_SENDER is "raw" + #[arg( + long = "builder.use_conditional_rpc", + name = "builder.use_conditional_rpc", + env = "BUILDER_USE_CONDITIONAL_RPC", + default_value = "false" + )] + pub use_conditional_rpc: bool, + + /// If the "dropped" status is unsupported by the status provider. + /// + /// Only used when BUILDER_SENDER is "raw" + #[arg( + long = "builder.dropped_status_unsupported", + name = "builder.dropped_status_unsupported", + env = "BUILDER_DROPPED_STATUS_UNSUPPORTED", + default_value = "false" + )] + pub dropped_status_unsupported: bool, + + /// A list of builders to pass into the Flashbots Relay RPC. 
+ /// + /// Only used when BUILDER_SENDER is "flashbots" + #[arg( + long = "builder.flashbots_relay_builders", + name = "builder.flashbots_relay_builders", + env = "BUILDER_FLASHBOTS_RELAY_BUILDERS", + value_delimiter = ',', + default_value = "flashbots" + )] + flashbots_relay_builders: Vec, + + /// A private key used to authenticate with the Flashbots relay. + /// + /// Only used when BUILDER_SENDER is "flashbots" + #[arg( + long = "builder.flashbots_relay_auth_key", + name = "builder.flashbots_relay_auth_key", + env = "BUILDER_FLASHBOTS_RELAY_AUTH_KEY", + value_delimiter = ',' + )] + flashbots_relay_auth_key: Option, + + /// Auth header to use for Bloxroute polygon_private_tx sender + /// + /// Only used when BUILDER_SENDER is "bloxroute" + #[arg( + long = "builder.bloxroute_auth_header", + name = "builder.bloxroute_auth_header", + env = "BUILDER_BLOXROUTE_AUTH_HEADER" )] - pub sender_type: TransactionSenderType, + bloxroute_auth_header: Option, /// After submitting a bundle transaction, the maximum number of blocks to /// wait for that transaction to mine before we try resending with higher @@ -141,25 +225,26 @@ pub struct BuilderArgs { )] replacement_fee_percent_increase: u64, - /// Maximum number of times to increase gas fees when retrying a transaction + /// Maximum number of times to increase gas fees when retrying a cancellation transaction /// before giving up. #[arg( - long = "builder.max_fee_increases", - name = "builder.max_fee_increases", - env = "BUILDER_MAX_FEE_INCREASES", - // Seven increases of 10% is roughly 2x the initial fees. 
- default_value = "7" + long = "builder.max_cancellation_fee_increases", + name = "builder.max_cancellation_fee_increases", + env = "BUILDER_MAX_CANCELLATION_FEE_INCREASES", + default_value = "15" )] - max_fee_increases: u64, + max_cancellation_fee_increases: u64, - /// If using Polygon Mainnet, the auth header to use - /// for Bloxroute polygon_private_tx sender + /// The maximum number of blocks to wait in a replacement underpriced state before issuing + /// a cancellation transaction. #[arg( - long = "builder.bloxroute_auth_header", - name = "builder.bloxroute_auth_header", - env = "BUILDER_BLOXROUTE_AUTH_HEADER" + long = "builder.max_replacement_underpriced_blocks", + name = "builder.max_replacement_underpriced_blocks", + env = "BUILDER_MAX_REPLACEMENT_UNDERPRICED_BLOCKS", + default_value = "20" )] - bloxroute_auth_header: Option, + max_replacement_underpriced_blocks: u64, + /// The index offset to apply to the builder index #[arg( long = "builder_index_offset", @@ -175,6 +260,7 @@ impl BuilderArgs { /// common and builder specific arguments. pub async fn to_args( &self, + chain_spec: ChainSpec, common: &CommonArgs, remote_address: Option, ) -> anyhow::Result { @@ -187,24 +273,75 @@ impl BuilderArgs { .node_http .clone() .context("should have a node HTTP URL")?; - let submit_url = self.submit_url.clone().unwrap_or_else(|| rpc_url.clone()); let mempool_configs = match &common.mempool_config_path { - Some(path) => { - get_json_config::>(path, &common.aws_region).await? 
- } - None => HashMap::from([(H256::zero(), MempoolConfig::default())]), + Some(path) => get_json_config::(path, &common.aws_region) + .await + .with_context(|| format!("should load mempool configurations from {path}"))?, + None => MempoolConfigs::default(), }; + let mut entry_points = vec![]; + let mut num_builders = 0; + + if !common.disable_entry_point_v0_6 { + entry_points.push(EntryPointBuilderSettings { + address: chain_spec.entry_point_address_v0_6, + version: EntryPointVersion::V0_6, + num_bundle_builders: common.num_builders_v0_6, + bundle_builder_index_offset: self.builder_index_offset, + mempool_configs: mempool_configs + .get_for_entry_point(chain_spec.entry_point_address_v0_6), + }); + num_builders += common.num_builders_v0_6; + } + if !common.disable_entry_point_v0_7 { + entry_points.push(EntryPointBuilderSettings { + address: chain_spec.entry_point_address_v0_7, + version: EntryPointVersion::V0_7, + num_bundle_builders: common.num_builders_v0_7, + bundle_builder_index_offset: self.builder_index_offset, + mempool_configs: mempool_configs + .get_for_entry_point(chain_spec.entry_point_address_v0_7), + }); + num_builders += common.num_builders_v0_7; + } + + if (self.private_key.is_some() || !self.private_keys.is_empty()) + && !self.aws_kms_key_ids.is_empty() + { + bail!( + "Cannot use both builder.private_key(s) and builder.aws_kms_key_ids at the same time." + ); + } + + let mut private_keys = self.private_keys.clone(); + if self.private_key.is_some() || !self.private_keys.is_empty() { + if let Some(pk) = &self.private_key { + private_keys.push(pk.clone()); + } + + if num_builders > private_keys.len() as u64 { + bail!( + "Found {} private keys, but need {} keys for the number of builders. You may need to disable one of the entry points.", + private_keys.len(), num_builders + ); + } + } else if self.aws_kms_key_ids.len() < num_builders as usize { + bail!( + "Not enough AWS KMS key IDs for the number of builders. Need {} keys, found {}. 
You may need to disable one of the entry points.", + num_builders, self.aws_kms_key_ids.len() + ); + } + + let sender_args = self.sender_args(&chain_spec, &rpc_url)?; + Ok(BuilderTaskArgs { + entry_points, + chain_spec, + unsafe_mode: common.unsafe_mode, rpc_url, - entry_point_address: common - .entry_points - .first() - .context("should have at least one entry point")? - .parse() - .context("should parse entry point address")?, - private_key: self.private_key.clone(), + private_keys, aws_kms_key_ids: self.aws_kms_key_ids.clone(), aws_kms_region: common .aws_region @@ -212,25 +349,65 @@ impl BuilderArgs { .context("should be a valid aws region")?, redis_uri: self.redis_uri.clone(), redis_lock_ttl_millis: self.redis_lock_ttl_millis, - chain_id: common.chain_id, max_bundle_size: self.max_bundle_size, max_bundle_gas: common.max_bundle_gas, - submit_url, bundle_priority_fee_overhead_percent: common.bundle_priority_fee_overhead_percent, priority_fee_mode, - sender_type: self.sender_type, - eth_poll_interval: Duration::from_millis(common.eth_poll_interval_millis), - sim_settings: common.into(), - mempool_configs, + sender_args, + sim_settings: common.try_into()?, max_blocks_to_wait_for_mine: self.max_blocks_to_wait_for_mine, replacement_fee_percent_increase: self.replacement_fee_percent_increase, - max_fee_increases: self.max_fee_increases, + max_cancellation_fee_increases: self.max_cancellation_fee_increases, + max_replacement_underpriced_blocks: self.max_replacement_underpriced_blocks, remote_address, - bloxroute_auth_header: self.bloxroute_auth_header.clone(), - num_bundle_builders: common.num_builders, - bundle_builder_index_offset: self.builder_index_offset, }) } + + fn sender_args( + &self, + chain_spec: &ChainSpec, + rpc_url: &str, + ) -> anyhow::Result { + match self.sender_type { + TransactionSenderKind::Raw => Ok(TransactionSenderArgs::Raw(RawSenderArgs { + submit_url: self.submit_url.clone().unwrap_or_else(|| rpc_url.into()), + use_submit_for_status: 
self.use_submit_for_status, + dropped_status_supported: !self.dropped_status_unsupported, + use_conditional_rpc: self.use_conditional_rpc, + })), + TransactionSenderKind::Flashbots => { + if !chain_spec.flashbots_enabled { + return Err(anyhow::anyhow!("Flashbots sender is not enabled for chain")); + } + + Ok(TransactionSenderArgs::Flashbots(FlashbotsSenderArgs { + builders: self.flashbots_relay_builders.clone(), + relay_url: chain_spec + .flashbots_relay_url + .clone() + .context("should have a relay URL (chain spec: flashbots_relay_url)")?, + status_url: chain_spec.flashbots_status_url.clone().context( + "should have a flashbots status URL (chain spec: flashbots_status_url)", + )?, + auth_key: self.flashbots_relay_auth_key.clone().context( + "should have a flashbots relay auth key (cli: flashbots_relay_auth_key)", + )?, + })) + } + TransactionSenderKind::Bloxroute => { + if !chain_spec.bloxroute_enabled { + return Err(anyhow::anyhow!("Flashbots sender is not enabled for chain")); + } + + Ok(TransactionSenderArgs::Bloxroute(BloxrouteSenderArgs { + header: self + .bloxroute_auth_header + .clone() + .context("should have a bloxroute auth header")?, + })) + } + } + } } /// CLI options for the Builder server standalone @@ -249,7 +426,11 @@ pub struct BuilderCliArgs { pool_url: String, } -pub async fn run(builder_args: BuilderCliArgs, common_args: CommonArgs) -> anyhow::Result<()> { +pub async fn run( + chain_spec: ChainSpec, + builder_args: BuilderCliArgs, + common_args: CommonArgs, +) -> anyhow::Result<()> { let BuilderCliArgs { builder: builder_args, pool_url, @@ -260,6 +441,7 @@ pub async fn run(builder_args: BuilderCliArgs, common_args: CommonArgs) -> anyho let task_args = builder_args .to_args( + chain_spec.clone(), &common_args, Some(format_socket_addr(&builder_args.host, builder_args.port).parse()?), ) @@ -268,7 +450,7 @@ pub async fn run(builder_args: BuilderCliArgs, common_args: CommonArgs) -> anyho let pool = connect_with_retries_shutdown( "op pool from 
builder", &pool_url, - RemotePoolClient::connect, + |url| RemotePoolClient::connect(url, chain_spec.clone()), tokio::signal::ctrl_c(), ) .await?; diff --git a/bin/rundler/src/cli/chain_spec.rs b/bin/rundler/src/cli/chain_spec.rs new file mode 100644 index 00000000..4fab371f --- /dev/null +++ b/bin/rundler/src/cli/chain_spec.rs @@ -0,0 +1,128 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. 
+ +use config::{Config, Environment, File, FileFormat}; +use paste::paste; +use rundler_types::chain::ChainSpec; + +/// Resolve the chain spec from the network flag and a chain spec file +pub fn resolve_chain_spec(network: &Option, file: &Option) -> ChainSpec { + // get sources + let file_source = file.as_ref().map(|f| File::with_name(f.as_str())); + let network_source = network.as_ref().map(|n| { + File::from_str( + get_hardcoded_chain_spec(n.to_lowercase().as_str()), + FileFormat::Toml, + ) + }); + + // get the base config from the hierarchy of + // - ENV + // - file + // - network flag + let mut base_getter = Config::builder(); + if let Some(network_source) = &network_source { + base_getter = base_getter.add_source(network_source.clone()); + } + if let Some(file_source) = &file_source { + base_getter = base_getter.add_source(file_source.clone()); + } + let base_config = base_getter + .add_source(Environment::with_prefix("CHAIN")) + .build() + .expect("should build config"); + let base = base_config.get::("base").ok(); + + // construct the config from the hierarchy of + // - ENV + // - file + // - network flag + // - base (if defined) + // - defaults + let default = serde_json::to_string(&ChainSpec::default()).expect("should serialize to string"); + let mut config_builder = + Config::builder().add_source(File::from_str(default.as_str(), FileFormat::Json)); + if let Some(base) = base { + let base_spec = get_hardcoded_chain_spec(base.as_str()); + + // base config must not have a base key, recursive base is not allowed + Config::builder() + .add_source(File::from_str(base_spec, FileFormat::Toml)) + .build() + .expect("should build base config") + .get::("base") + .expect_err("base config must not have a base key"); + + config_builder = config_builder.add_source(File::from_str(base_spec, FileFormat::Toml)); + } + if let Some(network_source) = network_source { + config_builder = config_builder.add_source(network_source); + } + if let Some(file_source) = file_source { 
+ config_builder = config_builder.add_source(file_source); + } + let c = config_builder + .add_source(Environment::with_prefix("CHAIN")) + .build() + .expect("should build config"); + + let id = c.get::("id").ok(); + if let Some(id) = id { + if id == 0 { + panic!("chain id must be non-zero"); + } + } else { + panic!("chain id must be defined"); + } + + c.try_deserialize().expect("should deserialize config") +} + +macro_rules! define_hardcoded_chain_specs { + ($($network:ident),+) => { + paste! { + $( + const [< $network:upper _SPEC >]: &str = include_str!(concat!("../../chain_specs/", stringify!($network), ".toml")); + )+ + + fn get_hardcoded_chain_spec(network: &str) -> &'static str { + match network { + $( + stringify!($network) => [< $network:upper _SPEC >], + )+ + _ => panic!("unknown hardcoded network: {}", network), + } + } + + pub const HARDCODED_CHAIN_SPECS: &[&'static str] = &[$(stringify!($network),)+]; + } + }; +} + +define_hardcoded_chain_specs!( + dev, + ethereum, + ethereum_sepolia, + optimism, + optimism_sepolia, + base, + base_sepolia, + arbitrum, + arbitrum_sepolia, + polygon, + polygon_amoy, + avax, + avax_fuji, + boba_sepolia, + optimism_devnet +); diff --git a/bin/rundler/src/cli/metrics.rs b/bin/rundler/src/cli/metrics.rs index 1b5c0486..85384a2d 100644 --- a/bin/rundler/src/cli/metrics.rs +++ b/bin/rundler/src/cli/metrics.rs @@ -68,160 +68,71 @@ fn collect_tokio( runtime_metrics: &tokio::runtime::RuntimeMetrics, worker_metrics: tokio_metrics::RuntimeMetrics, ) { - gauge!( - format!("{}num_workers", TOKIO_PREFIX), - runtime_metrics.num_workers() as f64 - ); - gauge!( - format!("{}num_blocking_threads", TOKIO_PREFIX), - runtime_metrics.num_blocking_threads() as f64 - ); - gauge!( - format!("{}active_tasks_count", TOKIO_PREFIX), - runtime_metrics.active_tasks_count() as f64 - ); - gauge!( - format!("{}num_idle_blocking_threads", TOKIO_PREFIX), - runtime_metrics.num_idle_blocking_threads() as f64 - ); - gauge!( - format!("{}blocking_queue_depth", 
TOKIO_PREFIX), - runtime_metrics.blocking_queue_depth() as f64 - ); - gauge!( - format!("{}total_park_count", TOKIO_PREFIX), - worker_metrics.total_park_count as f64 - ); - gauge!( - format!("{}max_park_count", TOKIO_PREFIX), - worker_metrics.max_park_count as f64 - ); - gauge!( - format!("{}min_park_count", TOKIO_PREFIX), - worker_metrics.min_park_count as f64 - ); - gauge!( - format!("{}mean_poll_duration", TOKIO_PREFIX), - worker_metrics.mean_poll_duration.as_secs_f64() - ); - gauge!( - format!("{}mean_poll_duration_worker_min", TOKIO_PREFIX), - worker_metrics.mean_poll_duration_worker_min.as_secs_f64() - ); - gauge!( - format!("{}mean_poll_duration_worker_max", TOKIO_PREFIX), - worker_metrics.mean_poll_duration_worker_max.as_secs_f64() - ); - gauge!( - format!("{}total_noop_count", TOKIO_PREFIX), - worker_metrics.total_noop_count as f64, - ); - gauge!( - format!("{}max_noop_count", TOKIO_PREFIX), - worker_metrics.max_noop_count as f64, - ); - gauge!( - format!("{}min_noop_count", TOKIO_PREFIX), - worker_metrics.min_noop_count as f64, - ); - gauge!( - format!("{}total_steal_count", TOKIO_PREFIX), - worker_metrics.total_steal_count as f64, - ); - gauge!( - format!("{}max_steal_count", TOKIO_PREFIX), - worker_metrics.max_steal_count as f64, - ); - gauge!( - format!("{}min_steal_count", TOKIO_PREFIX), - worker_metrics.min_steal_count as f64, - ); - gauge!( - format!("{}total_steal_operations", TOKIO_PREFIX), - worker_metrics.total_steal_operations as f64, - ); - gauge!( - format!("{}max_steal_operations", TOKIO_PREFIX), - worker_metrics.max_steal_operations as f64, - ); - gauge!( - format!("{}min_steal_operations", TOKIO_PREFIX), - worker_metrics.min_steal_operations as f64, - ); - gauge!( - format!("{}num_remote_schedules", TOKIO_PREFIX), - worker_metrics.num_remote_schedules as f64, - ); - gauge!( - format!("{}total_local_schedule_count", TOKIO_PREFIX), - worker_metrics.total_local_schedule_count as f64, - ); - gauge!( - format!("{}max_local_schedule_count", 
TOKIO_PREFIX), - worker_metrics.max_local_schedule_count as f64, - ); - gauge!( - format!("{}min_local_schedule_count", TOKIO_PREFIX), - worker_metrics.min_local_schedule_count as f64, - ); - gauge!( - format!("{}total_overflow_count", TOKIO_PREFIX), - worker_metrics.total_overflow_count as f64, - ); - gauge!( - format!("{}max_overflow_count", TOKIO_PREFIX), - worker_metrics.max_overflow_count as f64, - ); - gauge!( - format!("{}min_overflow_count", TOKIO_PREFIX), - worker_metrics.min_overflow_count as f64, - ); - gauge!( - format!("{}total_polls_count", TOKIO_PREFIX), - worker_metrics.total_polls_count as f64, - ); - gauge!( - format!("{}max_polls_count", TOKIO_PREFIX), - worker_metrics.max_polls_count as f64, - ); - gauge!( - format!("{}min_polls_count", TOKIO_PREFIX), - worker_metrics.min_polls_count as f64, - ); - gauge!( - format!("{}total_busy_duration", TOKIO_PREFIX), - worker_metrics.total_busy_duration.as_secs_f64(), - ); - gauge!( - format!("{}max_busy_duration", TOKIO_PREFIX), - worker_metrics.max_busy_duration.as_secs_f64(), - ); - gauge!( - format!("{}min_busy_duration", TOKIO_PREFIX), - worker_metrics.min_busy_duration.as_secs_f64(), - ); - gauge!( - format!("{}injection_queue_depth", TOKIO_PREFIX), - worker_metrics.injection_queue_depth as f64, - ); - gauge!( - format!("{}total_local_queue_depth", TOKIO_PREFIX), - worker_metrics.total_local_queue_depth as f64, - ); - gauge!( - format!("{}max_local_queue_depth", TOKIO_PREFIX), - worker_metrics.max_local_queue_depth as f64, - ); - gauge!( - format!("{}min_local_queue_depth", TOKIO_PREFIX), - worker_metrics.min_local_queue_depth as f64, - ); - gauge!( - format!("{}budget_forced_yield_count", TOKIO_PREFIX), - worker_metrics.budget_forced_yield_count as f64, - ); - gauge!( - format!("{}io_driver_ready_count", TOKIO_PREFIX), - worker_metrics.io_driver_ready_count as f64, - ); + gauge!(format!("{}num_workers", TOKIO_PREFIX)).set(runtime_metrics.num_workers() as f64); + 
gauge!(format!("{}num_blocking_threads", TOKIO_PREFIX)) + .set(runtime_metrics.num_blocking_threads() as f64); + gauge!(format!("{}active_tasks_count", TOKIO_PREFIX)) + .set(runtime_metrics.active_tasks_count() as f64); + gauge!(format!("{}num_idle_blocking_threads", TOKIO_PREFIX)) + .set(runtime_metrics.num_idle_blocking_threads() as f64); + gauge!(format!("{}blocking_queue_depth", TOKIO_PREFIX)) + .set(runtime_metrics.blocking_queue_depth() as f64); + gauge!(format!("{}total_park_count", TOKIO_PREFIX)).set(worker_metrics.total_park_count as f64); + gauge!(format!("{}max_park_count", TOKIO_PREFIX)).set(worker_metrics.max_park_count as f64); + gauge!(format!("{}min_park_count", TOKIO_PREFIX)).set(worker_metrics.min_park_count as f64); + gauge!(format!("{}mean_poll_duration", TOKIO_PREFIX)) + .set(worker_metrics.mean_poll_duration.as_secs_f64()); + gauge!(format!("{}mean_poll_duration_worker_min", TOKIO_PREFIX)) + .set(worker_metrics.mean_poll_duration_worker_min.as_secs_f64()); + gauge!(format!("{}mean_poll_duration_worker_max", TOKIO_PREFIX)) + .set(worker_metrics.mean_poll_duration_worker_max.as_secs_f64()); + gauge!(format!("{}total_noop_count", TOKIO_PREFIX)).set(worker_metrics.total_noop_count as f64); + gauge!(format!("{}max_noop_count", TOKIO_PREFIX)).set(worker_metrics.max_noop_count as f64); + gauge!(format!("{}min_noop_count", TOKIO_PREFIX)).set(worker_metrics.min_noop_count as f64); + gauge!(format!("{}total_steal_count", TOKIO_PREFIX)) + .set(worker_metrics.total_steal_count as f64); + gauge!(format!("{}max_steal_count", TOKIO_PREFIX),).set(worker_metrics.max_steal_count as f64); + gauge!(format!("{}min_steal_count", TOKIO_PREFIX),).set(worker_metrics.min_steal_count as f64); + gauge!(format!("{}total_steal_operations", TOKIO_PREFIX)) + .set(worker_metrics.total_steal_operations as f64); + gauge!(format!("{}max_steal_operations", TOKIO_PREFIX)) + .set(worker_metrics.max_steal_operations as f64); + gauge!(format!("{}min_steal_operations", TOKIO_PREFIX)) 
+ .set(worker_metrics.min_steal_operations as f64); + gauge!(format!("{}num_remote_schedules", TOKIO_PREFIX)) + .set(worker_metrics.num_remote_schedules as f64); + gauge!(format!("{}total_local_schedule_count", TOKIO_PREFIX)) + .set(worker_metrics.total_local_schedule_count as f64); + gauge!(format!("{}max_local_schedule_count", TOKIO_PREFIX),) + .set(worker_metrics.max_local_schedule_count as f64); + gauge!(format!("{}min_local_schedule_count", TOKIO_PREFIX),) + .set(worker_metrics.min_local_schedule_count as f64); + gauge!(format!("{}total_overflow_count", TOKIO_PREFIX)) + .set(worker_metrics.total_overflow_count as f64); + gauge!(format!("{}max_overflow_count", TOKIO_PREFIX)) + .set(worker_metrics.max_overflow_count as f64); + gauge!(format!("{}min_overflow_count", TOKIO_PREFIX),) + .set(worker_metrics.min_overflow_count as f64); + gauge!(format!("{}total_polls_count", TOKIO_PREFIX)) + .set(worker_metrics.total_polls_count as f64); + gauge!(format!("{}max_polls_count", TOKIO_PREFIX)).set(worker_metrics.max_polls_count as f64); + gauge!(format!("{}min_polls_count", TOKIO_PREFIX)).set(worker_metrics.min_polls_count as f64); + gauge!(format!("{}total_busy_duration", TOKIO_PREFIX)) + .set(worker_metrics.total_busy_duration.as_secs_f64()); + gauge!(format!("{}max_busy_duration", TOKIO_PREFIX)) + .set(worker_metrics.max_busy_duration.as_secs_f64()); + gauge!(format!("{}min_busy_duration", TOKIO_PREFIX)) + .set(worker_metrics.min_busy_duration.as_secs_f64()); + gauge!(format!("{}injection_queue_depth", TOKIO_PREFIX)) + .set(worker_metrics.injection_queue_depth as f64); + gauge!(format!("{}total_local_queue_depth", TOKIO_PREFIX)) + .set(worker_metrics.total_local_queue_depth as f64); + gauge!(format!("{}max_local_queue_depth", TOKIO_PREFIX)) + .set(worker_metrics.max_local_queue_depth as f64); + gauge!(format!("{}min_local_queue_depth", TOKIO_PREFIX)) + .set(worker_metrics.min_local_queue_depth as f64); + gauge!(format!("{}budget_forced_yield_count", TOKIO_PREFIX)) + 
.set(worker_metrics.budget_forced_yield_count as f64); + gauge!(format!("{}io_driver_ready_count", TOKIO_PREFIX)) + .set(worker_metrics.io_driver_ready_count as f64); } diff --git a/bin/rundler/src/cli/mod.rs b/bin/rundler/src/cli/mod.rs index dd8c4b6d..fca7baad 100644 --- a/bin/rundler/src/cli/mod.rs +++ b/bin/rundler/src/cli/mod.rs @@ -11,10 +11,11 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. -use anyhow::Context; +use anyhow::{bail, Context}; use clap::{builder::PossibleValuesParser, Args, Parser, Subcommand}; mod builder; +mod chain_spec; mod json; mod metrics; mod node; @@ -26,7 +27,7 @@ use builder::BuilderCliArgs; use node::NodeCliArgs; use pool::PoolCliArgs; use rpc::RpcCliArgs; -use rundler_rpc::EthApiSettings; +use rundler_rpc::{EthApiSettings, RundlerApiSettings}; use rundler_sim::{ EstimationSettings, PrecheckSettings, PriorityFeeMode, SimulationSettings, MIN_CALL_GAS_LIMIT, }; @@ -51,21 +52,25 @@ pub async fn run() -> anyhow::Result<()> { ) .context("metrics server should start")?; + let cs = chain_spec::resolve_chain_spec(&opt.common.network, &opt.common.chain_spec); + tracing::info!("Chain spec: {:#?}", cs); + hybrid_compute::init( opt.common.hc_helper_addr, opt.common.hc_sys_account, opt.common.hc_sys_owner, opt.common.hc_sys_privkey, - opt.common.entry_points[0].parse::
().expect("Must provide an entry_point"), - opt.common.chain_id, + //opt.common.entry_points[0].parse::
().expect("Must provide an entry_point"), + cs.entry_point_address_v0_6, + cs.id, opt.common.node_http.clone().expect("Must provide node_http"), ); match opt.command { - Command::Node(args) => node::run(*args, opt.common).await?, - Command::Pool(args) => pool::run(args, opt.common).await?, - Command::Rpc(args) => rpc::run(args, opt.common).await?, - Command::Builder(args) => builder::run(args, opt.common).await?, + Command::Node(args) => node::run(cs, *args, opt.common).await?, + Command::Pool(args) => pool::run(cs, args, opt.common).await?, + Command::Rpc(args) => rpc::run(cs, args, opt.common).await?, + Command::Builder(args) => builder::run(cs, args, opt.common).await?, } tracing::info!("Shutdown, goodbye"); @@ -104,26 +109,24 @@ enum Command { #[derive(Debug, Args)] #[command(next_help_heading = "Common")] pub struct CommonArgs { - /// Entry point address to target + /// Network flag #[arg( - long = "entry_points", - name = "entry_points", - env = "ENTRY_POINTS", - default_values_t = Vec::::new(), // required or will error - value_delimiter = ',', - global = true - )] - entry_points: Vec, - - /// Chain ID to target + long = "network", + name = "network", + env = "NETWORK", + value_parser = PossibleValuesParser::new(chain_spec::HARDCODED_CHAIN_SPECS), + global = true) + ] + network: Option, + + /// Chain spec file path #[arg( - long = "chain_id", - name = "chain_id", - env = "CHAIN_ID", - default_value = "1337", + long = "chain_spec", + name = "chain_spec", + env = "CHAIN_SPEC", global = true )] - chain_id: u64, + chain_spec: Option, /// ETH Node HTTP URL to connect to #[arg( @@ -134,6 +137,10 @@ pub struct CommonArgs { )] node_http: Option, + /// Flag for turning unsafe bundling mode on + #[arg(long = "unsafe", env = "UNSAFE", global = true)] + unsafe_mode: bool, + #[arg( long = "max_verification_gas", name = "max_verification_gas", @@ -170,6 +177,17 @@ pub struct CommonArgs { )] min_unstake_delay: u32, + /// String representation of the timeout of a custom 
tracer in a format that is parsable by the + /// `ParseDuration` function on the ethereum node. See Docs: https://pkg.go.dev/time#ParseDuration + #[arg( + long = "tracer_timeout", + name = "tracer_timeout", + env = "TRACER_TIMEOUT", + default_value = "10s", + global = true + )] + tracer_timeout: String, + /// Amount of blocks to search when calling eth_getUserOperationByHash. /// Defaults from 0 to latest block #[arg( @@ -190,19 +208,20 @@ pub struct CommonArgs { max_simulate_handle_ops_gas: u64, #[arg( - long = "validation_estimation_gas_fee", - name = "validation_estimation_gas_fee", - env = "VALIDATION_ESTIMATION_GAS_FEE", + long = "verification_estimation_gas_fee", + name = "verification_estimation_gas_fee", + env = "VERIFICATION_ESTIMATION_GAS_FEE", default_value = "1000000000000", // 10K gwei global = true )] - validation_estimation_gas_fee: u64, + verification_estimation_gas_fee: u64, #[arg( long = "bundle_priority_fee_overhead_percent", name = "bundle_priority_fee_overhead_percent", env = "BUNDLE_PRIORITY_FEE_OVERHEAD_PERCENT", - default_value = "0" + default_value = "0", + global = true )] bundle_priority_fee_overhead_percent: u64, @@ -211,7 +230,8 @@ pub struct CommonArgs { name = "priority_fee_mode_kind", env = "PRIORITY_FEE_MODE_KIND", value_parser = PossibleValuesParser::new(["base_fee_percent", "priority_fee_increase_percent"]), - default_value = "priority_fee_increase_percent" + default_value = "priority_fee_increase_percent", + global = true )] priority_fee_mode_kind: String, @@ -219,7 +239,8 @@ pub struct CommonArgs { long = "priority_fee_mode_value", name = "priority_fee_mode_value", env = "PRIORITY_FEE_MODE_VALUE", - default_value = "0" + default_value = "0", + global = true )] priority_fee_mode_value: u64, @@ -227,7 +248,8 @@ pub struct CommonArgs { long = "base_fee_accept_percent", name = "base_fee_accept_percent", env = "BASE_FEE_ACCEPT_PERCENT", - default_value = "50" + default_value = "50", + global = true )] base_fee_accept_percent: u64, @@ 
-235,42 +257,65 @@ pub struct CommonArgs { long = "pre_verification_gas_accept_percent", name = "pre_verification_gas_accept_percent", env = "PRE_VERIFICATION_GAS_ACCEPT_PERCENT", - default_value = "50" + default_value = "50", + global = true )] pre_verification_gas_accept_percent: u64, - /// Interval at which the builder polls an Eth node for new blocks and - /// mined transactions. - #[arg( - long = "eth_poll_interval_millis", - name = "eth_poll_interval_millis", - env = "ETH_POLL_INTERVAL_MILLIS", - default_value = "100" - )] - pub eth_poll_interval_millis: u64, - #[arg( long = "aws_region", name = "aws_region", env = "AWS_REGION", - default_value = "us-east-1" + default_value = "us-east-1", + global = true )] aws_region: String, #[arg( long = "mempool_config_path", name = "mempool_config_path", - env = "MEMPOOL_CONFIG_PATH" + env = "MEMPOOL_CONFIG_PATH", + global = true )] pub mempool_config_path: Option, #[arg( - long = "num_builders", - name = "num_builders", - env = "NUM_BUILDERS", - default_value = "1" + long = "disable_entry_point_v0_6", + name = "disable_entry_point_v0_6", + env = "DISABLE_ENTRY_POINT_V0_6", + default_value = "false", + global = true + )] + pub disable_entry_point_v0_6: bool, + + // Ignored if entry_point_v0_6_enabled is false + #[arg( + long = "num_builders_v0_6", + name = "num_builders_v0_6", + env = "NUM_BUILDERS_V0_6", + default_value = "1", + global = true + )] + pub num_builders_v0_6: u64, + + #[arg( + long = "disable_entry_point_v0_7", + name = "disable_entry_point_v0_7", + env = "DISABLE_ENTRY_POINT_V0_7", + default_value = "false", + global = true )] - pub num_builders: u64, + pub disable_entry_point_v0_7: bool, + + // Ignored if entry_point_v0_7_enabled is false + #[arg( + long = "num_builders_v0_7", + name = "num_builders_v0_7", + env = "NUM_BUILDERS_V0_7", + default_value = "1", + global = true + )] + pub num_builders_v0_7: u64, #[arg( long = "hc_helper_addr", @@ -329,8 +374,11 @@ impl TryFrom<&CommonArgs> for 
EstimationSettings { Ok(Self { max_verification_gas: value.max_verification_gas, max_call_gas, + max_paymaster_verification_gas: value.max_verification_gas, + max_paymaster_post_op_gas: max_call_gas, + max_total_execution_gas: value.max_bundle_gas, max_simulate_handle_ops_gas: value.max_simulate_handle_ops_gas, - validation_estimation_gas_fee: value.validation_estimation_gas_fee, + verification_estimation_gas_fee: value.verification_estimation_gas_fee, }) } } @@ -338,9 +386,8 @@ impl TryFrom<&CommonArgs> for EstimationSettings { impl TryFrom<&CommonArgs> for PrecheckSettings { type Error = anyhow::Error; - fn try_from(value: &CommonArgs) -> anyhow::Result { + fn try_from(value: &CommonArgs) -> Result { Ok(Self { - chain_id: value.chain_id, max_verification_gas: value.max_verification_gas.into(), max_total_execution_gas: value.max_bundle_gas.into(), bundle_priority_fee_overhead_percent: value.bundle_priority_fee_overhead_percent, @@ -354,20 +401,42 @@ impl TryFrom<&CommonArgs> for PrecheckSettings { } } -impl From<&CommonArgs> for SimulationSettings { - fn from(value: &CommonArgs) -> Self { - Self::new( +impl TryFrom<&CommonArgs> for SimulationSettings { + type Error = anyhow::Error; + + fn try_from(value: &CommonArgs) -> Result { + if go_parse_duration::parse_duration(&value.tracer_timeout).is_err() { + bail!("Invalid value for tracer_timeout, must be parsable by the ParseDuration function. 
See docs https://pkg.go.dev/time#ParseDuration") + } + + Ok(Self::new( value.min_unstake_delay, value.min_stake_value, value.max_simulate_handle_ops_gas, value.max_verification_gas, - ) + value.tracer_timeout.clone(), + )) } } impl From<&CommonArgs> for EthApiSettings { fn from(value: &CommonArgs) -> Self { - Self::new(value.user_operation_event_block_distance) + Self::new(value.user_operation_event_block_distance) + } +} + +impl TryFrom<&CommonArgs> for RundlerApiSettings { + type Error = anyhow::Error; + + fn try_from(value: &CommonArgs) -> Result { + Ok(Self { + priority_fee_mode: PriorityFeeMode::try_from( + value.priority_fee_mode_kind.as_str(), + value.priority_fee_mode_value, + )?, + bundle_priority_fee_overhead_percent: value.bundle_priority_fee_overhead_percent, + max_verification_gas: value.max_verification_gas, + }) } } diff --git a/bin/rundler/src/cli/node/events.rs b/bin/rundler/src/cli/node/events.rs index 21af478f..8066acd9 100644 --- a/bin/rundler/src/cli/node/events.rs +++ b/bin/rundler/src/cli/node/events.rs @@ -13,7 +13,6 @@ use std::fmt::Display; -use ethers::types::Address; use rundler_builder::BuilderEvent; use rundler_pool::PoolEvent; @@ -23,12 +22,6 @@ pub enum Event { BuilderEvent(BuilderEvent), } -#[derive(Clone, Debug)] -pub struct WithEntryPoint { - pub entry_point: Address, - pub event: T, -} - impl From for Event { fn from(event: PoolEvent) -> Self { Self::PoolEvent(event) diff --git a/bin/rundler/src/cli/node/mod.rs b/bin/rundler/src/cli/node/mod.rs index 91019772..8a4b804e 100644 --- a/bin/rundler/src/cli/node/mod.rs +++ b/bin/rundler/src/cli/node/mod.rs @@ -16,6 +16,7 @@ use rundler_builder::{BuilderEvent, BuilderTask, LocalBuilderBuilder}; use rundler_pool::{LocalPoolBuilder, PoolEvent, PoolTask}; use rundler_rpc::RpcTask; use rundler_task::spawn_tasks_with_shutdown; +use rundler_types::chain::ChainSpec; use rundler_utils::emit::{self, WithEntryPoint, EVENT_CHANNEL_CAPACITY}; use tokio::sync::broadcast; @@ -43,20 +44,30 @@ pub 
struct NodeCliArgs { rpc: RpcArgs, } -pub async fn run(bundler_args: NodeCliArgs, common_args: CommonArgs) -> anyhow::Result<()> { +pub async fn run( + chain_spec: ChainSpec, + bundler_args: NodeCliArgs, + common_args: CommonArgs, +) -> anyhow::Result<()> { let NodeCliArgs { pool: pool_args, builder: builder_args, rpc: rpc_args, } = bundler_args; - let pool_task_args = pool_args.to_args(&common_args, None).await?; - let builder_task_args = builder_args.to_args(&common_args, None).await?; + let pool_task_args = pool_args + .to_args(chain_spec.clone(), &common_args, None) + .await?; + let builder_task_args = builder_args + .to_args(chain_spec.clone(), &common_args, None) + .await?; let rpc_task_args = rpc_args.to_args( + chain_spec, &common_args, (&common_args).try_into()?, (&common_args).into(), (&common_args).try_into()?, + (&common_args).try_into()?, )?; let (event_sender, event_rx) = diff --git a/bin/rundler/src/cli/pool.rs b/bin/rundler/src/cli/pool.rs index f549005d..53938d27 100644 --- a/bin/rundler/src/cli/pool.rs +++ b/bin/rundler/src/cli/pool.rs @@ -15,10 +15,11 @@ use std::{collections::HashMap, net::SocketAddr, time::Duration}; use anyhow::Context; use clap::Args; -use ethers::types::{Chain, H256}; +use ethers::types::Address; use rundler_pool::{LocalPoolBuilder, PoolConfig, PoolTask, PoolTaskArgs}; -use rundler_sim::MempoolConfig; +use rundler_sim::MempoolConfigs; use rundler_task::spawn_tasks_with_shutdown; +use rundler_types::{chain::ChainSpec, EntryPointVersion}; use rundler_utils::emit::{self, EVENT_CHANNEL_CAPACITY}; use tokio::sync::broadcast; @@ -88,6 +89,27 @@ pub struct PoolArgs { )] pub allowlist_path: Option, + /// Interval at which the pool polls an Eth node for new blocks + #[arg( + long = "pool.chain_poll_interval_millis", + name = "pool.chain_poll_interval_millis", + env = "POOL_CHAIN_POLL_INTERVAL_MILLIS", + default_value = "100", + global = true + )] + pub chain_poll_interval_millis: u64, + + /// The amount of times to retry syncing the 
chain before giving up and + /// waiting for the next block. + #[arg( + long = "pool.chain_sync_max_retries", + name = "pool.chain_sync_max_retries", + env = "POOL_CHAIN_SYNC_MAX_RETRIES", + default_value = "5", + global = true + )] + pub chain_sync_max_retries: u64, + #[arg( long = "pool.chain_history_size", name = "pool.chain_history_size", @@ -117,6 +139,38 @@ pub struct PoolArgs { default_value = "10" )] pub throttled_entity_live_blocks: u64, + + #[arg( + long = "pool.paymaster_tracking_enabled", + name = "pool.paymaster_tracking_enabled", + env = "POOL_PAYMASTER_TRACKING_ENABLED", + default_value = "true" + )] + pub paymaster_tracking_enabled: bool, + + #[arg( + long = "pool.paymaster_cache_length", + name = "pool.paymaster_cache_length", + env = "POOL_PAYMASTER_CACHE_LENGTH", + default_value = "10000" + )] + pub paymaster_cache_length: u32, + + #[arg( + long = "pool.reputation_tracking_enabled", + name = "pool.reputation_tracking_enabled", + env = "POOL_REPUTATION_TRACKING_ENABLED", + default_value = "true" + )] + pub reputation_tracking_enabled: bool, + + #[arg( + long = "pool.drop_min_num_blocks", + name = "pool.drop_min_num_blocks", + env = "POOL_DROP_MIN_NUM_BLOCKS", + default_value = "10" + )] + pub drop_min_num_blocks: u64, } impl PoolArgs { @@ -124,6 +178,7 @@ impl PoolArgs { /// common and op pool specific arguments. pub async fn to_args( &self, + chain_spec: ChainSpec, common: &CommonArgs, remote_address: Option, ) -> anyhow::Result { @@ -139,48 +194,69 @@ impl PoolArgs { tracing::info!("allowlist: {:?}", allowlist); let mempool_channel_configs = match &common.mempool_config_path { - Some(path) => { - get_json_config::>(path, &common.aws_region).await? 
- } - None => HashMap::from([(H256::zero(), MempoolConfig::default())]), + Some(path) => get_json_config::(path, &common.aws_region) + .await + .with_context(|| format!("should load mempool configurations from {path}"))?, + None => MempoolConfigs::default(), }; tracing::info!("Mempool channel configs: {:?}", mempool_channel_configs); - let pool_configs = common - .entry_points - .iter() - .map(|ep| { - let entry_point = ep.parse().context("Invalid entry_points argument")?; - Ok(PoolConfig { - entry_point, - chain_id: common.chain_id, - // Currently use the same shard count as the number of builders - num_shards: common.num_builders, - same_sender_mempool_count: self.same_sender_mempool_count, - min_replacement_fee_increase_percentage: self - .min_replacement_fee_increase_percentage, - max_size_of_pool_bytes: self.max_size_in_bytes, - blocklist: blocklist.clone(), - allowlist: allowlist.clone(), - precheck_settings: common.try_into()?, - sim_settings: common.into(), - mempool_channel_configs: mempool_channel_configs.clone(), - throttled_entity_mempool_count: self.throttled_entity_mempool_count, - throttled_entity_live_blocks: self.throttled_entity_live_blocks, - }) - }) - .collect::>>()?; + let chain_id = chain_spec.id; + let pool_config_base = PoolConfig { + // update per entry point + entry_point: Address::default(), + entry_point_version: EntryPointVersion::Unspecified, + num_shards: 0, + mempool_channel_configs: HashMap::new(), + // Base config + chain_id, + same_sender_mempool_count: self.same_sender_mempool_count, + min_replacement_fee_increase_percentage: self.min_replacement_fee_increase_percentage, + max_size_of_pool_bytes: self.max_size_in_bytes, + blocklist: blocklist.clone(), + allowlist: allowlist.clone(), + precheck_settings: common.try_into()?, + sim_settings: common.try_into()?, + throttled_entity_mempool_count: self.throttled_entity_mempool_count, + throttled_entity_live_blocks: self.throttled_entity_live_blocks, + paymaster_tracking_enabled: 
self.paymaster_tracking_enabled, + paymaster_cache_length: self.paymaster_cache_length, + reputation_tracking_enabled: self.reputation_tracking_enabled, + drop_min_num_blocks: self.drop_min_num_blocks, + }; + + let mut pool_configs = vec![]; + + if !common.disable_entry_point_v0_6 { + pool_configs.push(PoolConfig { + entry_point: chain_spec.entry_point_address_v0_6, + entry_point_version: EntryPointVersion::V0_6, + num_shards: common.num_builders_v0_6, + mempool_channel_configs: mempool_channel_configs + .get_for_entry_point(chain_spec.entry_point_address_v0_6), + ..pool_config_base.clone() + }); + } + if !common.disable_entry_point_v0_7 { + pool_configs.push(PoolConfig { + entry_point: chain_spec.entry_point_address_v0_7, + entry_point_version: EntryPointVersion::V0_7, + num_shards: common.num_builders_v0_7, + mempool_channel_configs: mempool_channel_configs + .get_for_entry_point(chain_spec.entry_point_address_v0_7), + ..pool_config_base.clone() + }); + } Ok(PoolTaskArgs { - chain_id: common.chain_id, - chain_history_size: self - .chain_history_size - .unwrap_or_else(|| default_chain_history_size(common.chain_id)), + chain_spec, + unsafe_mode: common.unsafe_mode, http_url: common .node_http .clone() .context("pool requires node_http arg")?, - http_poll_interval: Duration::from_millis(common.eth_poll_interval_millis), + chain_poll_interval: Duration::from_millis(self.chain_poll_interval_millis), + chain_max_sync_retries: self.chain_sync_max_retries, pool_configs, remote_address, chain_update_channel_capacity: self.chain_update_channel_capacity.unwrap_or(1024), @@ -188,25 +264,6 @@ impl PoolArgs { } } -const SMALL_HISTORY_SIZE: u64 = 16; -const LARGE_HISTORY_SIZE: u64 = 128; - -// Mainnets that are known to not have large reorgs can use the small history -// size. Use the large history size for all testnets because I don't trust them. 
-const SMALL_HISTORY_CHAIN_IDS: &[u64] = &[ - Chain::Mainnet as u64, - Chain::Arbitrum as u64, - Chain::Optimism as u64, -]; - -fn default_chain_history_size(chain_id: u64) -> u64 { - if SMALL_HISTORY_CHAIN_IDS.contains(&chain_id) { - SMALL_HISTORY_SIZE - } else { - LARGE_HISTORY_SIZE - } -} - /// CLI options for the Pool server standalone #[derive(Args, Debug)] pub struct PoolCliArgs { @@ -214,11 +271,16 @@ pub struct PoolCliArgs { pool: PoolArgs, } -pub async fn run(pool_args: PoolCliArgs, common_args: CommonArgs) -> anyhow::Result<()> { +pub async fn run( + chain_spec: ChainSpec, + pool_args: PoolCliArgs, + common_args: CommonArgs, +) -> anyhow::Result<()> { let PoolCliArgs { pool: pool_args } = pool_args; let (event_sender, event_rx) = broadcast::channel(EVENT_CHANNEL_CAPACITY); let task_args = pool_args .to_args( + chain_spec, &common_args, Some(format!("{}:{}", pool_args.host, pool_args.port).parse()?), ) diff --git a/bin/rundler/src/cli/rpc.rs b/bin/rundler/src/cli/rpc.rs index 80980d6f..7188479f 100644 --- a/bin/rundler/src/cli/rpc.rs +++ b/bin/rundler/src/cli/rpc.rs @@ -17,9 +17,10 @@ use anyhow::Context; use clap::Args; use rundler_builder::RemoteBuilderClient; use rundler_pool::RemotePoolClient; -use rundler_rpc::{EthApiSettings, RpcTask, RpcTaskArgs}; +use rundler_rpc::{EthApiSettings, RpcTask, RpcTaskArgs, RundlerApiSettings}; use rundler_sim::{EstimationSettings, PrecheckSettings}; use rundler_task::{server::connect_with_retries_shutdown, spawn_tasks_with_shutdown}; +use rundler_types::chain::ChainSpec; use super::CommonArgs; @@ -52,7 +53,7 @@ pub struct RpcArgs { env = "RPC_API", default_value = "eth,rundler", value_delimiter = ',', - value_parser = ["eth", "debug", "rundler"] + value_parser = ["eth", "debug", "rundler", "admin"] )] api: Vec, @@ -81,9 +82,11 @@ impl RpcArgs { #[allow(clippy::too_many_arguments)] pub fn to_args( &self, + chain_spec: ChainSpec, common: &CommonArgs, precheck_settings: PrecheckSettings, eth_api_settings: EthApiSettings, 
+ rundler_api_settings: RundlerApiSettings, estimation_settings: EstimationSettings, ) -> anyhow::Result { let apis = self @@ -93,25 +96,23 @@ impl RpcArgs { .collect::, _>>()?; Ok(RpcTaskArgs { + chain_spec, + unsafe_mode: common.unsafe_mode, port: self.port, host: self.host.clone(), - entry_points: common - .entry_points - .iter() - .map(|ep| ep.parse()) - .collect::, _>>() - .context("Invalid entry_points argument")?, rpc_url: common .node_http .clone() .context("rpc requires node_http arg")?, - chain_id: common.chain_id, api_namespaces: apis, precheck_settings, eth_api_settings, + rundler_api_settings, estimation_settings, rpc_timeout: Duration::from_secs(self.timeout_seconds.parse()?), max_connections: self.max_connections, + entry_point_v0_6_enabled: !common.disable_entry_point_v0_6, + entry_point_v0_7_enabled: !common.disable_entry_point_v0_7, }) } } @@ -141,7 +142,11 @@ pub struct RpcCliArgs { builder_url: String, } -pub async fn run(rpc_args: RpcCliArgs, common_args: CommonArgs) -> anyhow::Result<()> { +pub async fn run( + chain_spec: ChainSpec, + rpc_args: RpcCliArgs, + common_args: CommonArgs, +) -> anyhow::Result<()> { let RpcCliArgs { rpc: rpc_args, pool_url, @@ -149,16 +154,18 @@ pub async fn run(rpc_args: RpcCliArgs, common_args: CommonArgs) -> anyhow::Resul } = rpc_args; let task_args = rpc_args.to_args( + chain_spec.clone(), &common_args, (&common_args).try_into()?, (&common_args).into(), (&common_args).try_into()?, + (&common_args).try_into()?, )?; let pool = connect_with_retries_shutdown( "op pool from rpc", &pool_url, - RemotePoolClient::connect, + |url| RemotePoolClient::connect(url, chain_spec.clone()), tokio::signal::ctrl_c(), ) .await?; diff --git a/bin/tools/src/bin/get_example_ops.rs b/bin/tools/src/bin/get_example_ops.rs deleted file mode 100644 index 0c967a82..00000000 --- a/bin/tools/src/bin/get_example_ops.rs +++ /dev/null @@ -1,38 +0,0 @@ -// This file is part of Rundler. 
-// -// Rundler is free software: you can redistribute it and/or modify it under the -// terms of the GNU Lesser General Public License as published by the Free Software -// Foundation, either version 3 of the License, or (at your option) any later version. -// -// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; -// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License along with Rundler. -// If not, see https://www.gnu.org/licenses/. - -use dotenv::dotenv; -use rundler_dev::DevClients; -use rundler_rpc::RpcUserOperation; - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - dotenv()?; - let clients = DevClients::new_from_env()?; - // We'll make operations that call the entry point's addStake. - let op = clients - .new_wallet_op(clients.entry_point.add_stake(1), 1.into()) - .await?; - println!("User operation to make wallet call EntryPoint#addStake():"); - println!( - "{}", - serde_json::to_string(&RpcUserOperation::from(op.clone()))? 
- ); - let op = clients - .new_wallet_op_with_paymaster(clients.entry_point.add_stake(1), 1.into()) - .await?; - println!(); - println!("User operation to make wallet call EntryPoint#addStake() with paymaster:"); - println!("{}", serde_json::to_string(&RpcUserOperation::from(op))?); - Ok(()) -} diff --git a/crates/builder/Cargo.toml b/crates/builder/Cargo.toml index 833a6be5..cc910c2a 100644 --- a/crates/builder/Cargo.toml +++ b/crates/builder/Cargo.toml @@ -7,7 +7,6 @@ license.workspace = true repository.workspace = true [dependencies] -rundler-pool = { path = "../pool" } rundler-provider = { path = "../provider" } rundler-sim = { path = "../sim" } rundler-task = { path = "../task" } @@ -29,7 +28,7 @@ pin-project.workspace = true prost.workspace = true parse-display.workspace = true reqwest.workspace = true -rslock = "0.2.2" +rslock = "0.3.0" rusoto_core = { version = "0.48.0", default-features = false, features = ["rustls"] } rusoto_kms = { version = "0.48.0", default-features = false, features = ["rustls"] } thiserror.workspace = true @@ -41,12 +40,13 @@ tonic-reflection.workspace = true tracing.workspace = true serde.workspace = true serde_json.workspace = true +strum.workspace = true mockall = {workspace = true, optional = true } [dev-dependencies] mockall.workspace = true -rundler-pool = { path = "../pool", features = ["test-utils"] } +rundler-types = { path = "../types", features = ["test-utils"] } rundler-provider = { path = "../provider", features = ["test-utils"] } rundler-sim = { path = "../sim", features = ["test-utils"] } diff --git a/crates/builder/src/bundle_proposer.rs b/crates/builder/src/bundle_proposer.rs index 88187763..0c1ed11a 100644 --- a/crates/builder/src/bundle_proposer.rs +++ b/crates/builder/src/bundle_proposer.rs @@ -12,13 +12,12 @@ // If not, see https://www.gnu.org/licenses/. 
use std::{ - cmp, collections::{BTreeMap, HashMap, HashSet}, future::Future, + marker::PhantomData, mem, pin::Pin, sync::Arc, - time::Duration, }; use anyhow::Context; @@ -29,40 +28,54 @@ use futures_util::TryFutureExt; use linked_hash_map::LinkedHashMap; #[cfg(test)] use mockall::automock; -use rundler_pool::{PoolOperation, PoolServer}; -use rundler_provider::{EntryPoint, HandleOpsOut, Provider}; +use rundler_provider::{ + BundleHandler, EntryPoint, HandleOpsOut, L1GasProvider, Provider, SignatureAggregator, +}; use rundler_sim::{ - gas::{self, GasOverheads}, - EntityInfo, EntityInfos, ExpectedStorage, FeeEstimator, PriorityFeeMode, SimulationError, - SimulationResult, SimulationViolation, Simulator, ViolationError, + gas, ExpectedStorage, FeeEstimator, PriorityFeeMode, SimulationError, SimulationResult, + Simulator, ViolationError, }; use rundler_types::{ - Entity, EntityType, EntityUpdate, EntityUpdateType, GasFees, Timestamp, UserOperation, - UserOpsPerAggregator, + chain::ChainSpec, + pool::{Pool, PoolOperation, SimulationViolation}, + Entity, EntityInfo, EntityInfos, EntityType, EntityUpdate, EntityUpdateType, GasFees, + Timestamp, UserOperation, UserOperationVariant, UserOpsPerAggregator, BUNDLE_BYTE_OVERHEAD, + TIME_RANGE_BUFFER, USER_OP_OFFSET_WORD_SIZE, }; use rundler_utils::{emit::WithEntryPoint, math}; use tokio::{sync::broadcast, try_join}; use tracing::{error, info, warn}; -use rundler_types::hybrid_compute; -use crate::emit::{BuilderEvent, OpRejectionReason, SkipReason}; +use crate::emit::{BuilderEvent, ConditionNotMetReason, OpRejectionReason, SkipReason}; +use rundler_types::hybrid_compute; -/// A user op must be valid for at least this long into the future to be included. 
-const TIME_RANGE_BUFFER: Duration = Duration::from_secs(60); /// Extra buffer percent to add on the bundle transaction gas estimate to be sure it will be enough const BUNDLE_TRANSACTION_GAS_OVERHEAD_PERCENT: u64 = 5; -#[derive(Debug, Default)] -pub(crate) struct Bundle { - pub(crate) ops_per_aggregator: Vec, +#[derive(Debug)] +pub(crate) struct Bundle { + pub(crate) ops_per_aggregator: Vec>, pub(crate) gas_estimate: U256, pub(crate) gas_fees: GasFees, pub(crate) expected_storage: ExpectedStorage, - pub(crate) rejected_ops: Vec, + pub(crate) rejected_ops: Vec, pub(crate) entity_updates: Vec, } -impl Bundle { +impl Default for Bundle { + fn default() -> Self { + Self { + ops_per_aggregator: Vec::new(), + gas_estimate: U256::zero(), + gas_fees: GasFees::default(), + expected_storage: ExpectedStorage::default(), + rejected_ops: Vec::new(), + entity_updates: Vec::new(), + } + } +} + +impl Bundle { pub(crate) fn len(&self) -> usize { self.ops_per_aggregator .iter() @@ -74,38 +87,70 @@ impl Bundle { self.ops_per_aggregator.is_empty() } - pub(crate) fn iter_ops(&self) -> impl Iterator + '_ { + pub(crate) fn iter_ops(&self) -> impl Iterator + '_ { self.ops_per_aggregator.iter().flat_map(|ops| &ops.user_ops) } } -#[cfg_attr(test, automock)] #[async_trait] +#[cfg_attr(test, automock(type UO = rundler_types::v0_6::UserOperation;))] pub(crate) trait BundleProposer: Send + Sync + 'static { - async fn make_bundle(&self, required_fees: Option) -> anyhow::Result; + type UO: UserOperation; + + /// Constructs the next bundle + /// + /// If `min_fees` is `Some`, the proposer will ensure the bundle has + /// at least `min_fees`. + async fn make_bundle( + &mut self, + min_fees: Option, + is_replacement: bool, + ) -> BundleProposerResult>; + + /// Gets the current gas fees + /// + /// If `min_fees` is `Some`, the proposer will ensure the gas fees returned are at least `min_fees`. 
+ async fn estimate_gas_fees( + &self, + min_fees: Option, + ) -> BundleProposerResult<(GasFees, U256)>; + + /// Notifies the proposer that a condition was not met during the last bundle proposal + fn notify_condition_not_met(&mut self); +} + +pub(crate) type BundleProposerResult = std::result::Result; + +#[derive(Debug, thiserror::Error)] +pub(crate) enum BundleProposerError { + #[error("No operations initially")] + NoOperationsInitially, + #[error("No operations after fee filtering")] + NoOperationsAfterFeeFilter, + #[error(transparent)] + ProviderError(#[from] rundler_provider::ProviderError), + /// All other errors + #[error(transparent)] + Other(#[from] anyhow::Error), } #[derive(Debug)] -pub(crate) struct BundleProposerImpl -where - S: Simulator, - E: EntryPoint, - P: Provider, - C: PoolServer, -{ +pub(crate) struct BundleProposerImpl { builder_index: u64, - pool: C, + pool: M, simulator: S, entry_point: E, provider: Arc

, settings: Settings, fee_estimator: FeeEstimator

, event_sender: broadcast::Sender>, + condition_not_met_notified: bool, + _uo_type: PhantomData, } #[derive(Debug)] pub(crate) struct Settings { - pub(crate) chain_id: u64, + pub(crate) chain_spec: ChainSpec, pub(crate) max_bundle_size: u64, pub(crate) max_bundle_gas: u64, pub(crate) beneficiary: Address, @@ -114,50 +159,90 @@ pub(crate) struct Settings { } #[async_trait] -impl BundleProposer for BundleProposerImpl +impl BundleProposer for BundleProposerImpl where - S: Simulator, - E: EntryPoint, + UO: UserOperation + From, + UserOperationVariant: AsRef, + S: Simulator, + E: EntryPoint + SignatureAggregator + BundleHandler + L1GasProvider + SimulationProvider, P: Provider, - C: PoolServer, + M: Pool, { - async fn make_bundle(&self, required_fees: Option) -> anyhow::Result { + type UO = UO; + + async fn estimate_gas_fees( + &self, + required_fees: Option, + ) -> BundleProposerResult<(GasFees, U256)> { + Ok(self + .fee_estimator + .required_bundle_fees(required_fees) + .await?) + } + + fn notify_condition_not_met(&mut self) { + self.condition_not_met_notified = true; + } + + async fn make_bundle( + &mut self, + required_fees: Option, + is_replacement: bool, + ) -> BundleProposerResult> { let (ops, (block_hash, _), (bundle_fees, base_fee)) = try_join!( self.get_ops_from_pool(), self.provider .get_latest_block_hash_and_number() - .map_err(anyhow::Error::from), - self.fee_estimator.required_bundle_fees(required_fees) + .map_err(BundleProposerError::from), + self.estimate_gas_fees(required_fees) )?; + if ops.is_empty() { + return Err(BundleProposerError::NoOperationsInitially); + } - // Limit the amount of gas in the bundle - tracing::debug!( - "Builder index: {}, starting bundle proposal with {} ops", - self.builder_index, - ops.len(), - ); + tracing::debug!("Starting bundle proposal with {} ops", ops.len()); - // Do an initial filtering of ops that we want to simulate. 
+ // (0) Determine fees required for ops to be included in a bundle + // if replacing, just require bundle fees increase chances of unsticking + let required_op_fees = if is_replacement { + bundle_fees + } else { + self.fee_estimator.required_op_fees(bundle_fees) + }; + let all_paymaster_addresses = ops + .iter() + .filter_map(|op| op.uo.paymaster()) + .collect::>(); + + // (1) Filter out ops that don't pay enough to be included + let fee_futs = ops + .into_iter() + .map(|op| self.check_fees(op, base_fee, required_op_fees)) + .collect::>(); + let ops = future::join_all(fee_futs) + .await + .into_iter() + .flatten() + .collect::>(); + + tracing::debug!("Bundle proposal after fee limit had {} ops", ops.len()); + if ops.is_empty() { + return Err(BundleProposerError::NoOperationsAfterFeeFilter); + } + + // (2) Limit the amount of operations for simulation let (ops, gas_limit) = self.limit_user_operations_for_simulation(ops); + tracing::debug!( - "Builder index: {}, bundle proposal after limit had {} ops and {:?} gas limit", - self.builder_index, + "Bundle proposal after gas limit had {} ops and {:?} gas limit", ops.len(), gas_limit ); - // Determine fees required for ops to be included in a bundle - let required_op_fees = self.fee_estimator.required_op_fees(bundle_fees); - - let all_paymaster_addresses = ops - .iter() - .filter_map(|op| op.uo.paymaster()) - .collect::>(); - - // Filter ops and simulate + // (3) simulate ops let simulation_futures = ops .into_iter() - .map(|op| self.filter_and_simulate(op, block_hash, base_fee, required_op_fees)) + .map(|op| self.simulate_op(op, block_hash)) .collect::>(); let ops_with_simulations_future = future::join_all(simulation_futures); @@ -180,12 +265,21 @@ where let gas_estimate = self.estimate_gas_rejecting_failed_ops(&mut context).await?; if let Some(gas_estimate) = gas_estimate { tracing::debug!( - "Builder index: {}, bundle proposal succeeded with {} ops and {:?} gas limit", - self.builder_index, + "Bundle proposal 
succeeded with {} ops and {:?} gas limit", context.iter_ops().count(), gas_estimate ); + // If recently notified that a bundle condition was not met, check each of + // the conditions again to ensure if they are met, rejecting OPs if they are not. + if self.condition_not_met_notified { + self.condition_not_met_notified = false; + self.check_conditions_met(&mut context).await?; + if context.is_empty() { + break; + } + } + let mut expected_storage = ExpectedStorage::default(); for op in context.iter_ops_with_simulations() { expected_storage.merge(&op.simulation.expected_storage)?; @@ -210,17 +304,19 @@ where }) } } - -impl BundleProposerImpl +use rundler_provider::SimulationProvider; +impl BundleProposerImpl where - S: Simulator, - E: EntryPoint, + UO: UserOperation + From, + UserOperationVariant: AsRef, + S: Simulator, + E: EntryPoint + SignatureAggregator + BundleHandler + L1GasProvider + SimulationProvider, P: Provider, - C: PoolServer, + M: Pool, { pub(crate) fn new( builder_index: u64, - pool: C, + pool: M, simulator: S, entry_point: E, provider: Arc

, @@ -234,33 +330,35 @@ where entry_point, provider: provider.clone(), fee_estimator: FeeEstimator::new( + &settings.chain_spec, provider, - settings.chain_id, settings.priority_fee_mode, settings.bundle_priority_fee_overhead_percent, ), settings, event_sender, + condition_not_met_notified: false, + _uo_type: PhantomData, } } - // Filter and simulate a single op. Returns None if the op should be skipped. + // Check fees for a single user op. Returns None if the op should be skipped. // // Filters on: - // - gas fees - // - pre-verification gas - // - any errors - async fn filter_and_simulate( + // - insufficient gas fees + // - insufficient pre-verification gas + async fn check_fees( &self, op: PoolOperation, - block_hash: H256, base_fee: U256, required_op_fees: GasFees, - ) -> Option<(PoolOperation, Result)> { - println!("HC filter_and_simulate op {:?}", op); + ) -> Option { + let op_hash = self.op_hash(&op.uo); + println!("HC proposer check_fees op {:?}", op); + // filter by fees - if op.uo.max_fee_per_gas < required_op_fees.max_fee_per_gas - || op.uo.max_priority_fee_per_gas < required_op_fees.max_priority_fee_per_gas + if op.uo.max_fee_per_gas() < required_op_fees.max_fee_per_gas + || op.uo.max_priority_fee_per_gas() < required_op_fees.max_priority_fee_per_gas { self.emit(BuilderEvent::skipped_op( self.builder_index, @@ -268,8 +366,8 @@ where SkipReason::InsufficientFees { required_fees: required_op_fees, actual_fees: GasFees { - max_fee_per_gas: op.uo.max_fee_per_gas, - max_priority_fee_per_gas: op.uo.max_priority_fee_per_gas, + max_fee_per_gas: op.uo.max_fee_per_gas(), + max_priority_fee_per_gas: op.uo.max_priority_fee_per_gas(), }, }, )); @@ -277,28 +375,31 @@ where } // Check if the pvg is enough - let mut required_pvg = gas::calc_required_pre_verification_gas( - &op.uo, - self.entry_point.address(), - self.provider.clone(), - self.settings.chain_id, + let mut required_pvg = match gas::calc_required_pre_verification_gas( + &self.settings.chain_spec, + 
&self.entry_point, + op.uo.as_ref(), base_fee, ) .await - .map_err(|e| { - self.emit(BuilderEvent::skipped_op( - self.builder_index, - self.op_hash(&op.uo), - SkipReason::Other { - reason: Arc::new(format!( - "Failed to calculate required pre-verification gas for op: {e:?}, skipping" - )), - }, - )); - e - }) - .ok()?; - let hc_hash = op.uo.op_hc_hash(); + { + Ok(pvg) => pvg, + Err(e) => { + error!("Failed to calculate required pre-verification gas for op: {e:?}, skipping"); + self.emit(BuilderEvent::skipped_op( + self.builder_index, + op_hash, + SkipReason::Other { + reason: Arc::new(format!( + "Failed to calculate required pre-verification gas for op: {e:?}, skipping" + )), + }, + )); + return None; + } + }; + + let hc_hash = op.uo.hc_hash(); let mut is_hc:bool = false; if let Some(hc_pvg) = hybrid_compute::hc_get_pvg(hc_hash) { @@ -311,10 +412,10 @@ where println!("HC no pvg override for op_hash {:?}, required_pvg {:?}", hc_hash, required_pvg); } - if op.uo.pre_verification_gas < required_pvg { + if op.uo.pre_verification_gas() < required_pvg { if is_hc { // Workaround - reject op here instead of waiting indefinitely. 
- println!("HC WARN rejecting op_hash {:?}, pre_verifification_gas {:?} < {:?}", hc_hash, op.uo.pre_verification_gas, required_pvg); + println!("HC WARN rejecting op_hash {:?}, pre_verifification_gas {:?} < {:?}", hc_hash, op.uo.pre_verification_gas(), required_pvg); self.emit(BuilderEvent::rejected_op( self.builder_index, @@ -323,30 +424,48 @@ where message: Arc::new("HC insufficient pre_verification_gas".to_owned()), }, )); - let err_result = (op, Err(SimulationError{ violation_error: ViolationError::Violations(Vec::new()), entity_infos: None } )); - return Some(err_result); + //let err_result = (op, Err(SimulationError{ violation_error: ViolationError::Violations(Vec::new()), entity_infos: None } )); + //return Some(err_result); + return None; // FIXME } else { self.emit(BuilderEvent::skipped_op( self.builder_index, - self.op_hash(&op.uo), + op_hash, SkipReason::InsufficientPreVerificationGas { base_fee, op_fees: GasFees { - max_fee_per_gas: op.uo.max_fee_per_gas, - max_priority_fee_per_gas: op.uo.max_priority_fee_per_gas, + max_fee_per_gas: op.uo.max_fee_per_gas(), + max_priority_fee_per_gas: op.uo.max_priority_fee_per_gas(), }, required_pvg, - actual_pvg: op.uo.pre_verification_gas, + actual_pvg: op.uo.pre_verification_gas(), }, )); return None; } } + Some(op) + } + + // Simulate a single op. Returns None if the op should be skipped. 
+ // + // Filters on any errors + async fn simulate_op( + &self, + op: PoolOperation, + block_hash: H256, + ) -> Option<(PoolOperation, Result)> { + let op_hash = self.op_hash(&op.uo); + // Simulate let result = self .simulator - .simulate_validation(op.uo.clone(), Some(block_hash), Some(op.expected_code_hash)) + .simulate_validation( + op.uo.clone().into(), + Some(block_hash), + Some(op.expected_code_hash), + ) .await; let result = match result { Ok(success) => (op, Ok(success)), @@ -361,7 +480,7 @@ where } => { self.emit(BuilderEvent::skipped_op( self.builder_index, - self.op_hash(&op.uo), + op_hash, SkipReason::Other { reason: Arc::new(format!("Failed to simulate op: {error:?}, skipping")), }, @@ -378,18 +497,17 @@ where &self, ops_with_simulations: Vec<(PoolOperation, Result)>, mut balances_by_paymaster: HashMap, - ) -> ProposalContext { + ) -> ProposalContext { let all_sender_addresses: HashSet

= ops_with_simulations .iter() - .map(|(op, _)| op.uo.sender) + .map(|(op, _)| op.uo.sender()) .collect(); - let mut context = ProposalContext::new(); + let mut context = ProposalContext::::new(); let mut paymasters_to_reject = Vec::::new(); - let ov = GasOverheads::default(); - let mut gas_spent = ov.transaction_gas_overhead; + let mut gas_spent = self.settings.chain_spec.transaction_intrinsic_gas; let mut cleanup_keys:Vec = Vec::new(); - + let mut constructed_bundle_size = BUNDLE_BYTE_OVERHEAD; for (po, simulation) in ops_with_simulations { let op = po.clone().uo; let simulation = match simulation { @@ -410,7 +528,7 @@ where // try to use EntityInfos from the latest simulation, but if it doesn't exist use the EntityInfos from the previous simulation let infos = entity_infos.map_or(po.entity_infos, |e| e); context.process_simulation_violations(violations, infos); - context.rejected_ops.push((op, po.entity_infos)); + context.rejected_ops.push((op.into(), po.entity_infos)); } continue; } @@ -428,38 +546,43 @@ where valid_range: simulation.valid_time_range, }, )); - context.rejected_ops.push((op, po.entity_infos)); + context.rejected_ops.push((op.into(), po.entity_infos)); + continue; + } + + let op_size_bytes: usize = op.abi_encoded_size(); + + let op_size_with_offset_word = op_size_bytes.saturating_add(USER_OP_OFFSET_WORD_SIZE); + + if op_size_with_offset_word.saturating_add(constructed_bundle_size) + >= self.settings.chain_spec.max_transaction_size_bytes + { continue; } // Skip this op if the bundle does not have enough remaining gas to execute it. 
- let mut required_gas = get_gas_required_for_op( - gas_spent, - self.settings.chain_id, - ov, - &op, - simulation.requires_post_op, - ); + let mut required_gas = gas_spent + + gas::user_operation_execution_gas_limit(&self.settings.chain_spec, &op, false); - let hc_hash = op.op_hc_hash(); + let hc_hash = op.hc_hash(); let hc_ent = hybrid_compute::get_hc_ent(hc_hash); if hc_ent.is_some() { required_gas += hc_ent.clone().unwrap().oc_gas; println!("HC bundle_properer found hc_ent {:?} op_hash {:?} required_gas {:?}", hc_ent, hc_hash, required_gas); } - if required_gas > self.settings.max_bundle_gas.into() { + if required_gas > self.settings.max_bundle_gas.into() { continue; } if let Some(&other_sender) = simulation .accessed_addresses .iter() - .find(|&address| *address != op.sender && all_sender_addresses.contains(address)) + .find(|&address| *address != op.sender() && all_sender_addresses.contains(address)) { // Exclude ops that access the sender of another op in the // batch, but don't reject them (remove them from pool). - info!("Excluding op from {:?} because it accessed the address of another sender in the bundle.", op.sender); + info!("Excluding op from {:?} because it accessed the address of another sender in the bundle.", op.sender()); self.emit(BuilderEvent::skipped_op( self.builder_index, self.op_hash(&op), @@ -483,71 +606,149 @@ where } // Update the running gas that would need to be be spent to execute the bundle so far. 
- gas_spent += gas::user_operation_execution_gas_limit( - &op, - self.settings.chain_id, - false, - simulation.requires_post_op, - ); + gas_spent += + gas::user_operation_execution_gas_limit(&self.settings.chain_spec, &op, false); if hc_ent.is_some() { gas_spent += hc_ent.clone().unwrap().oc_gas; //println!("HC insert, hc_ent {:?}", hc_ent); - let op2 = hc_ent.clone().unwrap().user_op; - let sim2 = SimulationResult::default(); + let u_op2:UserOperationVariant = hc_ent.clone().unwrap().user_op.into(); + + let sim_result = self.simulator.simulate_validation(u_op2.clone().into(), None, None).await.expect("Failed to unwrap sim_result"); // FIXME + context .groups_by_aggregator .entry(simulation.aggregator_address()) .or_default() .ops_with_simulations - .push(OpWithSimulation { op:op2, simulation:sim2 }); + .push(OpWithSimulation { op:u_op2.into(), simulation:sim_result }); cleanup_keys.push(hc_ent.clone().unwrap().map_key); + } + constructed_bundle_size = + constructed_bundle_size.saturating_add(op_size_with_offset_word); + context .groups_by_aggregator .entry(simulation.aggregator_address()) .or_default() .ops_with_simulations - .push(OpWithSimulation { op, simulation }); + .push(OpWithSimulation { + op: op.into(), + simulation, + }); } - if cleanup_keys.len() > 0 { + if cleanup_keys.len() > 0 { println!("HC cleanup_keys {:?}", cleanup_keys); let cfg = hybrid_compute::HC_CONFIG.lock().unwrap().clone(); let c_nonce = self.entry_point.get_nonce(cfg.sys_account, U256::zero()).await.unwrap(); - let cleanup_op = hybrid_compute::rr_op(&cfg, c_nonce, cleanup_keys).await; + let cleanup_op:UserOperationVariant = hybrid_compute::rr_op(&cfg, c_nonce, cleanup_keys).await.into(); + + let cleanup_sim = self.simulator.simulate_validation(cleanup_op.clone().into(), None, None).await.expect("Failed to unwrap sim_result"); // FIXME context .groups_by_aggregator .entry(None) .or_default() .ops_with_simulations - .push(OpWithSimulation { op:cleanup_op, 
simulation:SimulationResult::default() }); + .push(OpWithSimulation { op:cleanup_op.into(), simulation:cleanup_sim }); } for paymaster in paymasters_to_reject { // No need to update aggregator signatures because we haven't computed them yet. - let _ = - context.reject_entity(Entity::paymaster(paymaster.address), paymaster.is_staked); + let _ = context.reject_entity(paymaster.entity, paymaster.is_staked); } self.compute_all_aggregator_signatures(&mut context).await; context } - async fn reject_index(&self, context: &mut ProposalContext, i: usize) { + async fn check_conditions_met(&self, context: &mut ProposalContext) -> anyhow::Result<()> { + let futs = context + .iter_ops_with_simulations() + .enumerate() + .map(|(i, op)| async move { + self.check_op_conditions_met(&op.simulation.expected_storage) + .await + .map(|reason| (i, reason)) + }) + .collect::>(); + + let to_reject = future::join_all(futs).await.into_iter().flatten(); + + for (index, reason) in to_reject { + self.emit(BuilderEvent::rejected_op( + self.builder_index, + self.op_hash(&context.get_op_at(index)?.op), + OpRejectionReason::ConditionNotMet(reason), + )); + self.reject_index(context, index).await; + } + + Ok(()) + } + + async fn check_op_conditions_met( + &self, + expected_storage: &ExpectedStorage, + ) -> Option { + let futs = expected_storage + .0 + .iter() + .map(|(address, slots)| async move { + let storage = match self + .provider + .batch_get_storage_at(*address, slots.keys().copied().collect()) + .await + { + Ok(storage) => storage, + Err(e) => { + error!("Error getting storage for address {address:?} failing open: {e:?}"); + return None; + } + }; + + for ((slot, expected), actual) in slots.iter().zip(storage) { + if *expected != actual { + return Some(ConditionNotMetReason { + address: *address, + slot: *slot, + expected: *expected, + actual, + }); + } + } + None + }); + + let results = future::join_all(futs).await; + for result in results { + if result.is_some() { + return result; + } + } 
+ None + } + + async fn reject_index(&self, context: &mut ProposalContext, i: usize) { let changed_aggregator = context.reject_index(i); self.compute_aggregator_signatures(context, &changed_aggregator) .await; } - async fn reject_entity(&self, context: &mut ProposalContext, entity: Entity, is_staked: bool) { + async fn reject_entity( + &self, + context: &mut ProposalContext, + entity: Entity, + is_staked: bool, + ) { let changed_aggregators = context.reject_entity(entity, is_staked); self.compute_aggregator_signatures(context, &changed_aggregators) .await; } - async fn compute_all_aggregator_signatures(&self, context: &mut ProposalContext) { + async fn compute_all_aggregator_signatures(&self, context: &mut ProposalContext) { let aggregators: Vec<_> = context .groups_by_aggregator .keys() @@ -560,7 +761,7 @@ where async fn compute_aggregator_signatures<'a>( &self, - context: &mut ProposalContext, + context: &mut ProposalContext, aggregators: impl IntoIterator, ) { let signature_futures = aggregators.into_iter().filter_map(|&aggregator| { @@ -580,17 +781,17 @@ where /// op(s) caused the failure. 
async fn estimate_gas_rejecting_failed_ops( &self, - context: &mut ProposalContext, - ) -> anyhow::Result> { + context: &mut ProposalContext, + ) -> BundleProposerResult> { // sum up the gas needed for all the ops in the bundle // and apply an overhead multiplier let gas = math::increase_by_percent( - context.get_bundle_gas_limit(self.settings.chain_id), + context.get_bundle_gas_limit(&self.settings.chain_spec), BUNDLE_TRANSACTION_GAS_OVERHEAD_PERCENT, ); // call handle ops with the bundle to filter any rejected ops before sending - println!("HC bundle_proposer gas1 {:?} {:?}", gas, context.to_ops_per_aggregator()); + println!("HC bundle_proposer gas1 {:?} {:?}", gas, context.to_ops_per_aggregator()); let handle_ops_out = self .entry_point .call_handle_ops( @@ -628,27 +829,30 @@ where } } - async fn get_ops_from_pool(&self) -> anyhow::Result> { + async fn get_ops_from_pool(&self) -> BundleProposerResult> { // Use builder's index as the shard index to ensure that two builders don't // attempt to bundle the same operations. // // NOTE: this assumes that the pool server has as many shards as there // are builders. - self.pool + Ok(self + .pool .get_ops( self.entry_point.address(), self.settings.max_bundle_size, self.builder_index, ) .await - .context("should get ops from pool") + .context("should get ops from pool")? 
+ .into_iter() + .collect()) } async fn get_balances_by_paymaster( &self, addresses: impl IntoIterator, block_hash: H256, - ) -> anyhow::Result> { + ) -> BundleProposerResult> { let futures = addresses.into_iter().map(|address| async move { let deposit = self .entry_point @@ -665,14 +869,15 @@ where async fn aggregate_signatures( &self, aggregator: Address, - group: &AggregatorGroup, + group: &AggregatorGroup, ) -> (Address, anyhow::Result>) { let ops = group .ops_with_simulations .iter() .map(|op_with_simulation| op_with_simulation.op.clone()) .collect(); - let result = Arc::clone(&self.provider) + let result = self + .entry_point .aggregate_signatures(aggregator, ops) .await .map_err(anyhow::Error::from); @@ -681,7 +886,7 @@ where async fn process_failed_op( &self, - context: &mut ProposalContext, + context: &mut ProposalContext, index: usize, message: String, ) -> anyhow::Result<()> { @@ -741,7 +946,7 @@ where // from the bundle and from the pool. async fn process_post_op_revert( &self, - context: &mut ProposalContext, + context: &mut ProposalContext, gas: U256, ) -> anyhow::Result<()> { let agg_groups = context.to_ops_per_aggregator(); @@ -794,7 +999,7 @@ where async fn check_for_post_op_revert_single_op( &self, - op: UserOperation, + op: UO, gas: U256, op_index: usize, ) -> Vec { @@ -830,7 +1035,7 @@ where async fn check_for_post_op_revert_agg_ops( &self, - group: UserOpsPerAggregator, + group: UserOpsPerAggregator, gas: U256, start_index: usize, ) -> Vec { @@ -871,12 +1076,8 @@ where for op in ops { // Here we use optimistic gas limits for the UOs by assuming none of the paymaster UOs use postOp calls. 
// This way after simulation once we have determined if each UO actually uses a postOp call or not we can still pack a full bundle - let gas = gas::user_operation_execution_gas_limit( - &op.uo, - self.settings.chain_id, - false, - false, - ); + let gas = + gas::user_operation_execution_gas_limit(&self.settings.chain_spec, &op.uo, false); if gas_left < gas { self.emit(BuilderEvent::skipped_op( self.builder_index, @@ -903,22 +1104,26 @@ where }); } - fn op_hash(&self, op: &UserOperation) -> H256 { - op.op_hash(self.entry_point.address(), self.settings.chain_id) + fn op_hash(&self, op: &T) -> H256 + where + T: UserOperation, + { + op.hash(self.entry_point.address(), self.settings.chain_spec.id) } } #[derive(Debug)] -struct OpWithSimulation { - op: UserOperation, +struct OpWithSimulation { + op: UO, simulation: SimulationResult, } -impl OpWithSimulation { - fn op_with_replaced_sig(&self) -> UserOperation { +impl OpWithSimulation { + fn op_with_replaced_sig(&self) -> UO { let mut op = self.op.clone(); - if let Some(aggregator) = &self.simulation.aggregator { - op.signature = aggregator.signature.clone(); + if self.simulation.aggregator.is_some() { + // if using an aggregator, clear out the user op signature + op.clear_signature(); } op } @@ -929,24 +1134,33 @@ impl OpWithSimulation { /// `Vec` that will eventually be passed to the entry /// point, but contains extra context needed for the computation. 
#[derive(Debug)] -struct ProposalContext { - groups_by_aggregator: LinkedHashMap, AggregatorGroup>, - rejected_ops: Vec<(UserOperation, EntityInfos)>, +struct ProposalContext { + groups_by_aggregator: LinkedHashMap, AggregatorGroup>, + rejected_ops: Vec<(UO, EntityInfos)>, // This is a BTreeMap so that the conversion to a Vec is deterministic, mainly for tests entity_updates: BTreeMap, } -#[derive(Debug, Default)] -struct AggregatorGroup { - ops_with_simulations: Vec, +#[derive(Debug)] +struct AggregatorGroup { + ops_with_simulations: Vec>, signature: Bytes, } -impl ProposalContext { +impl Default for AggregatorGroup { + fn default() -> Self { + Self { + ops_with_simulations: Vec::new(), + signature: Bytes::new(), + } + } +} + +impl ProposalContext { fn new() -> Self { Self { - groups_by_aggregator: LinkedHashMap::, AggregatorGroup>::new(), - rejected_ops: Vec::<(UserOperation, EntityInfos)>::new(), + groups_by_aggregator: LinkedHashMap::, AggregatorGroup>::new(), + rejected_ops: Vec::<(UO, EntityInfos)>::new(), entity_updates: BTreeMap::new(), } } @@ -970,7 +1184,7 @@ impl ProposalContext { } } - fn get_op_at(&self, index: usize) -> anyhow::Result<&OpWithSimulation> { + fn get_op_at(&self, index: usize) -> anyhow::Result<&OpWithSimulation> { let mut remaining_i = index; for group in self.groups_by_aggregator.values() { if remaining_i < group.ops_with_simulations.len() { @@ -991,7 +1205,7 @@ impl ProposalContext { if remaining_i < group.ops_with_simulations.len() { let rejected = group.ops_with_simulations.remove(remaining_i); println!("HC reject_index at {:?} of {:?} - {:?}", i, group.ops_with_simulations.len(), rejected.op); - if rejected.op.max_fee_per_gas == U256::from(0) { + if rejected.op.max_fee_per_gas() == U256::from(0) { // Assume an Offchain op if i == group.ops_with_simulations.len() { println!("HC ERR rejecting Cleanup op {:?}", rejected.op); @@ -999,13 +1213,14 @@ impl ProposalContext { println!("HC ERR rejecting offchain op {:?}", rejected.op); } } 
else { - let hc_hash = rejected.op.op_hc_hash(); + let hc_hash = rejected.op.hc_hash(); let hc_ent = hybrid_compute::get_hc_ent(hc_hash); println!("HC rejecting regular op with hash {:?} paired_op {:?}", hc_hash, hc_ent); if hc_ent.is_some() { todo!("Should remove paired op"); } } + self.rejected_ops .push((rejected.op, rejected.simulation.entity_infos)); found_aggregator = Some(aggregator); @@ -1071,7 +1286,7 @@ impl ProposalContext { /// Reject all ops that match the filter, and return the addresses of any aggregators /// whose signature may need to be recomputed. - fn filter_reject(&mut self, filter: impl Fn(&UserOperation) -> bool) -> Vec
{ + fn filter_reject(&mut self, filter: impl Fn(&UO) -> bool) -> Vec
{ let mut changed_aggregators: Vec
= vec![]; let mut aggregators_to_remove: Vec> = vec![]; for (&aggregator, group) in &mut self.groups_by_aggregator { @@ -1097,7 +1312,7 @@ impl ProposalContext { changed_aggregators } - fn to_ops_per_aggregator(&self) -> Vec { + fn to_ops_per_aggregator(&self) -> Vec> { self.groups_by_aggregator .iter() .map(|(&aggregator, group)| UserOpsPerAggregator { @@ -1112,37 +1327,28 @@ impl ProposalContext { .collect() } - fn get_bundle_gas_limit(&self, chain_id: u64) -> U256 { - let ov = GasOverheads::default(); - let mut gas_spent = ov.transaction_gas_overhead; - let mut max_gas = U256::zero(); - for op_with_sim in self.iter_ops_with_simulations() { - let op = &op_with_sim.op; - let required_gas = get_gas_required_for_op( - gas_spent, - chain_id, - ov, - op, - op_with_sim.simulation.requires_post_op, - ); - max_gas = cmp::max(max_gas, required_gas); - gas_spent += gas::user_operation_gas_limit( - op, - chain_id, - false, - op_with_sim.simulation.requires_post_op, - ); - } - max_gas + fn get_bundle_gas_limit(&self, chain_spec: &ChainSpec) -> U256 { + // TODO(danc): in the 0.7 entrypoint we could optimize this by removing the need for + // the 10K gas and 63/64 gas overheads for each op in the bundle and instead calculate exactly + // the limit needed to include that overhead for each op. + // + // In the 0.6 entrypoint we're assuming that we need 1 verification gas buffer for each op in the bundle + // regardless of if it uses a post op or not. We can optimize to calculate the exact gas overhead + // needed to have the buffer for each op. 
+ + self.iter_ops_with_simulations() + .map(|sim_op| gas::user_operation_gas_limit(chain_spec, &sim_op.op, false)) + .fold(U256::zero(), |acc, i| acc + i) + + chain_spec.transaction_intrinsic_gas } - fn iter_ops_with_simulations(&self) -> impl Iterator + '_ { + fn iter_ops_with_simulations(&self) -> impl Iterator> + '_ { self.groups_by_aggregator .values() .flat_map(|group| &group.ops_with_simulations) } - fn iter_ops(&self) -> impl Iterator + '_ { + fn iter_ops(&self) -> impl Iterator + '_ { self.iter_ops_with_simulations().map(|op| &op.op) } @@ -1156,12 +1362,9 @@ impl ProposalContext { if entity_infos.factory.map_or(false, |f| f.is_staked) { let factory = entity_infos.factory.unwrap(); self.entity_updates.insert( - factory.address, + factory.address(), EntityUpdate { - entity: Entity { - kind: EntityType::Factory, - address: factory.address, - }, + entity: factory.entity, update_type: EntityUpdateType::StakedInvalidation, }, ); @@ -1171,12 +1374,9 @@ impl ProposalContext { // [EREP-030] When there is a staked sender (without a staked factory) any error in validation is attributed to it. 
if entity_infos.sender.is_staked { self.entity_updates.insert( - entity_infos.sender.address, + entity_infos.sender.address(), EntityUpdate { - entity: Entity { - kind: EntityType::Account, - address: entity_infos.sender.address, - }, + entity: entity_infos.sender.entity, update_type: EntityUpdateType::StakedInvalidation, }, ); @@ -1206,12 +1406,13 @@ impl ProposalContext { self.add_entity_update(entity, entity_infos) } SimulationViolation::NotStaked(stake_data) => { - self.add_entity_update(stake_data.entity, entity_infos) + self.add_entity_update(stake_data.needs_stake, entity_infos) } SimulationViolation::UnintendedRevertWithMessage(entity_type, message, address) => { match &message[..4] { - // do not penalize an entity for invalid account nonces, which can occur without malicious intent from the sender - "AA25" => {} + // do not penalize an entity for invalid account nonces or already deployed senders, + // which can occur without malicious intent from the sender or factory + "AA10" | "AA25" => {} _ => { if let Some(entity_address) = address { self.add_entity_update( @@ -1248,7 +1449,7 @@ impl ProposalContext { fn add_entity_update(&mut self, entity: Entity, entity_infos: EntityInfos) { let entity_update = EntityUpdate { entity, - update_type: ProposalContext::get_entity_update_type(entity.kind, entity_infos), + update_type: ProposalContext::::get_entity_update_type(entity.kind, entity_infos), }; self.entity_updates.insert(entity.address, entity_update); } @@ -1305,37 +1506,22 @@ impl ProposalContext { } } -fn get_gas_required_for_op( - gas_spent: U256, - chain_id: u64, - ov: GasOverheads, - op: &UserOperation, - requires_post_op: bool, -) -> U256 { - let post_exec_req_gas = if requires_post_op { - cmp::max(op.verification_gas_limit, ov.bundle_transaction_gas_buffer) - } else { - ov.bundle_transaction_gas_buffer - }; - - gas_spent - + gas::user_operation_pre_verification_gas_limit(op, chain_id, false) - + op.verification_gas_limit * 2 - + op.call_gas_limit - + 
post_exec_req_gas -} - #[cfg(test)] mod tests { + use std::time::Duration; + use anyhow::anyhow; use ethers::{ types::{H160, U64}, utils::parse_units, }; - use rundler_pool::MockPoolServer; - use rundler_provider::{AggregatorSimOut, MockEntryPoint, MockProvider}; - use rundler_sim::{MockSimulator, SimulationViolation, ViolationError}; - use rundler_types::ValidTimeRange; + use rundler_provider::{AggregatorSimOut, MockEntryPointV0_6, MockProvider}; + use rundler_sim::MockSimulator; + use rundler_types::{ + pool::{MockPool, SimulationViolation}, + v0_6::{UserOperation, ENTRY_POINT_INNER_GAS_OVERHEAD}, + UserOperation as UserOperationTrait, ValidTimeRange, + }; use super::*; @@ -1354,13 +1540,14 @@ mod tests { }]) .await; - let ov = GasOverheads::default(); + let cs = ChainSpec::default(); + let expected_gas = math::increase_by_percent( op.pre_verification_gas + op.verification_gas_limit * 2 + op.call_gas_limit - + ov.bundle_transaction_gas_buffer - + ov.transaction_gas_overhead, + + cs.transaction_intrinsic_gas + + ENTRY_POINT_INNER_GAS_OVERHEAD, BUNDLE_TRANSACTION_GAS_OVERHEAD_PERCENT, ); @@ -1510,6 +1697,8 @@ mod tests { vec![], base_fee, max_priority_fee_per_gas, + false, + ExpectedStorage::default(), ) .await; assert_eq!( @@ -1544,6 +1733,8 @@ mod tests { vec![], base_fee, max_priority_fee_per_gas, + false, + ExpectedStorage::default(), ) .await; assert_eq!( @@ -1586,6 +1777,8 @@ mod tests { vec![], base_fee, max_priority_fee_per_gas, + false, + ExpectedStorage::default(), ) .await; assert_eq!( @@ -1620,7 +1813,7 @@ mod tests { let op_b_aggregated_sig = 21; let aggregator_a_signature = 101; let aggregator_b_signature = 102; - let bundle = mock_make_bundle( + let mut bundle = mock_make_bundle( vec![ MockOp { op: unaggregated_op.clone(), @@ -1677,14 +1870,20 @@ mod tests { vec![], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await; // Ops should be grouped by aggregator. 
Further, the `signature` field - // of each op with an aggregator should be replaced with what was - // returned from simulation. + // of each op with an aggregator should be empty. + + bundle + .ops_per_aggregator + .sort_by(|a, b| a.aggregator.cmp(&b.aggregator)); + assert_eq!( - HashSet::from_iter(bundle.ops_per_aggregator), - HashSet::from([ + bundle.ops_per_aggregator, + vec![ UserOpsPerAggregator { user_ops: vec![unaggregated_op], ..Default::default() @@ -1692,11 +1891,11 @@ mod tests { UserOpsPerAggregator { user_ops: vec![ UserOperation { - signature: bytes(op_a1_aggregated_sig), + signature: Bytes::new(), ..aggregated_op_a1 }, UserOperation { - signature: bytes(op_a2_aggregated_sig), + signature: Bytes::new(), ..aggregated_op_a2 } ], @@ -1705,13 +1904,13 @@ mod tests { }, UserOpsPerAggregator { user_ops: vec![UserOperation { - signature: bytes(op_b_aggregated_sig), + signature: Bytes::new(), ..aggregated_op_b }], aggregator: aggregator_b_address, signature: bytes(aggregator_b_signature) }, - ]), + ], ); } @@ -1761,6 +1960,8 @@ mod tests { vec![deposit, deposit, deposit], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await; @@ -1822,6 +2023,8 @@ mod tests { vec![deposit, deposit, deposit], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await; @@ -1837,7 +2040,7 @@ mod tests { assert_eq!( bundle.gas_estimate, U256::from(math::increase_by_percent( - 9_000_000 + 5_000 + 21_000, + 9_000_000 + 2 * 5_000 + 21_000, BUNDLE_TRANSACTION_GAS_OVERHEAD_PERCENT )) ); @@ -1845,9 +2048,9 @@ mod tests { #[tokio::test] async fn test_bundle_gas_limit() { + let cs = ChainSpec::default(); let op1 = op_with_gas(100_000.into(), 100_000.into(), 1_000_000.into(), false); let op2 = op_with_gas(100_000.into(), 100_000.into(), 200_000.into(), false); - let chain_id = 1; let mut groups_by_aggregator = LinkedHashMap::new(); groups_by_aggregator.insert( None, @@ -1877,23 +2080,24 @@ mod tests { entity_updates: BTreeMap::new(), }; - // The 
gas requirement from the execution of the first UO is: g >= p_1 + 2v_1 + c_1 + 5000 - // The gas requirement from the execution of the second UO is: g >= p_1 + v_1 + c_1 + p_2 + 2v_2 + c_2 + 5000 - // The first condition dominates and determines the expected gas limit let expected_gas_limit = op1.pre_verification_gas + op1.verification_gas_limit * 2 + op1.call_gas_limit + 5_000 + + op2.pre_verification_gas + + op2.verification_gas_limit * 2 + + op2.call_gas_limit + + 5_000 + 21_000; - assert_eq!(context.get_bundle_gas_limit(chain_id), expected_gas_limit); + assert_eq!(context.get_bundle_gas_limit(&cs), expected_gas_limit); } #[tokio::test] async fn test_bundle_gas_limit_with_paymaster_op() { + let cs = ChainSpec::default(); let op1 = op_with_gas(100_000.into(), 100_000.into(), 1_000_000.into(), true); // has paymaster let op2 = op_with_gas(100_000.into(), 100_000.into(), 200_000.into(), false); - let chain_id = 1; let mut groups_by_aggregator = LinkedHashMap::new(); groups_by_aggregator.insert( None, @@ -1922,19 +2126,17 @@ mod tests { rejected_ops: vec![], entity_updates: BTreeMap::new(), }; - let gas_limit = context.get_bundle_gas_limit(chain_id); + let gas_limit = context.get_bundle_gas_limit(&cs); - // The gas requirement from the execution of the first UO is: g >= p_1 + 3v_1 + c_1 - // The gas requirement from the execution of the second UO is: g >= p_1 + 3v_1 + c_1 + p_2 + 2v_2 + c_2 + 5000 - // The first condition dominates and determines the expected gas limit let expected_gas_limit = op1.pre_verification_gas + op1.verification_gas_limit * 3 + op1.call_gas_limit + + 5_000 + op2.pre_verification_gas + op2.verification_gas_limit * 2 + op2.call_gas_limit - + 21_000 - + 5_000; + + 5_000 + + 21_000; assert_eq!(gas_limit, expected_gas_limit); } @@ -1952,6 +2154,8 @@ mod tests { vec![], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await; @@ -1984,6 +2188,8 @@ mod tests { vec![], U256::zero(), U256::zero(), + false, + 
ExpectedStorage::default(), ) .await; @@ -2050,6 +2256,8 @@ mod tests { vec![], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await; @@ -2066,6 +2274,76 @@ mod tests { ); } + #[tokio::test] + async fn test_condition_not_met_match() { + let op = default_op(); + + let mut expected_storage = ExpectedStorage::default(); + expected_storage.insert(address(1), U256::zero(), U256::zero()); + let actual_storage = expected_storage.clone(); + + let bundle = mock_make_bundle( + vec![MockOp { + op: op.clone(), + simulation_result: Box::new(move || { + Ok(SimulationResult { + expected_storage: expected_storage.clone(), + ..Default::default() + }) + }), + }], + vec![], + vec![HandleOpsOut::Success], + vec![], + U256::zero(), + U256::zero(), + true, + actual_storage, + ) + .await; + + assert_eq!( + bundle.ops_per_aggregator, + vec![UserOpsPerAggregator { + user_ops: vec![op], + ..Default::default() + }] + ); + } + + #[tokio::test] + async fn test_condition_not_met_mismatch() { + let op = default_op(); + + let mut expected_storage = ExpectedStorage::default(); + expected_storage.insert(address(1), U256::zero(), U256::zero()); + let mut actual_storage = ExpectedStorage::default(); + actual_storage.insert(address(1), U256::zero(), U256::from(1)); + + let bundle = mock_make_bundle( + vec![MockOp { + op: op.clone(), + simulation_result: Box::new(move || { + Ok(SimulationResult { + expected_storage: expected_storage.clone(), + ..Default::default() + }) + }), + }], + vec![], + vec![HandleOpsOut::Success], + vec![], + U256::zero(), + U256::zero(), + true, + actual_storage, + ) + .await; + + assert!(bundle.ops_per_aggregator.is_empty()); + assert_eq!(bundle.rejected_ops, vec![op]); + } + struct MockOp { op: UserOperation, simulation_result: Box Result + Send + Sync>, @@ -2076,7 +2354,7 @@ mod tests { signature: Box anyhow::Result> + Send + Sync>, } - async fn simple_make_bundle(mock_ops: Vec) -> Bundle { + async fn simple_make_bundle(mock_ops: Vec) -> Bundle { 
mock_make_bundle( mock_ops, vec![], @@ -2084,10 +2362,13 @@ mod tests { vec![], U256::zero(), U256::zero(), + false, + ExpectedStorage::default(), ) .await } + #[allow(clippy::too_many_arguments)] async fn mock_make_bundle( mock_ops: Vec, mock_aggregators: Vec, @@ -2095,7 +2376,9 @@ mod tests { mock_paymaster_deposits: Vec, base_fee: U256, max_priority_fee_per_gas: U256, - ) -> Bundle { + notify_condition_not_met: bool, + actual_storage: ExpectedStorage, + ) -> Bundle { let entry_point_address = address(123); let beneficiary = address(124); let current_block_hash = hash(125); @@ -2104,20 +2387,26 @@ mod tests { let ops: Vec<_> = mock_ops .iter() .map(|MockOp { op, .. }| PoolOperation { - uo: op.clone(), + uo: op.clone().into(), expected_code_hash, - ..Default::default() + entry_point: entry_point_address, + sim_block_hash: current_block_hash, + sim_block_number: 0, + account_is_staked: false, + valid_time_range: ValidTimeRange::default(), + entity_infos: EntityInfos::default(), + aggregator: None, }) .collect(); - let mut pool_client = MockPoolServer::new(); + let mut pool_client = MockPool::new(); pool_client .expect_get_ops() .returning(move |_, _, _| Ok(ops.clone())); let simulations_by_op: HashMap<_, _> = mock_ops .into_iter() - .map(|op| (op.op.op_hash(entry_point_address, 0), op.simulation_result)) + .map(|op| (op.op.hash(entry_point_address, 0), op.simulation_result)) .collect(); let mut simulator = MockSimulator::new(); simulator @@ -2125,8 +2414,8 @@ mod tests { .withf(move |_, &block_hash, &code_hash| { block_hash == Some(current_block_hash) && code_hash == Some(expected_code_hash) }) - .returning(move |op, _, _| simulations_by_op[&op.op_hash(entry_point_address, 0)]()); - let mut entry_point = MockEntryPoint::new(); + .returning(move |op, _, _| simulations_by_op[&op.hash(entry_point_address, 0)]()); + let mut entry_point = MockEntryPointV0_6::new(); entry_point .expect_address() .return_const(entry_point_address); @@ -2148,6 +2437,7 @@ mod tests { 
.into_iter() .map(|agg| (agg.address, agg.signature)) .collect(); + let mut provider = MockProvider::new(); provider .expect_get_latest_block_hash_and_number() @@ -2158,18 +2448,28 @@ mod tests { provider .expect_get_max_priority_fee() .returning(move || Ok(max_priority_fee_per_gas)); - provider + if notify_condition_not_met { + for (addr, slots) in actual_storage.0.into_iter() { + let values = slots.values().cloned().collect::>(); + provider + .expect_batch_get_storage_at() + .withf(move |a, s| *a == addr && s.iter().all(|slot| slots.contains_key(slot))) + .returning(move |_, _| Ok(values.clone())); + } + } + + entry_point .expect_aggregate_signatures() - .returning(move |address, _| Ok(signatures_by_aggregator[&address]()?)); + .returning(move |address, _| Ok(signatures_by_aggregator[&address]().unwrap())); let (event_sender, _) = broadcast::channel(16); - let proposer = BundleProposerImpl::new( + let mut proposer = BundleProposerImpl::new( 0, pool_client, simulator, entry_point, Arc::new(provider), Settings { - chain_id: 0, + chain_spec: ChainSpec::default(), max_bundle_size, max_bundle_gas: 10_000_000, beneficiary, @@ -2178,8 +2478,13 @@ mod tests { }, event_sender, ); + + if notify_condition_not_met { + proposer.notify_condition_not_met(); + } + proposer - .make_bundle(None) + .make_bundle(None, false) .await .expect("should make a bundle") } diff --git a/crates/builder/src/bundle_sender.rs b/crates/builder/src/bundle_sender.rs index 3b84d0bd..088a130a 100644 --- a/crates/builder/src/bundle_sender.rs +++ b/crates/builder/src/bundle_sender.rs @@ -11,31 +11,34 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. 
-use std::sync::{ - atomic::{AtomicBool, Ordering}, - Arc, -}; +use std::{marker::PhantomData, sync::Arc, time::Duration}; use anyhow::{bail, Context}; use async_trait::async_trait; use ethers::types::{transaction::eip2718::TypedTransaction, Address, H256, U256}; use futures_util::StreamExt; -use rundler_pool::PoolServer; -use rundler_provider::EntryPoint; +#[cfg(test)] +use mockall::automock; +use rundler_provider::{BundleHandler, EntryPoint}; use rundler_sim::ExpectedStorage; -use rundler_types::{EntityUpdate, GasFees, UserOperation}; +use rundler_types::{ + builder::BundlingMode, + chain::ChainSpec, + pool::{NewHead, Pool}, + EntityUpdate, UserOperation, +}; use rundler_utils::emit::WithEntryPoint; use tokio::{ join, - sync::{broadcast, mpsc, oneshot}, + sync::{broadcast, mpsc, mpsc::UnboundedReceiver, oneshot}, }; -use tracing::{error, info, trace, warn}; +use tracing::{debug, error, info, instrument, warn}; use rundler_types::hybrid_compute; use crate::{ - bundle_proposer::BundleProposer, + bundle_proposer::{Bundle, BundleProposer, BundleProposerError}, emit::{BuilderEvent, BundleTxDetails}, - transaction_tracker::{SendResult, TrackerUpdate, TransactionTracker}, + transaction_tracker::{TrackerUpdate, TransactionTracker, TransactionTrackerError}, }; #[async_trait] @@ -45,29 +48,25 @@ pub(crate) trait BundleSender: Send + Sync + 'static { #[derive(Debug)] pub(crate) struct Settings { - pub(crate) replacement_fee_percent_increase: u64, - pub(crate) max_fee_increases: u64, + pub(crate) max_replacement_underpriced_blocks: u64, + pub(crate) max_cancellation_fee_increases: u64, + pub(crate) max_blocks_to_wait_for_mine: u64, } #[derive(Debug)] -pub(crate) struct BundleSenderImpl -where - P: BundleProposer, - E: EntryPoint, - T: TransactionTracker, - C: PoolServer, -{ +pub(crate) struct BundleSenderImpl { builder_index: u64, - manual_bundling_mode: Arc, - send_bundle_receiver: mpsc::Receiver, - chain_id: u64, + bundle_action_receiver: Option>, + chain_spec: ChainSpec, 
beneficiary: Address, proposer: P, entry_point: E, - transaction_tracker: T, + transaction_tracker: Option, pool: C, settings: Settings, event_sender: broadcast::Sender>, + metrics: BuilderMetrics, + _uo_type: PhantomData, } #[derive(Debug)] @@ -77,10 +76,18 @@ struct BundleTx { op_hashes: Vec, } +pub enum BundleSenderAction { + SendBundle(SendBundleRequest), + ChangeMode(BundlingMode), +} + pub struct SendBundleRequest { pub responder: oneshot::Sender, } +/// Response to a `SendBundleRequest` after +/// going through a full cycle of bundling, sending, +/// and waiting for the transaction to be mined. #[derive(Debug)] pub enum SendBundleResult { Success { @@ -89,152 +96,78 @@ pub enum SendBundleResult { tx_hash: H256, }, NoOperationsInitially, - NoOperationsAfterFeeIncreases { - initial_op_count: usize, - attempt_number: u64, - }, StalledAtMaxFeeIncreases, Error(anyhow::Error), } +// Internal result of attempting to send a bundle. +enum SendBundleAttemptResult { + // The bundle was successfully sent + Success, + // There are no operations available to bundle + NoOperationsInitially, + // There were no operations after the fee was increased + NoOperationsAfterFeeFilter, + // There were no operations after the bundle was simulated + NoOperationsAfterSimulation, + // Replacement Underpriced + ReplacementUnderpriced, + // Condition not met + ConditionNotMet, + // Nonce too low + NonceTooLow, +} + #[async_trait] -impl BundleSender for BundleSenderImpl +impl BundleSender for BundleSenderImpl where - P: BundleProposer, - E: EntryPoint, + UO: UserOperation, + P: BundleProposer, + E: EntryPoint + BundleHandler, T: TransactionTracker, - C: PoolServer, + C: Pool, { /// Loops forever, attempting to form and send a bundle on each new block, /// then waiting for one bundle to be mined or dropped before forming the /// next one. 
+ #[instrument(skip_all, fields(entry_point = self.entry_point.address().to_string(), builder_index = self.builder_index))] async fn send_bundles_in_loop(mut self) -> anyhow::Result<()> { - let Ok(mut new_heads) = self.pool.subscribe_new_heads().await else { - error!("Failed to subscribe to new blocks"); - bail!("failed to subscribe to new blocks"); - }; + // trigger for sending bundles + let sender_trigger = BundleSenderTrigger::new( + &self.pool, + self.bundle_action_receiver.take().unwrap(), + Duration::from_millis(self.chain_spec.bundle_max_send_interval_millis), + ) + .await?; - // The new_heads stream can buffer up multiple blocks, but we only want to consume the latest one. - // This task is used to consume the new heads and place them onto a channel that can be synchronously - // consumed until the latest block is reached. - let (tx, mut rx) = mpsc::unbounded_channel(); - tokio::spawn(async move { - loop { - match new_heads.next().await { - Some(b) => { - if tx.send(b).is_err() { - error!("Failed to buffer new block for bundle sender"); - return; - } - } - None => { - error!("Block stream ended"); - return; - } - } - } - }); + // initial state + let mut state = + SenderMachineState::new(sender_trigger, self.transaction_tracker.take().unwrap()); - println!("HC starting send_bundles loop"); + println!("HC starting bundle_sender loop"); loop { - let mut send_bundle_response: Option> = None; - let mut last_block = None; - hybrid_compute::expire_hc_cache(); - - if self.manual_bundling_mode.load(Ordering::Relaxed) { - if let Some(r) = self.send_bundle_receiver.recv().await { - send_bundle_response = Some(r.responder); - } else { - error!("Bundle stream closed in manual mode"); - bail!("Bundle stream closed in manual mode"); - } - } else { - // Wait for new block. Block number doesn't matter as the pool will only notify of new blocks - // after the pool has updated its state. 
The bundle will be formed using the latest pool state - // and can land in the next block - last_block = rx.recv().await; - - if last_block.is_none() { - error!("Block stream closed"); - bail!("Block stream closed"); - } - // Consume any other blocks that may have been buffered up - loop { - match rx.try_recv() { - Ok(b) => { - last_block = Some(b); - } - Err(mpsc::error::TryRecvError::Empty) => { - break; - } - Err(mpsc::error::TryRecvError::Disconnected) => { - error!("Block stream closed"); - bail!("Block stream closed"); - } - } - } - } - - // Wait for new block. Block number doesn't matter as the pool will only notify of new blocks - // after the pool has updated its state. The bundle will be formed using the latest pool state - // and can land in the next block - self.check_for_and_log_transaction_update().await; - let result = self.send_bundle_with_increasing_gas_fees().await; - - match &result { - SendBundleResult::Success { - block_number, - attempt_number, - tx_hash, - } => { - println!("HC send_bundle_with_increasing_gas_fees result {:?}", result); - if *attempt_number == 0 { - info!("Bundle with hash {tx_hash:?} landed in block {block_number}"); - } else { - info!("Bundle with hash {tx_hash:?} landed in block {block_number} after increasing gas fees {attempt_number} time(s)"); - } - } - SendBundleResult::NoOperationsInitially => trace!("No ops to send at block {}", last_block.unwrap_or_default().block_number), - SendBundleResult::NoOperationsAfterFeeIncreases { - initial_op_count, - attempt_number, - } => { - println!("HC send_bundle_with_increasing_gas_fees result {:?}", result); - info!("Bundle initially had {initial_op_count} operations, but after increasing gas fees {attempt_number} time(s) it was empty"); - } - SendBundleResult::StalledAtMaxFeeIncreases => { - println!("HC send_bundle_with_increasing_gas_fees result {:?}", result); - warn!("Bundle failed to mine after {} fee increases", self.settings.max_fee_increases); - } - 
SendBundleResult::Error(error) => { - println!("HC send_bundle_with_increasing_gas_fees result {:?}", result); - BuilderMetrics::increment_bundle_txns_failed(self.builder_index); - error!("Failed to send bundle. Will retry next block: {error:#?}"); - } - } - - if let Some(t) = send_bundle_response.take() { - if t.send(result).is_err() { - error!("Failed to send bundle result to manual caller"); - } + if let Err(e) = self.step_state(&mut state).await { + error!("Error in bundle sender loop: {e:#?}"); + self.metrics.increment_state_machine_errors(); + state.reset(); } } } } -impl BundleSenderImpl +impl BundleSenderImpl where - P: BundleProposer, - E: EntryPoint, + UO: UserOperation, + P: BundleProposer, + E: EntryPoint + BundleHandler, T: TransactionTracker, - C: PoolServer, + C: Pool, { #[allow(clippy::too_many_arguments)] pub(crate) fn new( builder_index: u64, - manual_bundling_mode: Arc, - send_bundle_receiver: mpsc::Receiver, - chain_id: u64, + bundle_action_receiver: mpsc::Receiver, + chain_spec: ChainSpec, beneficiary: Address, proposer: P, entry_point: E, @@ -245,241 +178,446 @@ where ) -> Self { Self { builder_index, - manual_bundling_mode, - send_bundle_receiver, - chain_id, + bundle_action_receiver: Some(bundle_action_receiver), + chain_spec, beneficiary, proposer, - entry_point, - transaction_tracker, + transaction_tracker: Some(transaction_tracker), pool, settings, event_sender, + metrics: BuilderMetrics { + builder_index, + entry_point: entry_point.address(), + }, + entry_point, + _uo_type: PhantomData, } } - async fn check_for_and_log_transaction_update(&self) { - let update = self.transaction_tracker.check_for_update_now().await; - let update = match update { - Ok(update) => update, - Err(error) => { - error!("Failed to check for transaction updates: {error:#?}"); - return; + async fn step_state( + &mut self, + state: &mut SenderMachineState, + ) -> anyhow::Result<()> { + let tracker_update = state.wait_for_trigger().await?; + + match state.inner { + 
InnerState::Building(building_state) => { + self.handle_building_state(state, building_state).await?; } - }; - let Some(update) = update else { - return; - }; - match update { - TrackerUpdate::Mined { - tx_hash, - block_number, - attempt_number, - gas_limit, - gas_used, - .. - } => { - BuilderMetrics::increment_bundle_txns_success(self.builder_index); - BuilderMetrics::set_bundle_gas_stats(gas_limit, gas_used); - if attempt_number == 0 { - info!("Bundle with hash {tx_hash:?} landed in block {block_number}"); + InnerState::Pending(pending_state) => { + self.handle_pending_state(state, pending_state, tracker_update) + .await?; + } + InnerState::Cancelling(cancelling_state) => { + self.handle_cancelling_state(state, cancelling_state) + .await?; + } + InnerState::CancelPending(cancel_pending_state) => { + self.handle_cancel_pending_state(state, cancel_pending_state, tracker_update) + .await?; + } + } + + Ok(()) + } + + async fn handle_building_state( + &mut self, + state: &mut SenderMachineState, + inner: BuildingState, + ) -> anyhow::Result<()> { + // send bundle + let block_number = state.block_number(); + debug!("Building bundle on block {}", block_number); + let result = self.send_bundle(state, inner.fee_increase_count).await; + + // handle result + match result { + Ok(SendBundleAttemptResult::Success) => { + // sent the bundle + info!("Bundle sent successfully"); + state.update(InnerState::Pending(inner.to_pending( + block_number + self.settings.max_blocks_to_wait_for_mine, + ))); + } + Ok(SendBundleAttemptResult::NoOperationsInitially) => { + debug!("No operations available initially"); + state.complete(Some(SendBundleResult::NoOperationsInitially)); + } + Ok(SendBundleAttemptResult::NoOperationsAfterSimulation) => { + debug!("No operations available after simulation"); + state.complete(Some(SendBundleResult::NoOperationsInitially)); + } + Ok(SendBundleAttemptResult::NoOperationsAfterFeeFilter) => { + debug!("No operations to bundle after fee filtering"); + if 
let Some(underpriced_info) = inner.underpriced_info { + // If we are here, there are UOs in the pool that may be correctly priced, but are being blocked by an underpriced replacement + // after a fee increase. If we repeatedly get into this state, initiate a cancellation. + if block_number - underpriced_info.since_block + >= self.settings.max_replacement_underpriced_blocks + { + warn!("No operations available, but last replacement underpriced, moving to cancelling state. Round: {}. Since block {}. Current block {}. Max underpriced blocks: {}", underpriced_info.rounds, underpriced_info.since_block, block_number, self.settings.max_replacement_underpriced_blocks); + state.update(InnerState::Cancelling(inner.to_cancelling())); + } else { + info!("No operations available, but last replacement underpriced, starting over and waiting for next trigger. Round: {}. Since block {}. Current block {}", underpriced_info.rounds, underpriced_info.since_block, block_number); + state.update_and_abandon(InnerState::Building(inner.underpriced_round())); + } + } else if inner.fee_increase_count > 0 { + warn!( + "Abandoning bundle after {} fee increases, no operations available after fee increase", + inner.fee_increase_count + ); + self.metrics.increment_bundle_txns_abandoned(); + + // abandon the bundle by starting a new bundle process + // If the node we are using still has the transaction in the mempool, its + // possible we will get a `ReplacementUnderpriced` on the next iteration + // and will start a cancellation. 
+ state.abandon(); } else { - info!("Bundle with hash {tx_hash:?} landed in block {block_number} after increasing gas fees {attempt_number} time(s)"); + debug!("No operations available, waiting for next trigger"); + state.complete(Some(SendBundleResult::NoOperationsInitially)); } } - TrackerUpdate::StillPendingAfterWait => (), - TrackerUpdate::LatestTxDropped { nonce } => { - self.emit(BuilderEvent::latest_transaction_dropped( - self.builder_index, - nonce.low_u64(), - )); - BuilderMetrics::increment_bundle_txns_dropped(self.builder_index); - info!("Previous transaction dropped by sender"); + Ok(SendBundleAttemptResult::NonceTooLow) => { + // reset the transaction tracker and try again + info!("Nonce too low, starting new bundle attempt"); + state.reset(); } - TrackerUpdate::NonceUsedForOtherTx { nonce } => { - self.emit(BuilderEvent::nonce_used_for_other_transaction( - self.builder_index, - nonce.low_u64(), + Ok(SendBundleAttemptResult::ReplacementUnderpriced) => { + info!("Replacement transaction underpriced, marking as underpriced. 
Num fee increases {:?}", inner.fee_increase_count); + // unabandon to allow fee estimation to consider any submitted transactions, wait for next trigger + state.update_and_unabandon(InnerState::Building( + inner.replacement_underpriced(block_number), )); - BuilderMetrics::increment_bundle_txns_nonce_used(self.builder_index); - info!("Nonce used by external transaction") } - TrackerUpdate::ReplacementUnderpriced => { - BuilderMetrics::increment_bundle_txn_replacement_underpriced(self.builder_index); - info!("Replacement transaction underpriced") + Ok(SendBundleAttemptResult::ConditionNotMet) => { + info!("Condition not met, notifying proposer and starting new bundle attempt"); + self.proposer.notify_condition_not_met(); + state.update(InnerState::Building(inner.retry())); + } + Err(error) => { + error!("Bundle send error {error:?}"); + self.metrics.increment_bundle_txns_failed(); + let send_bundle_result = Some(SendBundleResult::Error(error)); + state.complete(send_bundle_result); } - }; - } - - /// Constructs a bundle and sends it to the entry point as a transaction. If - /// the bundle fails to be mined after - /// `settings.max_blocks_to_wait_for_mine` blocks, increases the gas fees by - /// enough to send a replacement transaction, then constructs a new bundle - /// using the new, higher gas requirements. Continues to retry with higher - /// gas costs until one of the following happens: - /// - /// 1. A transaction succeeds (not necessarily the most recent one) - /// 2. The gas fees are high enough that the bundle is empty because there - /// are no ops that meet the fee requirements. - /// 3. The transaction has not succeeded after `settings.max_fee_increases` - /// replacements. 
- async fn send_bundle_with_increasing_gas_fees(&self) -> SendBundleResult { - let result = self.send_bundle_with_increasing_gas_fees_inner().await; - match result { - Ok(result) => result, - Err(error) => SendBundleResult::Error(error), } - } - /// Helper function returning `Result` to be able to use `?`. - async fn send_bundle_with_increasing_gas_fees_inner(&self) -> anyhow::Result { - let (nonce, mut required_fees) = self.transaction_tracker.get_nonce_and_required_fees()?; - let mut initial_op_count: Option = None; + Ok(()) + } - for fee_increase_count in 0..=self.settings.max_fee_increases { - let Some(bundle_tx) = self.get_bundle_tx(nonce, required_fees).await? else { - self.emit(BuilderEvent::formed_bundle( - self.builder_index, - None, - nonce.low_u64(), - fee_increase_count, - required_fees, - )); - return Ok(match initial_op_count { - Some(initial_op_count) => { - BuilderMetrics::increment_bundle_txns_abandoned(self.builder_index); - SendBundleResult::NoOperationsAfterFeeIncreases { - initial_op_count, - attempt_number: fee_increase_count, - } - } - None => SendBundleResult::NoOperationsInitially, - }); - }; - println!("HC before BundleTx op_hashes {:?}", bundle_tx.op_hashes); - let BundleTx { - tx, - expected_storage, - op_hashes, - } = bundle_tx; - if initial_op_count.is_none() { - initial_op_count = Some(op_hashes.len()); - } - let current_fees = GasFees::from(&tx); - - BuilderMetrics::increment_bundle_txns_sent(self.builder_index); - BuilderMetrics::set_current_fees(¤t_fees); - - let send_result = self - .transaction_tracker - .send_transaction(tx.clone(), &expected_storage) - .await?; - let update = match send_result { - SendResult::TrackerUpdate(update) => update, - SendResult::TxHash(tx_hash) => { - self.emit(BuilderEvent::formed_bundle( - self.builder_index, - Some(BundleTxDetails { - tx_hash, - tx, - op_hashes: Arc::new(op_hashes), - }), - nonce.low_u64(), - fee_increase_count, - required_fees, - )); - 
self.transaction_tracker.wait_for_update().await? - } - }; - //println!("HC bundle_sender update {:?}", update); + async fn handle_pending_state( + &mut self, + state: &mut SenderMachineState, + inner: PendingState, + tracker_update: Option, + ) -> anyhow::Result<()> { + if let Some(update) = tracker_update { match update { TrackerUpdate::Mined { - tx_hash, - nonce, block_number, attempt_number, gas_limit, gas_used, + tx_hash, + nonce, + .. } => { + info!("Bundle transaction mined"); + self.metrics.process_bundle_txn_success(gas_limit, gas_used); self.emit(BuilderEvent::transaction_mined( self.builder_index, tx_hash, nonce.low_u64(), block_number, )); - BuilderMetrics::increment_bundle_txns_success(self.builder_index); - BuilderMetrics::set_bundle_gas_stats(gas_limit, gas_used); - return Ok(SendBundleResult::Success { + let send_bundle_result = Some(SendBundleResult::Success { block_number, attempt_number, tx_hash, }); - } - TrackerUpdate::StillPendingAfterWait => { - info!("Transaction not mined for several blocks") + state.complete(send_bundle_result); } TrackerUpdate::LatestTxDropped { nonce } => { + info!("Latest transaction dropped, starting new bundle attempt"); self.emit(BuilderEvent::latest_transaction_dropped( self.builder_index, nonce.low_u64(), )); - BuilderMetrics::increment_bundle_txns_dropped(self.builder_index); - info!("Previous transaction dropped by sender"); + self.metrics.increment_bundle_txns_dropped(); + // try again, increasing fees + state.update(InnerState::Building(inner.to_building())); } TrackerUpdate::NonceUsedForOtherTx { nonce } => { + info!("Nonce used externally, starting new bundle attempt"); self.emit(BuilderEvent::nonce_used_for_other_transaction( self.builder_index, nonce.low_u64(), )); - BuilderMetrics::increment_bundle_txns_nonce_used(self.builder_index); - bail!("nonce used by external transaction") + self.metrics.increment_bundle_txns_nonce_used(); + state.reset(); } - TrackerUpdate::ReplacementUnderpriced => { - 
BuilderMetrics::increment_bundle_txn_replacement_underpriced( - self.builder_index, - ); - info!("Replacement transaction underpriced, increasing fees") - } - }; + } + } else if state.block_number() >= inner.until { + // start replacement, don't wait for trigger. Continue + // to attempt until there are no longer any UOs priced high enough + // to bundle. info!( - "Bundle transaction failed to mine after {fee_increase_count} fee increases (maxFeePerGas: {}, maxPriorityFeePerGas: {}).", - current_fees.max_fee_per_gas, - current_fees.max_priority_fee_per_gas, - ); - BuilderMetrics::increment_bundle_txn_fee_increases(self.builder_index); - required_fees = Some( - current_fees.increase_by_percent(self.settings.replacement_fee_percent_increase), + "Not mined after {} blocks, increasing fees, attempt: {}", + self.settings.max_blocks_to_wait_for_mine, + inner.fee_increase_count + 1 ); + self.metrics.increment_bundle_txn_fee_increases(); + state.update(InnerState::Building(inner.to_building())) + } + + Ok(()) + } + + async fn handle_cancelling_state( + &mut self, + state: &mut SenderMachineState, + inner: CancellingState, + ) -> anyhow::Result<()> { + info!( + "Cancelling last transaction, attempt {}", + inner.fee_increase_count + ); + + let (estimated_fees, _) = self + .proposer + .estimate_gas_fees(None) + .await + .unwrap_or_default(); + + let cancel_res = state + .transaction_tracker + .cancel_transaction(self.entry_point.address(), estimated_fees) + .await; + + match cancel_res { + Ok(Some(_)) => { + info!("Cancellation transaction sent, waiting for confirmation"); + self.metrics.increment_cancellation_txns_sent(); + + state.update(InnerState::CancelPending(inner.to_cancel_pending( + state.block_number() + self.settings.max_blocks_to_wait_for_mine, + ))); + } + Ok(None) => { + info!("Soft cancellation or no transaction to cancel, starting new bundle attempt"); + self.metrics.increment_soft_cancellations(); + state.reset(); + } + 
Err(TransactionTrackerError::ReplacementUnderpriced) => { + info!("Replacement transaction underpriced during cancellation, trying again"); + if inner.fee_increase_count >= self.settings.max_cancellation_fee_increases { + // abandon the cancellation + warn!("Abandoning cancellation after max fee increases {}, starting new bundle attempt", inner.fee_increase_count); + self.metrics.increment_cancellations_abandoned(); + state.reset(); + } else { + // Increase fees again + info!( + "Cancellation increasing fees, attempt: {}", + inner.fee_increase_count + 1 + ); + state.update(InnerState::Cancelling(inner.to_self())); + } + } + Err(TransactionTrackerError::NonceTooLow) => { + // reset the transaction tracker and try again + info!("Nonce too low during cancellation, starting new bundle attempt"); + state.reset(); + } + Err(e) => { + error!("Failed to cancel transaction, moving back to building state: {e:#?}"); + self.metrics.increment_cancellation_txns_failed(); + state.reset(); + } + } + + Ok(()) + } + + async fn handle_cancel_pending_state( + &mut self, + state: &mut SenderMachineState, + inner: CancelPendingState, + tracker_update: Option, + ) -> anyhow::Result<()> { + // check for transaction update + if let Some(update) = tracker_update { + match update { + TrackerUpdate::Mined { + gas_used, + gas_price, + .. + } => { + // mined + let fee = gas_used.zip(gas_price).map(|(used, price)| used * price); + info!("Cancellation transaction mined. Price (wei) {fee:?}"); + self.metrics.increment_cancellation_txns_mined(); + if let Some(fee) = fee { + self.metrics + .increment_cancellation_txns_total_fee(fee.as_u64()); + }; + } + TrackerUpdate::LatestTxDropped { .. } => { + // If a cancellation gets dropped, move to bundling state as there is no + // longer a pending transaction + info!("Cancellation transaction dropped, starting new bundle attempt"); + } + TrackerUpdate::NonceUsedForOtherTx { .. 
} => { + // If a nonce is used externally, move to bundling state as there is no longer + // a pending transaction + info!("Nonce used externally while cancelling, starting new bundle attempt"); + } + } + state.reset(); + } else if state.block_number() >= inner.until { + if inner.fee_increase_count >= self.settings.max_cancellation_fee_increases { + // abandon the cancellation + warn!("Abandoning cancellation after max fee increases {}, starting new bundle attempt", inner.fee_increase_count); + self.metrics.increment_cancellations_abandoned(); + state.reset(); + } else { + // start replacement, don't wait for trigger + info!( + "Cancellation not mined after {} blocks, increasing fees, attempt: {}", + self.settings.max_blocks_to_wait_for_mine, + inner.fee_increase_count + 1 + ); + state.update(InnerState::Cancelling(inner.to_cancelling())); + } + } + + Ok(()) + } + + /// Constructs a bundle and sends it to the entry point as a transaction. + /// + /// Returns empty if: + /// - There are no ops available to bundle initially. + /// - The gas fees are high enough that the bundle is empty because there + /// are no ops that meet the fee requirements. + async fn send_bundle( + &mut self, + state: &mut SenderMachineState, + fee_increase_count: u64, + ) -> anyhow::Result { + let (nonce, required_fees) = state.transaction_tracker.get_nonce_and_required_fees()?; + + let bundle = match self + .proposer + .make_bundle(required_fees, fee_increase_count > 0) + .await + { + Ok(bundle) => bundle, + Err(BundleProposerError::NoOperationsInitially) => { + return Ok(SendBundleAttemptResult::NoOperationsInitially); + } + Err(BundleProposerError::NoOperationsAfterFeeFilter) => { + return Ok(SendBundleAttemptResult::NoOperationsAfterFeeFilter); + } + Err(e) => bail!("Failed to make bundle: {e:?}"), + }; + + let Some(bundle_tx) = self.get_bundle_tx(nonce, bundle).await? 
else { + self.emit(BuilderEvent::formed_bundle( + self.builder_index, + None, + nonce.low_u64(), + fee_increase_count, + required_fees, + )); + return Ok(SendBundleAttemptResult::NoOperationsAfterSimulation); + }; + println!("HC before BundleTx op_hashes {:?}", bundle_tx.op_hashes); + let BundleTx { + tx, + expected_storage, + op_hashes, + } = bundle_tx; + + self.metrics.increment_bundle_txns_sent(); + + let send_result = state + .transaction_tracker + .send_transaction(tx.clone(), &expected_storage) + .await; + + match send_result { + Ok(tx_hash) => { + self.emit(BuilderEvent::formed_bundle( + self.builder_index, + Some(BundleTxDetails { + tx_hash, + tx, + op_hashes: Arc::new(op_hashes), + }), + nonce.low_u64(), + fee_increase_count, + required_fees, + )); + + Ok(SendBundleAttemptResult::Success) + } + Err(TransactionTrackerError::NonceTooLow) => { + self.metrics.increment_bundle_txn_nonce_too_low(); + warn!("Bundle attempt nonce too low"); + Ok(SendBundleAttemptResult::NonceTooLow) + } + Err(TransactionTrackerError::ReplacementUnderpriced) => { + self.metrics.increment_bundle_txn_replacement_underpriced(); + warn!("Bundle attempt replacement transaction underpriced"); + Ok(SendBundleAttemptResult::ReplacementUnderpriced) + } + Err(TransactionTrackerError::ConditionNotMet) => { + self.metrics.increment_bundle_txn_condition_not_met(); + warn!("Bundle attempt condition not met"); + Ok(SendBundleAttemptResult::ConditionNotMet) + } + Err(e) => { + error!("Failed to send bundle with unexpected error: {e:?}"); + Err(e.into()) + } } - BuilderMetrics::increment_bundle_txns_abandoned(self.builder_index); - Ok(SendBundleResult::StalledAtMaxFeeIncreases) } /// Builds a bundle and returns some metadata and the transaction to send /// it, or `None` if there are no valid operations available. 
async fn get_bundle_tx( - &self, + &mut self, nonce: U256, - required_fees: Option, + bundle: Bundle, ) -> anyhow::Result> { - let bundle = self - .proposer - .make_bundle(required_fees) - .await - .context("proposer should create bundle for builder")?; let remove_ops_future = async { + if bundle.rejected_ops.is_empty() { + return; + } + let result = self.remove_ops_from_pool(&bundle.rejected_ops).await; if let Err(error) = result { error!("Failed to remove rejected ops from pool: {error}"); } }; + let update_entities_future = async { + if bundle.entity_updates.is_empty() { + return; + } + let result = self.update_entities_in_pool(&bundle.entity_updates).await; if let Err(error) = result { error!("Failed to update entities in pool: {error}"); } }; + join!(remove_ops_future, update_entities_future); + if bundle.is_empty() { if !bundle.rejected_ops.is_empty() || !bundle.entity_updates.is_empty() { info!( @@ -498,7 +636,7 @@ where ); let op_hashes: Vec<_> = bundle.iter_ops().map(|op| self.op_hash(op)).collect(); println!("HC bundle_sender bundle {:?} OH {:?}", bundle, op_hashes); - let mut tx = self.entry_point.get_send_bundle_transaction( + let mut tx = self.entry_point.get_send_bundle_transaction( bundle.ops_per_aggregator, self.beneficiary, bundle.gas_estimate, @@ -512,13 +650,12 @@ where })) } - async fn remove_ops_from_pool(&self, ops: &[UserOperation]) -> anyhow::Result<()> { - //println!("HC remove_ops_from_pool {:?}", ops); + async fn remove_ops_from_pool(&self, ops: &[UO]) -> anyhow::Result<()> { self.pool .remove_ops( self.entry_point.address(), ops.iter() - .map(|op| op.op_hash(self.entry_point.address(), self.chain_id)) + .map(|op| op.hash(self.entry_point.address(), self.chain_spec.id)) .collect(), ) .await @@ -539,65 +676,1093 @@ where }); } - fn op_hash(&self, op: &UserOperation) -> H256 { - op.op_hash(self.entry_point.address(), self.chain_id) + fn op_hash(&self, op: &UO) -> H256 { + op.hash(self.entry_point.address(), self.chain_spec.id) } } -struct 
BuilderMetrics {} +struct SenderMachineState { + trigger: TRIG, + transaction_tracker: T, + send_bundle_response: Option>, + inner: InnerState, + requires_reset: bool, +} -impl BuilderMetrics { - fn increment_bundle_txns_sent(builder_index: u64) { - metrics::increment_counter!("builder_bundle_txns_sent", "builder_index" => builder_index.to_string()); +impl SenderMachineState { + fn new(trigger: TRIG, transaction_tracker: T) -> Self { + Self { + trigger, + transaction_tracker, + send_bundle_response: None, + inner: InnerState::new(), + requires_reset: false, + } } - fn increment_bundle_txns_success(builder_index: u64) { - metrics::increment_counter!("builder_bundle_txns_success", "builder_index" => builder_index.to_string()); + fn update(&mut self, inner: InnerState) { + self.inner = inner; } - fn increment_bundle_txns_dropped(builder_index: u64) { - metrics::increment_counter!("builder_bundle_txns_dropped", "builder_index" => builder_index.to_string()); + // resets the state machine to the initial state, doesn't wait for next trigger + fn reset(&mut self) { + self.requires_reset = true; + let building_state = BuildingState { + wait_for_trigger: false, + fee_increase_count: 0, + underpriced_info: None, + }; + self.inner = InnerState::Building(building_state); } - // used when we decide to stop trying a transaction - fn increment_bundle_txns_abandoned(builder_index: u64) { - metrics::increment_counter!("builder_bundle_txns_abandoned", "builder_index" => builder_index.to_string()); + fn update_and_abandon(&mut self, inner: InnerState) { + self.update(inner); + self.transaction_tracker.abandon(); } - // used when sending a transaction fails - fn increment_bundle_txns_failed(builder_index: u64) { - metrics::increment_counter!("builder_bundle_txns_failed", "builder_index" => builder_index.to_string()); + // update the state and unabandoned the transaction tracker + // this will cause any "abandoned" transactions to be considered during the next + // fee estimation. 
+ fn update_and_unabandon(&mut self, next_state: InnerState) { + self.transaction_tracker.unabandon(); + self.inner = next_state; + } + + fn abandon(&mut self) { + self.transaction_tracker.abandon(); + self.inner = InnerState::new(); + } + + fn complete(&mut self, result: Option) { + if let Some(result) = result { + if let Some(r) = self.send_bundle_response.take() { + if r.send(result).is_err() { + error!("Failed to send bundle result to manual caller"); + } + } + } + self.inner = InnerState::new(); + } + + async fn wait_for_trigger(&mut self) -> anyhow::Result> { + if self.requires_reset { + self.transaction_tracker.reset().await; + self.requires_reset = false; + } + + match &self.inner { + InnerState::Building(s) => { + if !s.wait_for_trigger { + return Ok(None); + } + + self.send_bundle_response = self.trigger.wait_for_trigger().await?; + self.transaction_tracker + .check_for_update() + .await + .map_err(|e| anyhow::anyhow!("transaction tracker update error {e:?}")) + } + InnerState::Pending(..) | InnerState::CancelPending(..) => { + self.trigger.wait_for_block().await?; + self.transaction_tracker + .check_for_update() + .await + .map_err(|e| anyhow::anyhow!("transaction tracker update error {e:?}")) + } + InnerState::Cancelling(..) 
=> Ok(None), + } + } + + fn block_number(&self) -> u64 { + self.trigger.last_block().block_number + } +} + +// State of the sender loop +enum InnerState { + // Building a bundle, optionally waiting for a trigger to send it + Building(BuildingState), + // Waiting for a bundle to be mined + Pending(PendingState), + // Cancelling the last transaction + Cancelling(CancellingState), + // Waiting for a cancellation transaction to be mined + CancelPending(CancelPendingState), +} + +impl InnerState { + fn new() -> Self { + InnerState::Building(BuildingState { + wait_for_trigger: true, + fee_increase_count: 0, + underpriced_info: None, + }) + } +} + +#[derive(Debug, Clone, Copy)] +struct BuildingState { + wait_for_trigger: bool, + fee_increase_count: u64, + underpriced_info: Option, +} + +#[derive(Debug, Clone, Copy)] +struct UnderpricedInfo { + since_block: u64, + rounds: u64, +} + +impl BuildingState { + // Transition to pending state + fn to_pending(self, until: u64) -> PendingState { + PendingState { + until, + fee_increase_count: self.fee_increase_count, + } + } + + // Transition to cancelling state + fn to_cancelling(self) -> CancellingState { + CancellingState { + fee_increase_count: 0, + } + } + + // Retry the build + fn retry(mut self) -> Self { + self.wait_for_trigger = false; + self + } + + // Mark a replacement as underpriced + // + // The next state will wait for a trigger to reduce bundle building loops + fn replacement_underpriced(self, block_number: u64) -> Self { + let ui = if let Some(underpriced_info) = self.underpriced_info { + underpriced_info + } else { + UnderpricedInfo { + since_block: block_number, + rounds: 1, + } + }; + + BuildingState { + wait_for_trigger: true, + fee_increase_count: self.fee_increase_count + 1, + underpriced_info: Some(ui), + } + } + + // Finalize an underpriced round. + // + // This will clear out the number of fee increases and increment the number of underpriced rounds. 
+ // Use this when we are in an underpriced state, but there are no longer any UOs available to bundle. + fn underpriced_round(self) -> Self { + let mut underpriced_info = self + .underpriced_info + .expect("underpriced_info must be Some when calling underpriced_round"); + underpriced_info.rounds += 1; + + BuildingState { + wait_for_trigger: true, + fee_increase_count: 0, + underpriced_info: Some(underpriced_info), + } + } +} + +#[derive(Debug, Clone, Copy)] +struct PendingState { + until: u64, + fee_increase_count: u64, +} + +impl PendingState { + fn to_building(self) -> BuildingState { + BuildingState { + wait_for_trigger: false, + fee_increase_count: self.fee_increase_count + 1, + underpriced_info: None, + } + } +} + +#[derive(Debug, Clone, Copy)] +struct CancellingState { + fee_increase_count: u64, +} + +impl CancellingState { + fn to_self(mut self) -> Self { + self.fee_increase_count += 1; + self + } + + fn to_cancel_pending(self, until: u64) -> CancelPendingState { + CancelPendingState { + until, + fee_increase_count: self.fee_increase_count, + } } +} + +#[derive(Debug, Clone, Copy)] +struct CancelPendingState { + until: u64, + fee_increase_count: u64, +} - fn increment_bundle_txns_nonce_used(builder_index: u64) { - metrics::increment_counter!("builder_bundle_txns_nonce_used", "builder_index" => builder_index.to_string()); +impl CancelPendingState { + fn to_cancelling(self) -> CancellingState { + CancellingState { + fee_increase_count: self.fee_increase_count + 1, + } } +} + +#[async_trait] +#[cfg_attr(test, automock)] +trait Trigger { + async fn wait_for_trigger( + &mut self, + ) -> anyhow::Result>>; - fn increment_bundle_txn_fee_increases(builder_index: u64) { - metrics::increment_counter!("builder_bundle_fee_increases", "builder_index" => builder_index.to_string()); + async fn wait_for_block(&mut self) -> anyhow::Result; + + fn last_block(&self) -> &NewHead; +} + +struct BundleSenderTrigger { + bundling_mode: BundlingMode, + block_rx: UnboundedReceiver, + 
bundle_action_receiver: mpsc::Receiver, + timer: tokio::time::Interval, + last_block: NewHead, +} + +#[async_trait] +impl Trigger for BundleSenderTrigger { + async fn wait_for_trigger( + &mut self, + ) -> anyhow::Result>> { + let mut send_bundle_response: Option> = None; + + loop { + hybrid_compute::expire_hc_cache(); + // 3 triggers for loop logic: + // 1 - new block + // - If auto mode, send next bundle + // 2 - timer tick + // - If auto mode, send next bundle + // 3 - action recv + // - If change mode, change and restart loop + // - If send bundle and manual mode, send next bundle + tokio::select! { + b = self.block_rx.recv() => { + let Some(b) = b else { + error!("Block stream closed"); + bail!("Block stream closed"); + }; + + self.last_block = b; + + match self.bundling_mode { + BundlingMode::Manual => continue, + BundlingMode::Auto => break, + } + }, + _ = self.timer.tick() => { + match self.bundling_mode { + BundlingMode::Manual => continue, + BundlingMode::Auto => break, + } + }, + a = self.bundle_action_receiver.recv() => { + match a { + Some(BundleSenderAction::ChangeMode(mode)) => { + debug!("changing bundling mode to {mode:?}"); + self.bundling_mode = mode; + continue; + }, + Some(BundleSenderAction::SendBundle(r)) => { + match self.bundling_mode { + BundlingMode::Manual => { + send_bundle_response = Some(r.responder); + break; + }, + BundlingMode::Auto => { + error!("Received bundle send action while in auto mode, ignoring"); + continue; + } + } + }, + None => { + error!("Bundle action recv closed"); + bail!("Bundle action recv closed"); + } + } + } + }; + } + + self.consume_blocks()?; + + Ok(send_bundle_response) } - fn increment_bundle_txn_replacement_underpriced(builder_index: u64) { - metrics::increment_counter!("builder_bundle_replacement_underpriced", "builder_index" => builder_index.to_string()); + async fn wait_for_block(&mut self) -> anyhow::Result { + self.last_block = self + .block_rx + .recv() + .await + .ok_or_else(|| 
anyhow::anyhow!("Block stream closed"))?; + self.consume_blocks()?; + Ok(self.last_block.clone()) } - fn set_bundle_gas_stats(gas_limit: Option, gas_used: Option) { + fn last_block(&self) -> &NewHead { + &self.last_block + } +} + +impl BundleSenderTrigger { + async fn new( + pool_client: &P, + bundle_action_receiver: mpsc::Receiver, + timer_interval: Duration, + ) -> anyhow::Result { + let block_rx = Self::start_block_stream(pool_client).await?; + + Ok(Self { + bundling_mode: BundlingMode::Auto, + block_rx, + bundle_action_receiver, + timer: tokio::time::interval(timer_interval), + last_block: NewHead { + block_hash: H256::zero(), + block_number: 0, + }, + }) + } + + async fn start_block_stream( + pool_client: &P, + ) -> anyhow::Result> { + let Ok(mut new_heads) = pool_client.subscribe_new_heads().await else { + error!("Failed to subscribe to new blocks"); + bail!("failed to subscribe to new blocks"); + }; + + let (tx, rx) = mpsc::unbounded_channel(); + tokio::spawn(async move { + loop { + match new_heads.next().await { + Some(b) => { + if tx.send(b).is_err() { + error!("Failed to buffer new block for bundle sender"); + return; + } + } + None => { + error!("Block stream ended"); + return; + } + } + } + }); + + Ok(rx) + } + + fn consume_blocks(&mut self) -> anyhow::Result<()> { + // Consume any other blocks that may have been buffered up + loop { + match self.block_rx.try_recv() { + Ok(b) => { + self.last_block = b; + } + Err(mpsc::error::TryRecvError::Empty) => { + return Ok(()); + } + Err(mpsc::error::TryRecvError::Disconnected) => { + error!("Block stream closed"); + bail!("Block stream closed"); + } + } + } + } +} + +#[derive(Debug, Clone)] +struct BuilderMetrics { + builder_index: u64, + entry_point: Address, +} + +impl BuilderMetrics { + fn increment_bundle_txns_sent(&self) { + metrics::counter!("builder_bundle_txns_sent", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()) + .increment(1); + } + + fn 
process_bundle_txn_success(&self, gas_limit: Option, gas_used: Option) { + metrics::counter!("builder_bundle_txns_success", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + if let Some(limit) = gas_limit { - metrics::counter!("builder_bundle_gas_limit", limit.as_u64()); + metrics::counter!("builder_bundle_gas_limit", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(limit.as_u64()); } if let Some(used) = gas_used { - metrics::counter!("builder_bundle_gas_used", used.as_u64()); + metrics::counter!("builder_bundle_gas_used", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(used.as_u64()); } } - fn set_current_fees(fees: &GasFees) { - metrics::gauge!( - "builder_current_max_fee", - fees.max_fee_per_gas.as_u128() as f64 + fn increment_bundle_txns_dropped(&self) { + metrics::counter!("builder_bundle_txns_dropped", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + // used when we decide to stop trying a transaction + fn increment_bundle_txns_abandoned(&self) { + metrics::counter!("builder_bundle_txns_abandoned", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + // used when sending a transaction fails + fn increment_bundle_txns_failed(&self) { + metrics::counter!("builder_bundle_txns_failed", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_bundle_txns_nonce_used(&self) { + metrics::counter!("builder_bundle_txns_nonce_used", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_bundle_txn_fee_increases(&self) { + metrics::counter!("builder_bundle_fee_increases", "entry_point" => 
self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_bundle_txn_replacement_underpriced(&self) { + metrics::counter!("builder_bundle_replacement_underpriced", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_bundle_txn_nonce_too_low(&self) { + metrics::counter!("builder_bundle_nonce_too_low", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_bundle_txn_condition_not_met(&self) { + metrics::counter!("builder_bundle_condition_not_met", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_cancellation_txns_sent(&self) { + metrics::counter!("builder_cancellation_txns_sent", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_cancellation_txns_mined(&self) { + metrics::counter!("builder_cancellation_txns_mined", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_cancellation_txns_total_fee(&self, fee: u64) { + metrics::counter!("builder_cancellation_txns_total_fee", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(fee); + } + + fn increment_cancellations_abandoned(&self) { + metrics::counter!("builder_cancellations_abandoned", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_soft_cancellations(&self) { + metrics::counter!("builder_soft_cancellations", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_cancellation_txns_failed(&self) { + 
metrics::counter!("builder_cancellation_txns_failed", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } + + fn increment_state_machine_errors(&self) { + metrics::counter!("builder_state_machine_errors", "entry_point" => self.entry_point.to_string(), "builder_index" => self.builder_index.to_string()).increment(1); + } +} + +#[cfg(test)] +mod tests { + use ethers::types::Bytes; + use mockall::Sequence; + use rundler_provider::MockEntryPointV0_6; + use rundler_types::{ + chain::ChainSpec, pool::MockPool, v0_6::UserOperation, GasFees, UserOpsPerAggregator, + }; + use tokio::sync::{broadcast, mpsc}; + + use super::*; + use crate::{ + bundle_proposer::{Bundle, MockBundleProposer}, + bundle_sender::{BundleSenderImpl, MockTrigger}, + transaction_tracker::MockTransactionTracker, + }; + + #[tokio::test] + async fn test_empty_send() { + let Mocks { + mut mock_proposer, + mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + // block 0 + add_trigger_no_update_last_block( + &mut mock_trigger, + &mut mock_tracker, + &mut Sequence::new(), + 0, ); - metrics::gauge!( - "builder_current_max_priority_fee", - fees.max_priority_fee_per_gas.as_u128() as f64 + + // zero nonce + mock_tracker + .expect_get_nonce_and_required_fees() + .returning(|| Ok((U256::zero(), None))); + + // empty bundle + mock_proposer + .expect_make_bundle() + .times(1) + .returning(|_, _| Box::pin(async { Ok(Bundle::::default()) })); + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + // start in building state + let mut state = SenderMachineState::new(mock_trigger, mock_tracker); + + sender.step_state(&mut state).await.unwrap(); + + // empty bundle shouldn't move out of building state + assert!(matches!( + state.inner, + InnerState::Building(BuildingState { + wait_for_trigger: true, + .. 
+ }) + )); + } + + #[tokio::test] + async fn test_send() { + let Mocks { + mut mock_proposer, + mut mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + // block 0 + add_trigger_no_update_last_block( + &mut mock_trigger, + &mut mock_tracker, + &mut Sequence::new(), + 0, ); + + // zero nonce + mock_tracker + .expect_get_nonce_and_required_fees() + .returning(|| Ok((U256::zero(), None))); + + // bundle with one op + mock_proposer + .expect_make_bundle() + .times(1) + .returning(|_, _| Box::pin(async { Ok(bundle()) })); + + // should create the bundle txn + mock_entry_point + .expect_get_send_bundle_transaction() + .returning(|_, _, _, _| TypedTransaction::default()); + + // should send the bundle txn + mock_tracker + .expect_send_transaction() + .returning(|_, _| Box::pin(async { Ok(H256::zero()) })); + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + // start in building state + let mut state = SenderMachineState::new(mock_trigger, mock_tracker); + + sender.step_state(&mut state).await.unwrap(); + + // end in the pending state + assert!(matches!( + state.inner, + InnerState::Pending(PendingState { + until: 3, // block 0 + wait 3 blocks + .. 
+ }) + )); + } + + #[tokio::test] + async fn test_wait_for_mine_success() { + let Mocks { + mock_proposer, + mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + let mut seq = Sequence::new(); + add_trigger_wait_for_block_last_block(&mut mock_trigger, &mut seq, 1); + mock_trigger + .expect_wait_for_block() + .once() + .in_sequence(&mut seq) + .returning(|| { + Box::pin(async { + Ok(NewHead { + block_number: 2, + block_hash: H256::zero(), + }) + }) + }); + // no call to last_block after mine + + let mut seq = Sequence::new(); + mock_tracker + .expect_check_for_update() + .once() + .in_sequence(&mut seq) + .returning(|| Box::pin(async { Ok(None) })); + mock_tracker + .expect_check_for_update() + .once() + .in_sequence(&mut seq) + .returning(|| { + Box::pin(async { + Ok(Some(TrackerUpdate::Mined { + block_number: 2, + nonce: U256::zero(), + gas_limit: None, + gas_used: None, + gas_price: None, + tx_hash: H256::zero(), + attempt_number: 0, + })) + }) + }); + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + // start in pending state + let mut state = SenderMachineState { + trigger: mock_trigger, + transaction_tracker: mock_tracker, + send_bundle_response: None, + inner: InnerState::Pending(PendingState { + until: 3, + fee_increase_count: 0, + }), + requires_reset: false, + }; + + // first step has no update + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::Pending(PendingState { until: 3, .. 
}) + )); + + // second step is mined and moves back to building + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::Building(BuildingState { + wait_for_trigger: true, + fee_increase_count: 0, + underpriced_info: None, + }) + )); + } + + #[tokio::test] + async fn test_wait_for_mine_timed_out() { + let Mocks { + mock_proposer, + mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + let mut seq = Sequence::new(); + for i in 1..=3 { + add_trigger_wait_for_block_last_block(&mut mock_trigger, &mut seq, i); + } + + mock_tracker + .expect_check_for_update() + .times(3) + .returning(|| Box::pin(async { Ok(None) })); + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + // start in pending state + let mut state = SenderMachineState { + trigger: mock_trigger, + transaction_tracker: mock_tracker, + send_bundle_response: None, + inner: InnerState::Pending(PendingState { + until: 3, + fee_increase_count: 0, + }), + requires_reset: false, + }; + + // first and second step has no update + for _ in 0..2 { + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::Pending(PendingState { until: 3, .. 
}) + )); + } + + // third step times out and moves back to building with a fee increase + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::Building(BuildingState { + wait_for_trigger: false, + fee_increase_count: 1, + underpriced_info: None, + }) + )); + } + + #[tokio::test] + async fn test_transition_to_cancel() { + let Mocks { + mut mock_proposer, + mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + let mut seq = Sequence::new(); + add_trigger_no_update_last_block(&mut mock_trigger, &mut mock_tracker, &mut seq, 3); + + // zero nonce + mock_tracker + .expect_get_nonce_and_required_fees() + .returning(|| Ok((U256::zero(), None))); + + // fee filter error + mock_proposer + .expect_make_bundle() + .times(1) + .returning(|_, _| { + Box::pin(async { Err(BundleProposerError::NoOperationsAfterFeeFilter) }) + }); + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + // start in underpriced meta-state + let mut state = SenderMachineState { + trigger: mock_trigger, + transaction_tracker: mock_tracker, + send_bundle_response: None, + inner: InnerState::Building(BuildingState { + wait_for_trigger: true, + fee_increase_count: 0, + underpriced_info: Some(UnderpricedInfo { + since_block: 0, + rounds: 1, + }), + }), + requires_reset: false, + }; + + // step state, block number should trigger move to cancellation + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::Cancelling(CancellingState { + fee_increase_count: 0, + }) + )); + } + + #[tokio::test] + async fn test_send_cancel() { + let Mocks { + mut mock_proposer, + mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + mock_proposer + .expect_estimate_gas_fees() + .once() + .returning(|_| Box::pin(async { Ok((GasFees::default(), U256::zero())) })); + + mock_tracker + .expect_cancel_transaction() + .once() + .returning(|_, _| Box::pin(async { Ok(Some(H256::zero())) 
})); + + mock_trigger.expect_last_block().return_const(NewHead { + block_number: 0, + block_hash: H256::zero(), + }); + + let mut state = SenderMachineState { + trigger: mock_trigger, + transaction_tracker: mock_tracker, + send_bundle_response: None, + inner: InnerState::Cancelling(CancellingState { + fee_increase_count: 0, + }), + requires_reset: false, + }; + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::CancelPending(CancelPendingState { + until: 3, + fee_increase_count: 0, + }) + )); + } + + #[tokio::test] + async fn test_resubmit_cancel() { + let Mocks { + mock_proposer, + mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + let mut seq = Sequence::new(); + for i in 1..=3 { + add_trigger_wait_for_block_last_block(&mut mock_trigger, &mut seq, i); + } + + mock_tracker + .expect_check_for_update() + .times(3) + .returning(|| Box::pin(async { Ok(None) })); + + let mut state = SenderMachineState { + trigger: mock_trigger, + transaction_tracker: mock_tracker, + send_bundle_response: None, + inner: InnerState::CancelPending(CancelPendingState { + until: 3, + fee_increase_count: 0, + }), + requires_reset: false, + }; + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + for _ in 0..2 { + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::CancelPending(CancelPendingState { + until: 3, + fee_increase_count: 0, + }) + )); + } + + sender.step_state(&mut state).await.unwrap(); + assert!(matches!( + state.inner, + InnerState::Cancelling(CancellingState { + fee_increase_count: 1, + }) + )); + } + + #[tokio::test] + async fn test_condition_not_met() { + let Mocks { + mut mock_proposer, + mut mock_entry_point, + mut mock_tracker, + mut mock_trigger, + } = new_mocks(); + + let mut seq = Sequence::new(); + add_trigger_no_update_last_block(&mut mock_trigger, &mut mock_tracker, 
&mut seq, 1); + + // zero nonce + mock_tracker + .expect_get_nonce_and_required_fees() + .returning(|| Ok((U256::zero(), None))); + + // bundle with one op + mock_proposer + .expect_make_bundle() + .times(1) + .returning(|_, _| Box::pin(async { Ok(bundle()) })); + + // should create the bundle txn + mock_entry_point + .expect_get_send_bundle_transaction() + .returning(|_, _, _, _| TypedTransaction::default()); + + // should send the bundle txn, returns condition not met + mock_tracker + .expect_send_transaction() + .returning(|_, _| Box::pin(async { Err(TransactionTrackerError::ConditionNotMet) })); + + // should notify proposer that condition was not met + mock_proposer + .expect_notify_condition_not_met() + .times(1) + .return_const(()); + + let mut state = SenderMachineState { + trigger: mock_trigger, + transaction_tracker: mock_tracker, + send_bundle_response: None, + inner: InnerState::Building(BuildingState { + wait_for_trigger: true, + fee_increase_count: 0, + underpriced_info: None, + }), + requires_reset: false, + }; + + let mut sender = new_sender(mock_proposer, mock_entry_point); + + sender.step_state(&mut state).await.unwrap(); + + // end back in the building state without waiting for trigger + assert!(matches!( + state.inner, + InnerState::Building(BuildingState { + wait_for_trigger: false, + fee_increase_count: 0, + underpriced_info: None, + }) + )); + } + + struct Mocks { + mock_proposer: MockBundleProposer, + mock_entry_point: MockEntryPointV0_6, + mock_tracker: MockTransactionTracker, + mock_trigger: MockTrigger, + } + + fn new_mocks() -> Mocks { + let mut mock_entry_point = MockEntryPointV0_6::new(); + mock_entry_point + .expect_address() + .return_const(Address::default()); + + Mocks { + mock_proposer: MockBundleProposer::new(), + mock_entry_point, + mock_tracker: MockTransactionTracker::new(), + mock_trigger: MockTrigger::new(), + } + } + + fn new_sender( + mock_proposer: MockBundleProposer, + mock_entry_point: MockEntryPointV0_6, + ) -> 
BundleSenderImpl< + UserOperation, + MockBundleProposer, + MockEntryPointV0_6, + MockTransactionTracker, + MockPool, + > { + BundleSenderImpl::new( + 0, + mpsc::channel(1000).1, + ChainSpec::default(), + Address::default(), + mock_proposer, + mock_entry_point, + MockTransactionTracker::new(), + MockPool::new(), + Settings { + max_cancellation_fee_increases: 3, + max_blocks_to_wait_for_mine: 3, + max_replacement_underpriced_blocks: 3, + }, + broadcast::channel(1000).0, + ) + } + + fn add_trigger_no_update_last_block( + mock_trigger: &mut MockTrigger, + mock_tracker: &mut MockTransactionTracker, + seq: &mut Sequence, + block_number: u64, + ) { + mock_trigger + .expect_wait_for_trigger() + .once() + .in_sequence(seq) + .returning(move || Box::pin(async move { Ok(None) })); + mock_tracker + .expect_check_for_update() + .returning(|| Box::pin(async { Ok(None) })); + mock_trigger + .expect_last_block() + .once() + .in_sequence(seq) + .return_const(NewHead { + block_number, + block_hash: H256::zero(), + }); + } + + fn add_trigger_wait_for_block_last_block( + mock_trigger: &mut MockTrigger, + seq: &mut Sequence, + block_number: u64, + ) { + mock_trigger + .expect_wait_for_block() + .once() + .in_sequence(seq) + .returning(move || { + Box::pin(async move { + Ok(NewHead { + block_number, + block_hash: H256::zero(), + }) + }) + }); + mock_trigger + .expect_last_block() + .once() + .in_sequence(seq) + .return_const(NewHead { + block_number, + block_hash: H256::zero(), + }); + } + + fn bundle() -> Bundle { + Bundle { + gas_estimate: U256::from(100_000), + gas_fees: GasFees::default(), + expected_storage: Default::default(), + rejected_ops: vec![], + entity_updates: vec![], + ops_per_aggregator: vec![UserOpsPerAggregator { + aggregator: Address::zero(), + signature: Bytes::new(), + user_ops: vec![UserOperation::default()], + }], + } } } diff --git a/crates/builder/src/emit.rs b/crates/builder/src/emit.rs index e5c70cd3..66de76f0 100644 --- a/crates/builder/src/emit.rs +++ 
b/crates/builder/src/emit.rs @@ -196,6 +196,17 @@ pub enum OpRejectionReason { FailedRevalidation { error: SimulationError }, /// Operation reverted during bundle formation simulation with message FailedInBundle { message: Arc }, + /// Operation's storage slot condition was not met + ConditionNotMet(ConditionNotMetReason), +} + +/// Reason for a condition not being met +#[derive(Clone, Debug)] +pub struct ConditionNotMetReason { + pub address: Address, + pub slot: H256, + pub expected: H256, + pub actual: H256, } impl Display for BuilderEvent { diff --git a/crates/builder/src/lib.rs b/crates/builder/src/lib.rs index 8e7d9ccb..ceee246f 100644 --- a/crates/builder/src/lib.rs +++ b/crates/builder/src/lib.rs @@ -26,17 +26,17 @@ mod emit; pub use emit::{BuilderEvent, BuilderEventKind}; mod sender; -pub use sender::TransactionSenderType; +pub use sender::{ + BloxrouteSenderArgs, FlashbotsSenderArgs, RawSenderArgs, TransactionSenderArgs, + TransactionSenderKind, +}; mod server; -pub use server::{ - BuilderResult, BuilderServer, BuilderServerError, BundlingMode, LocalBuilderBuilder, - LocalBuilderHandle, RemoteBuilderClient, -}; +pub use server::{LocalBuilderBuilder, LocalBuilderHandle, RemoteBuilderClient}; mod signer; mod task; -pub use task::{Args as BuilderTaskArgs, BuilderTask}; +pub use task::{Args as BuilderTaskArgs, BuilderTask, EntryPointBuilderSettings}; mod transaction_tracker; diff --git a/crates/builder/src/sender/bloxroute.rs b/crates/builder/src/sender/bloxroute.rs index 46542adb..7137a4c6 100644 --- a/crates/builder/src/sender/bloxroute.rs +++ b/crates/builder/src/sender/bloxroute.rs @@ -11,30 +11,30 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. 
-use std::{sync::Arc, time::Duration}; +use std::sync::Arc; use anyhow::Context; use ethers::{ middleware::SignerMiddleware, providers::{JsonRpcClient, Middleware, Provider}, - types::{ - transaction::eip2718::TypedTransaction, Address, Bytes, TransactionReceipt, TxHash, H256, - }, + types::{transaction::eip2718::TypedTransaction, Address, Bytes, TxHash, H256, U256}, utils::hex, }; use ethers_signers::Signer; use jsonrpsee::{ core::{client::ClientT, traits::ToRpcParams}, - http_client::{transport::HttpBackend, HttpClient, HttpClientBuilder}, + http_client::{transport::HttpBackend, HeaderMap, HeaderValue, HttpClient, HttpClientBuilder}, }; -use reqwest::header::{HeaderMap, HeaderValue}; use rundler_sim::ExpectedStorage; +use rundler_types::GasFees; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue; -use tokio::time; use tonic::async_trait; -use super::{fill_and_sign, Result, SentTxInfo, TransactionSender, TxStatus}; +use super::{ + create_hard_cancel_tx, fill_and_sign, CancelTxInfo, Result, SentTxInfo, TransactionSender, + TxStatus, +}; pub(crate) struct PolygonBloxrouteTransactionSender where @@ -42,9 +42,7 @@ where S: Signer + 'static, { provider: SignerMiddleware>, S>, - raw_provider: Arc>, client: PolygonBloxrouteClient, - poll_interval: Duration, } #[async_trait] @@ -63,6 +61,29 @@ where Ok(SentTxInfo { nonce, tx_hash }) } + async fn cancel_transaction( + &self, + _tx_hash: H256, + nonce: U256, + to: Address, + gas_fees: GasFees, + ) -> Result { + let tx = create_hard_cancel_tx(self.provider.address(), to, nonce, gas_fees); + + let (raw_tx, _) = fill_and_sign(&self.provider, tx).await?; + + let tx_hash = self + .provider + .provider() + .request("eth_sendRawTransaction", (raw_tx,)) + .await?; + + Ok(CancelTxInfo { + tx_hash, + soft_cancelled: false, + }) + } + async fn get_transaction_status(&self, tx_hash: H256) -> Result { let tx = self .provider @@ -80,15 +101,6 @@ where .unwrap_or(TxStatus::Pending)) } - async fn wait_until_mined(&self, 
tx_hash: H256) -> Result> { - Ok(Self::wait_until_mined_no_drop( - tx_hash, - Arc::clone(&self.raw_provider), - self.poll_interval, - ) - .await?) - } - fn address(&self) -> Address { self.provider.address() } @@ -99,45 +111,12 @@ where C: JsonRpcClient + 'static, S: Signer + 'static, { - pub(crate) fn new( - provider: Arc>, - signer: S, - poll_interval: Duration, - auth_header: &str, - ) -> Result { + pub(crate) fn new(provider: Arc>, signer: S, auth_header: &str) -> Result { Ok(Self { provider: SignerMiddleware::new(Arc::clone(&provider), signer), - raw_provider: provider, client: PolygonBloxrouteClient::new(auth_header)?, - poll_interval, }) } - - async fn wait_until_mined_no_drop( - tx_hash: H256, - provider: Arc>, - poll_interval: Duration, - ) -> Result> { - loop { - let tx = provider - .get_transaction(tx_hash) - .await - .context("provider should return transaction status")?; - - match tx.and_then(|tx| tx.block_number) { - None => {} - Some(_) => { - let receipt = provider - .get_transaction_receipt(tx_hash) - .await - .context("provider should return transaction receipt")?; - return Ok(receipt); - } - } - - time::sleep(poll_interval).await; - } - } } struct PolygonBloxrouteClient { diff --git a/crates/builder/src/sender/conditional.rs b/crates/builder/src/sender/conditional.rs deleted file mode 100644 index e02f78a6..00000000 --- a/crates/builder/src/sender/conditional.rs +++ /dev/null @@ -1,103 +0,0 @@ -// This file is part of Rundler. -// -// Rundler is free software: you can redistribute it and/or modify it under the -// terms of the GNU Lesser General Public License as published by the Free Software -// Foundation, either version 3 of the License, or (at your option) any later version. -// -// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; -// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. 
-// -// You should have received a copy of the GNU General Public License along with Rundler. -// If not, see https://www.gnu.org/licenses/. - -use std::sync::Arc; - -use anyhow::Context; -use ethers::{ - middleware::SignerMiddleware, - providers::{JsonRpcClient, Middleware, PendingTransaction, Provider}, - types::{transaction::eip2718::TypedTransaction, Address, TransactionReceipt, H256}, -}; -use ethers_signers::Signer; -use rundler_sim::ExpectedStorage; -use serde_json::json; -use tonic::async_trait; - -use super::{fill_and_sign, Result, SentTxInfo, TransactionSender, TxStatus}; - -pub(crate) struct ConditionalTransactionSender -where - C: JsonRpcClient + 'static, - S: Signer + 'static, -{ - // The `SignerMiddleware` specifically needs to wrap a `Provider`, and not - // just any `Middleware`, because `.request()` is only on `Provider` and not - // on `Middleware`. - provider: SignerMiddleware>, S>, -} - -#[async_trait] -impl TransactionSender for ConditionalTransactionSender -where - C: JsonRpcClient + 'static, - S: Signer + 'static, -{ - async fn send_transaction( - &self, - tx: TypedTransaction, - expected_storage: &ExpectedStorage, - ) -> Result { - let (raw_tx, nonce) = fill_and_sign(&self.provider, tx).await?; - - let tx_hash = self - .provider - .provider() - .request( - "eth_sendRawTransactionConditional", - (raw_tx, json!({ "knownAccounts": expected_storage })), - ) - .await?; - - Ok(SentTxInfo { nonce, tx_hash }) - } - - async fn get_transaction_status(&self, tx_hash: H256) -> Result { - let tx = self - .provider - .get_transaction(tx_hash) - .await - .context("provider should return transaction status")?; - Ok(match tx { - None => TxStatus::Dropped, - Some(tx) => match tx.block_number { - None => TxStatus::Pending, - Some(block_number) => TxStatus::Mined { - block_number: block_number.as_u64(), - }, - }, - }) - } - - async fn wait_until_mined(&self, tx_hash: H256) -> Result> { - Ok(PendingTransaction::new(tx_hash, self.provider.inner()) - .await - 
.context("should wait for transaction to be mined or dropped")?) - } - - fn address(&self) -> Address { - self.provider.address() - } -} - -impl ConditionalTransactionSender -where - C: JsonRpcClient + 'static, - S: Signer + 'static, -{ - pub(crate) fn new(provider: Arc>, signer: S) -> Self { - Self { - provider: SignerMiddleware::new(provider, signer), - } - } -} diff --git a/crates/builder/src/sender/flashbots.rs b/crates/builder/src/sender/flashbots.rs index d8dbdede..d1e44272 100644 --- a/crates/builder/src/sender/flashbots.rs +++ b/crates/builder/src/sender/flashbots.rs @@ -13,54 +13,42 @@ // Adapted from https://github.com/onbjerg/ethers-flashbots and // https://github.com/gakonst/ethers-rs/blob/master/ethers-providers/src/toolbox/pending_transaction.rs -use std::{ - future::Future, - pin::Pin, - str::FromStr, - sync::Arc, - task::{Context as TaskContext, Poll}, -}; +use std::{str::FromStr, sync::Arc}; use anyhow::{anyhow, Context}; use ethers::{ middleware::SignerMiddleware, - providers::{interval, JsonRpcClient, Middleware, Provider}, - types::{ - transaction::eip2718::TypedTransaction, Address, Bytes, TransactionReceipt, TxHash, H256, - U256, U64, - }, + providers::{JsonRpcClient, Middleware, Provider}, + types::{transaction::eip2718::TypedTransaction, Address, Bytes, H256, U256, U64}, + utils, }; use ethers_signers::Signer; -use futures_timer::Delay; -use futures_util::{Stream, StreamExt, TryFutureExt}; -use jsonrpsee::{ - core::{client::ClientT, traits::ToRpcParams}, - http_client::{transport::HttpBackend, HttpClient, HttpClientBuilder}, +use reqwest::{ + header::{HeaderMap, HeaderValue, CONTENT_TYPE}, + Client, Response, }; -use pin_project::pin_project; +use rundler_types::GasFees; use serde::{de, Deserialize, Serialize}; -use serde_json::{value::RawValue, Value}; +use serde_json::{json, Value}; use tonic::async_trait; use super::{ fill_and_sign, ExpectedStorage, Result, SentTxInfo, TransactionSender, TxSenderError, TxStatus, }; +use 
crate::sender::CancelTxInfo; #[derive(Debug)] -pub(crate) struct FlashbotsTransactionSender -where - C: JsonRpcClient + 'static, - S: Signer + 'static, -{ +pub(crate) struct FlashbotsTransactionSender { provider: SignerMiddleware>, S>, - client: FlashbotsClient, + flashbots_client: FlashbotsClient, } #[async_trait] -impl TransactionSender for FlashbotsTransactionSender +impl TransactionSender for FlashbotsTransactionSender where C: JsonRpcClient + 'static, S: Signer + 'static, + FS: Signer + 'static, { async fn send_transaction( &self, @@ -69,13 +57,38 @@ where ) -> Result { let (raw_tx, nonce) = fill_and_sign(&self.provider, tx).await?; - let tx_hash = self.client.send_transaction(raw_tx).await?; + let tx_hash = self + .flashbots_client + .send_private_transaction(raw_tx) + .await?; Ok(SentTxInfo { nonce, tx_hash }) } + async fn cancel_transaction( + &self, + tx_hash: H256, + _nonce: U256, + _to: Address, + _gas_fees: GasFees, + ) -> Result { + let success = self + .flashbots_client + .cancel_private_transaction(tx_hash) + .await?; + + if !success { + return Err(TxSenderError::SoftCancelFailed); + } + + Ok(CancelTxInfo { + tx_hash: H256::zero(), + soft_cancelled: true, + }) + } + async fn get_transaction_status(&self, tx_hash: H256) -> Result { - let status = self.client.status(tx_hash).await?; + let status = self.flashbots_client.status(tx_hash).await?; Ok(match status.status { FlashbotsAPITransactionStatus::Pending => TxStatus::Pending, FlashbotsAPITransactionStatus::Included => { @@ -96,38 +109,104 @@ where } TxStatus::Pending } - FlashbotsAPITransactionStatus::Failed | FlashbotsAPITransactionStatus::Unknown => { + FlashbotsAPITransactionStatus::Unknown => { return Err(TxSenderError::Other(anyhow!( - "Transaction {tx_hash:?} failed in Flashbots with status {:?}", - status.status, + "Transaction {tx_hash:?} unknown in Flashbots API", ))); } - FlashbotsAPITransactionStatus::Cancelled => TxStatus::Dropped, + FlashbotsAPITransactionStatus::Failed | 
FlashbotsAPITransactionStatus::Cancelled => { + TxStatus::Dropped + } }) } - async fn wait_until_mined(&self, tx_hash: H256) -> Result> { - Ok(PendingFlashbotsTransaction::new(tx_hash, self.provider.inner(), &self.client).await?) - } - fn address(&self) -> Address { self.provider.address() } } -impl FlashbotsTransactionSender +impl FlashbotsTransactionSender where C: JsonRpcClient + 'static, S: Signer + 'static, + FS: Signer + 'static, { - pub(crate) fn new(provider: Arc>, signer: S) -> Result { + pub(crate) fn new( + provider: Arc>, + tx_signer: S, + flashbots_signer: FS, + builders: Vec, + relay_url: String, + status_url: String, + ) -> Result { Ok(Self { - provider: SignerMiddleware::new(provider, signer), - client: FlashbotsClient::new()?, + provider: SignerMiddleware::new(provider, tx_signer), + flashbots_client: FlashbotsClient::new( + flashbots_signer, + builders, + relay_url, + status_url, + ), }) } } +#[derive(Serialize, Deserialize, Debug)] +struct Preferences { + fast: bool, + #[serde(skip_serializing_if = "Option::is_none")] + privacy: Option, + #[serde(skip_serializing_if = "Option::is_none")] + validity: Option, +} + +#[derive(Serialize, Deserialize, Debug)] +struct Privacy { + #[serde(skip_serializing_if = "Option::is_none")] + hints: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + builders: Option>, +} + +#[derive(Serialize, Deserialize, Debug)] +struct Validity { + #[serde(skip_serializing_if = "Option::is_none")] + refund: Option>, +} + +#[derive(Serialize, Deserialize, Debug)] +struct Refund { + address: String, + percent: u8, +} + +#[derive(Serialize, Debug)] +#[serde(rename_all = "camelCase")] +struct FlashbotsSendPrivateTransactionRequest { + tx: Bytes, + #[serde(skip_serializing_if = "Option::is_none")] + max_block_number: Option, + preferences: Preferences, +} + +#[derive(Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +struct FlashbotsSendPrivateTransactionResponse { + result: H256, +} + +#[derive(Serialize, Debug)] 
+#[serde(rename_all = "camelCase")] +struct FlashbotsCancelPrivateTransactionRequest { + tx_hash: H256, +} + +#[derive(Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +struct FlashbotsCancelPrivateTransactionResponse { + result: bool, +} + #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] #[allow(dead_code)] @@ -171,162 +250,115 @@ struct FlashbotsAPIResponse { } #[derive(Debug)] -struct FlashbotsClient { - client: HttpClient, +struct FlashbotsClient { + http_client: Client, + signer: S, + builders: Vec, + relay_url: String, + status_url: String, } -impl FlashbotsClient { - fn new() -> anyhow::Result { - let client = HttpClientBuilder::default().build("https://rpc.flashbots.net")?; - Ok(Self { client }) +impl FlashbotsClient { + fn new(signer: S, builders: Vec, relay_url: String, status_url: String) -> Self { + Self { + http_client: Client::new(), + signer, + builders, + relay_url, + status_url, + } } async fn status(&self, tx_hash: H256) -> anyhow::Result { - let url = format!("https://protect.flashbots.net/tx/{:?}", tx_hash); - let resp = reqwest::get(&url).await?; + let url = format!("{}{:?}", self.status_url, tx_hash); + let resp = self.http_client.get(&url).send().await?; resp.json::() .await .context("should deserialize FlashbotsAPIResponse") } - - async fn send_transaction(&self, raw_tx: Bytes) -> Result { - let response: FlashbotsResponse = self - .client - .request("eth_sendRawTransaction", (raw_tx,)) - .await?; - Ok(response.tx_hash) - } } -#[derive(Serialize)] -struct FlashbotsRequest { - transaction: String, -} +impl FlashbotsClient +where + S: Signer, +{ + async fn send_private_transaction(&self, raw_tx: Bytes) -> anyhow::Result { + let preferences = Preferences { + fast: false, + privacy: Some(Privacy { + hints: None, + builders: Some(self.builders.clone()), + }), + validity: None, + }; + + let body = json!({ + "jsonrpc": "2.0", + "method": "eth_sendPrivateTransaction", + "params": [ + FlashbotsSendPrivateTransactionRequest { + 
tx: raw_tx, + max_block_number: None, + preferences, + }], + "id": 1 + }); + + let response = self.sign_send_request(body).await?; + + let parsed_response = response + .json::() + .await + .map_err(|e| anyhow!("failed to deserialize Flashbots response: {:?}", e))?; -impl ToRpcParams for FlashbotsRequest { - fn to_rpc_params(self) -> std::result::Result>, jsonrpsee::core::Error> { - let s = String::from_utf8(serde_json::to_vec(&self)?).expect("Valid UTF8 format"); - RawValue::from_string(s) - .map(Some) - .map_err(jsonrpsee::core::Error::ParseError) + Ok(parsed_response.result) } -} -#[derive(Deserialize, Debug)] -#[serde(rename_all = "camelCase")] -struct FlashbotsResponse { - tx_hash: TxHash, -} - -type PinBoxFut<'a, T> = Pin> + Send + 'a>>; - -enum PendingFlashbotsTxState<'a> { - InitialDelay(Pin>), - PausedGettingTx, - GettingTx(PinBoxFut<'a, FlashbotsAPIResponse>), - PausedGettingReceipt, - GettingReceipt(PinBoxFut<'a, Option>), - Completed, -} + async fn cancel_private_transaction(&self, tx_hash: H256) -> anyhow::Result { + let body = json!({ + "jsonrpc": "2.0", + "method": "eth_cancelPrivateTransaction", + "params": [ + FlashbotsCancelPrivateTransactionRequest { tx_hash } + ], + "id": 1 + }); -#[pin_project] -struct PendingFlashbotsTransaction<'a, P> { - tx_hash: H256, - provider: &'a Provider

, - client: &'a FlashbotsClient, - state: PendingFlashbotsTxState<'a>, - interval: Box + Send + Unpin>, -} + let response = self.sign_send_request(body).await?; -impl<'a, P: JsonRpcClient> PendingFlashbotsTransaction<'a, P> { - fn new(tx_hash: H256, provider: &'a Provider

, client: &'a FlashbotsClient) -> Self { - let delay = Box::pin(Delay::new(provider.get_interval())); + let parsed_response = response + .json::() + .await + .map_err(|e| anyhow!("failed to deserialize Flashbots response: {:?}", e))?; - Self { - tx_hash, - provider, - client, - state: PendingFlashbotsTxState::InitialDelay(delay), - interval: Box::new(interval(provider.get_interval())), - } + Ok(parsed_response.result) } -} - -impl<'a, P: JsonRpcClient> Future for PendingFlashbotsTransaction<'a, P> { - type Output = anyhow::Result>; - - fn poll(self: Pin<&mut Self>, ctx: &mut TaskContext<'_>) -> Poll { - let this = self.project(); - - match this.state { - PendingFlashbotsTxState::InitialDelay(fut) => { - futures_util::ready!(fut.as_mut().poll(ctx)); - let status_fut = Box::pin(this.client.status(*this.tx_hash)); - *this.state = PendingFlashbotsTxState::GettingTx(status_fut); - ctx.waker().wake_by_ref(); - return Poll::Pending; - } - PendingFlashbotsTxState::PausedGettingTx => { - let _ready = futures_util::ready!(this.interval.poll_next_unpin(ctx)); - let status_fut = Box::pin(this.client.status(*this.tx_hash)); - *this.state = PendingFlashbotsTxState::GettingTx(status_fut); - ctx.waker().wake_by_ref(); - return Poll::Pending; - } - PendingFlashbotsTxState::GettingTx(fut) => { - let status = futures_util::ready!(fut.as_mut().poll(ctx))?; - tracing::debug!("Transaction:status {:?}:{:?}", *this.tx_hash, status.status); - match status.status { - FlashbotsAPITransactionStatus::Pending => { - *this.state = PendingFlashbotsTxState::PausedGettingTx; - ctx.waker().wake_by_ref(); - } - FlashbotsAPITransactionStatus::Included => { - let receipt_fut = Box::pin( - this.provider - .get_transaction_receipt(*this.tx_hash) - .map_err(|e| anyhow::anyhow!("failed to get receipt: {:?}", e)), - ); - *this.state = PendingFlashbotsTxState::GettingReceipt(receipt_fut); - ctx.waker().wake_by_ref(); - } - FlashbotsAPITransactionStatus::Cancelled => { - return Poll::Ready(Ok(None)); - } - 
FlashbotsAPITransactionStatus::Failed - | FlashbotsAPITransactionStatus::Unknown => { - return Poll::Ready(Err(anyhow::anyhow!( - "transaction failed with status {:?}", - status.status - ))); - } - } - } - PendingFlashbotsTxState::PausedGettingReceipt => { - let _ready = futures_util::ready!(this.interval.poll_next_unpin(ctx)); - let fut = Box::pin( - this.provider - .get_transaction_receipt(*this.tx_hash) - .map_err(|e| anyhow::anyhow!("failed to get receipt: {:?}", e)), - ); - *this.state = PendingFlashbotsTxState::GettingReceipt(fut); - ctx.waker().wake_by_ref(); - } - PendingFlashbotsTxState::GettingReceipt(fut) => { - if let Some(receipt) = futures_util::ready!(fut.as_mut().poll(ctx))? { - *this.state = PendingFlashbotsTxState::Completed; - return Poll::Ready(Ok(Some(receipt))); - } else { - *this.state = PendingFlashbotsTxState::PausedGettingReceipt; - ctx.waker().wake_by_ref(); - } - } - PendingFlashbotsTxState::Completed => { - panic!("polled pending flashbots transaction future after completion") - } - } - Poll::Pending + async fn sign_send_request(&self, body: Value) -> anyhow::Result { + let signature = self + .signer + .sign_message(format!( + "0x{:x}", + H256::from(utils::keccak256(body.to_string())) + )) + .await + .expect("Signature failed"); + let header_val = + HeaderValue::from_str(&format!("{:?}:0x{}", self.signer.address(), signature)) + .expect("Header contains invalid characters"); + + let mut headers = HeaderMap::new(); + headers.insert(CONTENT_TYPE, "application/json".parse().unwrap()); + headers.insert("x-flashbots-signature", header_val); + + // Send the request + self.http_client + .post(&self.relay_url) + .headers(headers) + .body(body.to_string()) + .send() + .await + .map_err(|e| anyhow!("failed to send request to Flashbots: {:?}", e)) } } diff --git a/crates/builder/src/sender/mod.rs b/crates/builder/src/sender/mod.rs index 61ec6897..41955d7d 100644 --- a/crates/builder/src/sender/mod.rs +++ b/crates/builder/src/sender/mod.rs @@ 
-12,31 +12,29 @@ // If not, see https://www.gnu.org/licenses/. mod bloxroute; -mod conditional; mod flashbots; mod raw; -use std::{str::FromStr, sync::Arc, time::Duration}; +use std::sync::Arc; -use anyhow::{bail, Context, Error}; +use anyhow::Context; use async_trait::async_trait; pub(crate) use bloxroute::PolygonBloxrouteTransactionSender; -pub(crate) use conditional::ConditionalTransactionSender; use enum_dispatch::enum_dispatch; use ethers::{ prelude::SignerMiddleware, providers::{JsonRpcClient, Middleware, Provider, ProviderError}, types::{ - transaction::eip2718::TypedTransaction, Address, Bytes, Chain, TransactionReceipt, H256, + transaction::eip2718::TypedTransaction, Address, Bytes, Eip1559TransactionRequest, H256, U256, }, }; -use ethers_signers::Signer; +use ethers_signers::{LocalWallet, Signer}; pub(crate) use flashbots::FlashbotsTransactionSender; #[cfg(test)] use mockall::automock; pub(crate) use raw::RawTransactionSender; use rundler_sim::ExpectedStorage; -use serde::Serialize; +use rundler_types::GasFees; #[derive(Debug)] pub(crate) struct SentTxInfo { @@ -44,6 +42,14 @@ pub(crate) struct SentTxInfo { pub(crate) tx_hash: H256, } +#[derive(Debug)] +pub(crate) struct CancelTxInfo { + pub(crate) tx_hash: H256, + // True if the transaction was soft-cancelled. Soft-cancellation is when the RPC endpoint + // accepts the cancel without an onchain transaction. 
+ pub(crate) soft_cancelled: bool, +} + #[derive(Debug)] pub(crate) enum TxStatus { Pending, @@ -57,6 +63,15 @@ pub(crate) enum TxSenderError { /// Replacement transaction was underpriced #[error("replacement transaction underpriced")] ReplacementUnderpriced, + /// Nonce too low + #[error("nonce too low")] + NonceTooLow, + /// Conditional value not met + #[error("storage slot value condition not met")] + ConditionNotMet, + /// Soft cancellation failed + #[error("soft cancel failed")] + SoftCancelFailed, /// All other errors #[error(transparent)] Other(#[from] anyhow::Error), @@ -65,7 +80,7 @@ pub(crate) enum TxSenderError { pub(crate) type Result = std::result::Result; #[async_trait] -#[enum_dispatch(TransactionSenderEnum<_C,_S>)] +#[enum_dispatch(TransactionSenderEnum<_C,_S,_FS>)] #[cfg_attr(test, automock)] pub(crate) trait TransactionSender: Send + Sync + 'static { async fn send_transaction( @@ -74,109 +89,130 @@ pub(crate) trait TransactionSender: Send + Sync + 'static { expected_storage: &ExpectedStorage, ) -> Result; - async fn get_transaction_status(&self, tx_hash: H256) -> Result; + async fn cancel_transaction( + &self, + tx_hash: H256, + nonce: U256, + to: Address, + gas_fees: GasFees, + ) -> Result; - async fn wait_until_mined(&self, tx_hash: H256) -> Result>; + async fn get_transaction_status(&self, tx_hash: H256) -> Result; fn address(&self) -> Address; } #[enum_dispatch] -pub(crate) enum TransactionSenderEnum +pub(crate) enum TransactionSenderEnum where C: JsonRpcClient + 'static, S: Signer + 'static, + FS: Signer + 'static, { Raw(RawTransactionSender), - Conditional(ConditionalTransactionSender), - Flashbots(FlashbotsTransactionSender), + Flashbots(FlashbotsTransactionSender), PolygonBloxroute(PolygonBloxrouteTransactionSender), } /// Transaction sender types -#[derive(Debug, Clone, Copy, Serialize)] -#[serde(rename_all = "snake_case")] -pub enum TransactionSenderType { +#[derive(Debug, Clone, strum::EnumString)] +#[strum(serialize_all = "lowercase")] 
+pub enum TransactionSenderKind { /// Raw transaction sender Raw, - /// Conditional transaction sender - Conditional, /// Flashbots transaction sender - /// - /// Currently only supported on Eth mainnet Flashbots, /// Bloxroute transaction sender - /// - /// Currently only supported on Polygon mainnet - PolygonBloxroute, + Bloxroute, } -impl FromStr for TransactionSenderType { - type Err = Error; +/// Transaction sender types +#[derive(Debug, Clone)] +pub enum TransactionSenderArgs { + /// Raw transaction sender + Raw(RawSenderArgs), + /// Flashbots transaction sender + Flashbots(FlashbotsSenderArgs), + /// Bloxroute transaction sender + Bloxroute(BloxrouteSenderArgs), +} - fn from_str(s: &str) -> std::result::Result { - match s.to_ascii_lowercase().as_str() { - "raw" => Ok(TransactionSenderType::Raw), - "conditional" => Ok(TransactionSenderType::Conditional), - "flashbots" => Ok(TransactionSenderType::Flashbots), - "polygon_bloxroute" => Ok(TransactionSenderType::PolygonBloxroute), - _ => bail!("Invalid sender input. 
Must be one of either 'raw', 'conditional', 'flashbots' or 'polygon_bloxroute'"), - } - } +/// Raw sender arguments +#[derive(Debug, Clone)] +pub struct RawSenderArgs { + /// Submit URL + pub submit_url: String, + /// Use submit for status + pub use_submit_for_status: bool, + /// If the "dropped" status is supported by the status provider + pub dropped_status_supported: bool, + /// If the sender should use the conditional endpoint + pub use_conditional_rpc: bool, } -impl TransactionSenderType { - fn into_snake_case(self) -> String { - match self { - TransactionSenderType::Raw => "raw", - TransactionSenderType::Conditional => "conditional", - TransactionSenderType::Flashbots => "flashbots", - TransactionSenderType::PolygonBloxroute => "polygon_bloxroute", - } - .to_string() - } +/// Bloxroute sender arguments +#[derive(Debug, Clone)] +pub struct BloxrouteSenderArgs { + /// The auth header to use + pub header: String, +} + +/// Flashbots sender arguments +#[derive(Debug, Clone)] +pub struct FlashbotsSenderArgs { + /// Builder list + pub builders: Vec, + /// Flashbots relay URL + pub relay_url: String, + /// Flashbots protect tx status URL (NOTE: must end in "/") + pub status_url: String, + /// Auth Key + pub auth_key: String, +} +impl TransactionSenderArgs { pub(crate) fn into_sender( self, - client: Arc>, + rpc_provider: Arc>, + submit_provider: Option>>, signer: S, - chain_id: u64, - eth_poll_interval: Duration, - bloxroute_header: &Option, - ) -> std::result::Result, SenderConstructorErrors> { + ) -> std::result::Result, SenderConstructorErrors> + { let sender = match self { - Self::Raw => TransactionSenderEnum::Raw(RawTransactionSender::new(client, signer)), - Self::Conditional => TransactionSenderEnum::Conditional( - ConditionalTransactionSender::new(client, signer), - ), - Self::Flashbots => { - if chain_id != Chain::Mainnet as u64 { - return Err(SenderConstructorErrors::InvalidChainForSender( - chain_id, - self.into_snake_case(), - )); - } - 
TransactionSenderEnum::Flashbots(FlashbotsTransactionSender::new(client, signer)?) - } - Self::PolygonBloxroute => { - if let Some(header) = bloxroute_header { - if chain_id == Chain::Polygon as u64 { - return Err(SenderConstructorErrors::InvalidChainForSender( - chain_id, - self.into_snake_case(), - )); + Self::Raw(args) => { + let (provider, submitter) = if let Some(submit_provider) = submit_provider { + if args.use_submit_for_status { + (Arc::clone(&submit_provider), submit_provider) + } else { + (rpc_provider, submit_provider) } - - TransactionSenderEnum::PolygonBloxroute(PolygonBloxrouteTransactionSender::new( - client, - signer, - eth_poll_interval, - header, - )?) } else { - return Err(SenderConstructorErrors::BloxRouteMissingToken); - } + (Arc::clone(&rpc_provider), rpc_provider) + }; + + TransactionSenderEnum::Raw(RawTransactionSender::new( + provider, + submitter, + signer, + args.dropped_status_supported, + args.use_conditional_rpc, + )) + } + Self::Flashbots(args) => { + let flashbots_signer = args.auth_key.parse().context("should parse auth key")?; + + TransactionSenderEnum::Flashbots(FlashbotsTransactionSender::new( + rpc_provider, + signer, + flashbots_signer, + args.builders, + args.relay_url, + args.status_url, + )?) 
} + Self::Bloxroute(args) => TransactionSenderEnum::PolygonBloxroute( + PolygonBloxrouteTransactionSender::new(rpc_provider, signer, &args.header)?, + ), }; Ok(sender) } @@ -185,15 +221,12 @@ impl TransactionSenderType { /// Custom errors for the sender constructor #[derive(Debug, thiserror::Error)] pub(crate) enum SenderConstructorErrors { - /// Error fallback + /// Sender Error #[error(transparent)] - Internal(#[from] TxSenderError), - /// Invalid Chain ID error for sender - #[error("Chain ID: {0} cannot be used with the {1} sender")] - InvalidChainForSender(u64, String), - /// Bloxroute missing token error - #[error("Missing token for Bloxroute API")] - BloxRouteMissingToken, + Sender(#[from] TxSenderError), + /// Fallback + #[error(transparent)] + Other(#[from] anyhow::Error), } async fn fill_and_sign( @@ -219,6 +252,23 @@ where Ok((tx.rlp_signed(&signature), nonce)) } +fn create_hard_cancel_tx( + from: Address, + to: Address, + nonce: U256, + gas_fees: GasFees, +) -> TypedTransaction { + Eip1559TransactionRequest::new() + .from(from) + .to(to) + .nonce(nonce) + .gas(U256::from(30_000)) + .max_fee_per_gas(gas_fees.max_fee_per_gas) + .max_priority_fee_per_gas(gas_fees.max_priority_fee_per_gas) + .data(Bytes::new()) + .into() +} + impl From for TxSenderError { fn from(value: ProviderError) -> Self { match &value { @@ -226,6 +276,16 @@ impl From for TxSenderError { if let Some(e) = e.as_error_response() { if e.message.contains("replacement transaction underpriced") { return TxSenderError::ReplacementUnderpriced; + } else if e.message.contains("nonce too low") { + return TxSenderError::NonceTooLow; + // Arbitrum conditional sender error message + // TODO push them to use a specific error code and to return the specific slot that is not met. 
+ } else if e + .message + .to_lowercase() + .contains("storage slot value condition not met") + { + return TxSenderError::ConditionNotMet; } } TxSenderError::Other(value.into()) diff --git a/crates/builder/src/sender/raw.rs b/crates/builder/src/sender/raw.rs index e1465603..429b4f57 100644 --- a/crates/builder/src/sender/raw.rs +++ b/crates/builder/src/sender/raw.rs @@ -17,14 +17,18 @@ use anyhow::Context; use async_trait::async_trait; use ethers::{ middleware::SignerMiddleware, - providers::{JsonRpcClient, Middleware, PendingTransaction, Provider}, - types::{transaction::eip2718::TypedTransaction, Address, TransactionReceipt, H256}, + providers::{JsonRpcClient, Middleware, Provider}, + types::{transaction::eip2718::TypedTransaction, Address, H256, U256}, }; use ethers_signers::Signer; use rundler_sim::ExpectedStorage; +use rundler_types::GasFees; +use serde_json::json; -use super::Result; -use crate::sender::{fill_and_sign, SentTxInfo, TransactionSender, TxStatus}; +use super::{CancelTxInfo, Result}; +use crate::sender::{ + create_hard_cancel_tx, fill_and_sign, SentTxInfo, TransactionSender, TxStatus, +}; #[derive(Debug)] pub(crate) struct RawTransactionSender @@ -32,10 +36,13 @@ where C: JsonRpcClient + 'static, S: Signer + 'static, { + provider: Arc>, // The `SignerMiddleware` specifically needs to wrap a `Provider`, and not // just any `Middleware`, because `.request()` is only on `Provider` and not // on `Middleware`. 
- provider: SignerMiddleware>, S>, + submitter: SignerMiddleware>, S>, + dropped_status_supported: bool, + use_conditional_rpc: bool, } #[async_trait] @@ -47,16 +54,49 @@ where async fn send_transaction( &self, tx: TypedTransaction, - _expected_storage: &ExpectedStorage, + expected_storage: &ExpectedStorage, ) -> Result { - let (raw_tx, nonce) = fill_and_sign(&self.provider, tx).await?; + let (raw_tx, nonce) = fill_and_sign(&self.submitter, tx).await?; + + let tx_hash = if self.use_conditional_rpc { + self.submitter + .provider() + .request( + "eth_sendRawTransactionConditional", + (raw_tx, json!({ "knownAccounts": expected_storage })), + ) + .await? + } else { + self.submitter + .provider() + .request("eth_sendRawTransaction", (raw_tx,)) + .await? + }; + + Ok(SentTxInfo { nonce, tx_hash }) + } + + async fn cancel_transaction( + &self, + _tx_hash: H256, + nonce: U256, + to: Address, + gas_fees: GasFees, + ) -> Result { + let tx = create_hard_cancel_tx(self.submitter.address(), to, nonce, gas_fees); + + let (raw_tx, _) = fill_and_sign(&self.submitter, tx).await?; let tx_hash = self - .provider + .submitter .provider() .request("eth_sendRawTransaction", (raw_tx,)) .await?; - Ok(SentTxInfo { nonce, tx_hash }) + + Ok(CancelTxInfo { + tx_hash, + soft_cancelled: false, + }) } async fn get_transaction_status(&self, tx_hash: H256) -> Result { @@ -66,36 +106,24 @@ where .await .context("provider should return transaction status")?; Ok(match tx { -// None => TxStatus::Dropped, None => { - // FIXME - workaround - println!("HC get_transaction_status for {:?} returned None, overriding", tx_hash); - TxStatus::Pending - }, - Some(tx) => - match tx.block_number { - None => { - println!("HC get_transaction_status found tx, no block"); - TxStatus::Pending - }, - Some(block_number) => { - println!("HC get_transaction_status found tx at block {:?}", block_number); - TxStatus::Mined { - block_number: block_number.as_u64(), - } - }, + if self.dropped_status_supported { + 
TxStatus::Dropped + } else { + TxStatus::Pending + } + } + Some(tx) => match tx.block_number { + None => TxStatus::Pending, + Some(block_number) => TxStatus::Mined { + block_number: block_number.as_u64(), }, + }, }) } - async fn wait_until_mined(&self, tx_hash: H256) -> Result> { - Ok(PendingTransaction::new(tx_hash, self.provider.inner()) - .await - .context("should wait for transaction to be mined or dropped")?) - } - fn address(&self) -> Address { - self.provider.address() + self.submitter.address() } } @@ -104,9 +132,18 @@ where C: JsonRpcClient + 'static, S: Signer + 'static, { - pub(crate) fn new(provider: Arc>, signer: S) -> Self { + pub(crate) fn new( + provider: Arc>, + submitter: Arc>, + signer: S, + dropped_status_supported: bool, + use_conditional_rpc: bool, + ) -> Self { Self { - provider: SignerMiddleware::new(provider, signer), + provider, + submitter: SignerMiddleware::new(submitter, signer), + dropped_status_supported, + use_conditional_rpc, } } } diff --git a/crates/builder/src/server/local.rs b/crates/builder/src/server/local.rs index ce620b5b..fa6758ab 100644 --- a/crates/builder/src/server/local.rs +++ b/crates/builder/src/server/local.rs @@ -11,24 +11,17 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. 
-use std::sync::{ - atomic::{AtomicBool, Ordering}, - Arc, -}; - use async_trait::async_trait; use ethers::types::{Address, H256}; use rundler_task::server::{HealthCheck, ServerStatus}; +use rundler_types::builder::{Builder, BuilderError, BuilderResult, BundlingMode}; use tokio::{ sync::{mpsc, oneshot}, task::JoinHandle, }; use tokio_util::sync::CancellationToken; -use crate::{ - bundle_sender::{SendBundleRequest, SendBundleResult}, - server::{BuilderResult, BuilderServer, BuilderServerError, BundlingMode}, -}; +use crate::bundle_sender::{BundleSenderAction, SendBundleRequest, SendBundleResult}; /// Local builder server builder #[derive(Debug)] @@ -57,17 +50,12 @@ impl LocalBuilderBuilder { /// Run the local builder server, consuming the builder pub fn run( self, - manual_bundling_mode: Arc, - send_bundle_requesters: Vec>, + bundle_sender_actions: Vec>, entry_points: Vec

, shutdown_token: CancellationToken, ) -> JoinHandle> { - let mut runner = LocalBuilderServerRunner::new( - self.req_receiver, - manual_bundling_mode, - send_bundle_requesters, - entry_points, - ); + let mut runner = + LocalBuilderServerRunner::new(self.req_receiver, bundle_sender_actions, entry_points); tokio::spawn(async move { runner.run(shutdown_token).await }) } } @@ -80,8 +68,7 @@ pub struct LocalBuilderHandle { struct LocalBuilderServerRunner { req_receiver: mpsc::Receiver, - send_bundle_requesters: Vec>, - manual_bundling_mode: Arc, + bundle_sender_actions: Vec>, entry_points: Vec
, } @@ -103,13 +90,13 @@ impl LocalBuilderHandle { } #[async_trait] -impl BuilderServer for LocalBuilderHandle { +impl Builder for LocalBuilderHandle { async fn get_supported_entry_points(&self) -> BuilderResult> { let req = ServerRequestKind::GetSupportedEntryPoints; let resp = self.send(req).await?; match resp { ServerResponse::GetSupportedEntryPoints { entry_points } => Ok(entry_points), - _ => Err(BuilderServerError::UnexpectedResponse), + _ => Err(BuilderError::UnexpectedResponse), } } @@ -118,7 +105,7 @@ impl BuilderServer for LocalBuilderHandle { let resp = self.send(req).await?; match resp { ServerResponse::DebugSendBundleNow { hash, block_number } => Ok((hash, block_number)), - _ => Err(BuilderServerError::UnexpectedResponse), + _ => Err(BuilderError::UnexpectedResponse), } } @@ -127,7 +114,7 @@ impl BuilderServer for LocalBuilderHandle { let resp = self.send(req).await?; match resp { ServerResponse::DebugSetBundlingMode => Ok(()), - _ => Err(BuilderServerError::UnexpectedResponse), + _ => Err(BuilderError::UnexpectedResponse), } } } @@ -150,14 +137,12 @@ impl HealthCheck for LocalBuilderHandle { impl LocalBuilderServerRunner { fn new( req_receiver: mpsc::Receiver, - manual_bundling_mode: Arc, - send_bundle_requesters: Vec>, + bundle_sender_actions: Vec>, entry_points: Vec
, ) -> Self { Self { req_receiver, - manual_bundling_mode, - send_bundle_requesters, + bundle_sender_actions, entry_points, } } @@ -177,16 +162,14 @@ impl LocalBuilderServerRunner { }) }, ServerRequestKind::DebugSendBundleNow => { - if !self.manual_bundling_mode.load(Ordering::Relaxed) { - break 'a Err(anyhow::anyhow!("bundling mode is not manual").into()) - } else if self.send_bundle_requesters.len() != 1 { + if self.bundle_sender_actions.len() != 1 { break 'a Err(anyhow::anyhow!("more than 1 bundle builder not supported in debug mode").into()) } let (tx, rx) = oneshot::channel(); - match self.send_bundle_requesters[0].send(SendBundleRequest{ + match self.bundle_sender_actions[0].send(BundleSenderAction::SendBundle(SendBundleRequest{ responder: tx - }).await { + })).await { Ok(()) => {}, Err(e) => break 'a Err(anyhow::anyhow!("failed to send send bundle request: {}", e.to_string()).into()) } @@ -203,15 +186,20 @@ impl LocalBuilderServerRunner { SendBundleResult::NoOperationsInitially => { Err(anyhow::anyhow!("no ops to send").into()) }, - SendBundleResult::NoOperationsAfterFeeIncreases { .. 
} => { - Err(anyhow::anyhow!("bundle initially had operations, but after increasing gas fees it was empty").into()) - }, SendBundleResult::StalledAtMaxFeeIncreases => Err(anyhow::anyhow!("stalled at max fee increases").into()), SendBundleResult::Error(e) => Err(anyhow::anyhow!("send bundle error: {e:?}").into()), } }, ServerRequestKind::DebugSetBundlingMode { mode } => { - self.manual_bundling_mode.store(mode == BundlingMode::Manual, Ordering::Relaxed); + if self.bundle_sender_actions.len() != 1 { + break 'a Err(anyhow::anyhow!("more than 1 bundle builder not supported in debug mode").into()) + } + + match self.bundle_sender_actions[0].send(BundleSenderAction::ChangeMode(mode)).await { + Ok(()) => {}, + Err(e) => break 'a Err(anyhow::anyhow!("failed to change bundler mode: {}", e.to_string()).into()) + } + Ok(ServerResponse::DebugSetBundlingMode) }, } diff --git a/crates/builder/src/server/mod.rs b/crates/builder/src/server/mod.rs index 4193e3ad..adf8664b 100644 --- a/crates/builder/src/server/mod.rs +++ b/crates/builder/src/server/mod.rs @@ -12,59 +12,8 @@ // If not, see https://www.gnu.org/licenses/. 
mod local; -mod remote; - -use async_trait::async_trait; -use ethers::types::{Address, H256}; pub use local::{LocalBuilderBuilder, LocalBuilderHandle}; -#[cfg(feature = "test-utils")] -use mockall::automock; -use parse_display::Display; + +mod remote; pub(crate) use remote::spawn_remote_builder_server; pub use remote::RemoteBuilderClient; -use serde::{Deserialize, Serialize}; - -/// Builder server errors -#[derive(Debug, thiserror::Error)] -pub enum BuilderServerError { - /// Builder returned an unexpected response type for the given request - #[error("Unexpected response from BuilderServer")] - UnexpectedResponse, - /// Internal errors - #[error(transparent)] - Other(#[from] anyhow::Error), -} - -/// Builder server result -pub type BuilderResult = std::result::Result; - -/// Builder server -#[cfg_attr(feature = "test-utils", automock)] -#[async_trait] -pub trait BuilderServer: Send + Sync + 'static { - /// Get the supported entry points of this builder - async fn get_supported_entry_points(&self) -> BuilderResult>; - - /// Trigger the builder to send a bundle now, used for debugging. - /// - /// Bundling mode must be set to `Manual`, or this will error - async fn debug_send_bundle_now(&self) -> BuilderResult<(H256, u64)>; - - /// Set the bundling mode - async fn debug_set_bundling_mode(&self, mode: BundlingMode) -> BuilderResult<()>; -} - -/// Builder bundling mode -#[derive(Display, Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)] -#[display(style = "lowercase")] -#[serde(rename_all = "lowercase")] -pub enum BundlingMode { - /// Manual bundling mode for debugging. - /// - /// Bundles will only be sent when `debug_send_bundle_now` is called. - Manual, - /// Auto bundling mode for normal operation. - /// - /// Bundles will be sent automatically. 
- Auto, -} diff --git a/crates/builder/src/server/remote/client.rs b/crates/builder/src/server/remote/client.rs index 34d23e37..494ae0a7 100644 --- a/crates/builder/src/server/remote/client.rs +++ b/crates/builder/src/server/remote/client.rs @@ -18,6 +18,7 @@ use rundler_task::{ grpc::protos::{from_bytes, ConversionError}, server::{HealthCheck, ServerStatus}, }; +use rundler_types::builder::{Builder, BuilderError, BuilderResult, BundlingMode}; use tonic::{ async_trait, transport::{Channel, Uri}, @@ -32,7 +33,6 @@ use super::protos::{ debug_set_bundling_mode_response, BundlingMode as ProtoBundlingMode, DebugSendBundleNowRequest, DebugSetBundlingModeRequest, GetSupportedEntryPointsRequest, }; -use crate::server::{BuilderResult, BuilderServer, BuilderServerError, BundlingMode}; /// Remote builder client, used for communicating with a remote builder server #[derive(Debug, Clone)] @@ -55,18 +55,20 @@ impl RemoteBuilderClient { } #[async_trait] -impl BuilderServer for RemoteBuilderClient { +impl Builder for RemoteBuilderClient { async fn get_supported_entry_points(&self) -> BuilderResult> { Ok(self .grpc_client .clone() .get_supported_entry_points(GetSupportedEntryPointsRequest {}) - .await? + .await + .map_err(anyhow::Error::from)? .into_inner() .entry_points .into_iter() .map(|ep| from_bytes(ep.as_slice())) - .collect::>()?) + .collect::>() + .map_err(anyhow::Error::from)?) } async fn debug_send_bundle_now(&self) -> BuilderResult<(H256, u64)> { @@ -74,7 +76,8 @@ impl BuilderServer for RemoteBuilderClient { .grpc_client .clone() .debug_send_bundle_now(DebugSendBundleNowRequest {}) - .await? + .await + .map_err(anyhow::Error::from)? 
.into_inner() .result; @@ -83,7 +86,7 @@ impl BuilderServer for RemoteBuilderClient { Ok((H256::from_slice(&s.transaction_hash), s.block_number)) } Some(debug_send_bundle_now_response::Result::Failure(f)) => Err(f.try_into()?), - None => Err(BuilderServerError::Other(anyhow::anyhow!( + None => Err(BuilderError::Other(anyhow::anyhow!( "should have received result from builder" )))?, } @@ -96,14 +99,15 @@ impl BuilderServer for RemoteBuilderClient { .debug_set_bundling_mode(DebugSetBundlingModeRequest { mode: ProtoBundlingMode::from(mode) as i32, }) - .await? + .await + .map_err(anyhow::Error::from)? .into_inner() .result; match res { Some(debug_set_bundling_mode_response::Result::Success(_)) => Ok(()), Some(debug_set_bundling_mode_response::Result::Failure(f)) => Err(f.try_into()?), - None => Err(BuilderServerError::Other(anyhow::anyhow!( + None => Err(BuilderError::Other(anyhow::anyhow!( "should have received result from builder" )))?, } diff --git a/crates/builder/src/server/remote/error.rs b/crates/builder/src/server/remote/error.rs index 0b49ea6a..d2264fcb 100644 --- a/crates/builder/src/server/remote/error.rs +++ b/crates/builder/src/server/remote/error.rs @@ -11,45 +11,30 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. 
-use rundler_task::grpc::protos::ConversionError; +use rundler_types::builder::BuilderError; use super::protos::{builder_error, BuilderError as ProtoBuilderError}; -use crate::server::BuilderServerError; -impl From for BuilderServerError { - fn from(value: tonic::Status) -> Self { - BuilderServerError::Other(anyhow::anyhow!(value.to_string())) - } -} - -impl From for BuilderServerError { - fn from(value: ConversionError) -> Self { - BuilderServerError::Other(anyhow::anyhow!(value.to_string())) - } -} - -impl TryFrom for BuilderServerError { +impl TryFrom for BuilderError { type Error = anyhow::Error; fn try_from(value: ProtoBuilderError) -> Result { match value.error { - Some(builder_error::Error::Internal(e)) => { - Ok(BuilderServerError::Other(anyhow::anyhow!(e))) - } - None => Ok(BuilderServerError::Other(anyhow::anyhow!("Unknown error"))), + Some(builder_error::Error::Internal(e)) => Ok(BuilderError::Other(anyhow::anyhow!(e))), + None => Ok(BuilderError::Other(anyhow::anyhow!("Unknown error"))), } } } -impl From for ProtoBuilderError { - fn from(value: BuilderServerError) -> Self { +impl From for ProtoBuilderError { + fn from(value: BuilderError) -> Self { match value { - BuilderServerError::UnexpectedResponse => ProtoBuilderError { + BuilderError::UnexpectedResponse => ProtoBuilderError { error: Some(builder_error::Error::Internal( "Unexpected response".to_string(), )), }, - BuilderServerError::Other(e) => ProtoBuilderError { + BuilderError::Other(e) => ProtoBuilderError { error: Some(builder_error::Error::Internal(e.to_string())), }, } diff --git a/crates/builder/src/server/remote/protos.rs b/crates/builder/src/server/remote/protos.rs index e3f4ae66..35c100cb 100644 --- a/crates/builder/src/server/remote/protos.rs +++ b/crates/builder/src/server/remote/protos.rs @@ -12,8 +12,7 @@ // If not, see https://www.gnu.org/licenses/. 
use rundler_task::grpc::protos::ConversionError; - -use crate::server::BundlingMode as RpcBundlingMode; +use rundler_types::builder::BundlingMode as RpcBundlingMode; tonic::include_proto!("builder"); @@ -40,15 +39,3 @@ impl TryFrom for RpcBundlingMode { } } } - -impl TryFrom for RpcBundlingMode { - type Error = ConversionError; - - fn try_from(status: i32) -> Result { - match status { - x if x == BundlingMode::Auto as i32 => Ok(Self::Auto), - x if x == BundlingMode::Manual as i32 => Ok(Self::Manual), - _ => Err(ConversionError::InvalidEnumValue(status)), - } - } -} diff --git a/crates/builder/src/server/remote/server.rs b/crates/builder/src/server/remote/server.rs index 6c25be14..a9425974 100644 --- a/crates/builder/src/server/remote/server.rs +++ b/crates/builder/src/server/remote/server.rs @@ -13,20 +13,19 @@ use std::net::SocketAddr; +use rundler_types::builder::Builder; use tokio::task::JoinHandle; use tokio_util::sync::CancellationToken; use tonic::{async_trait, transport::Server, Request, Response, Status}; use super::protos::{ builder_server::{Builder as GrpcBuilder, BuilderServer as GrpcBuilderServer}, - debug_send_bundle_now_response, debug_set_bundling_mode_response, DebugSendBundleNowRequest, - DebugSendBundleNowResponse, DebugSetBundlingModeRequest, DebugSetBundlingModeResponse, - DebugSetBundlingModeSuccess, GetSupportedEntryPointsRequest, GetSupportedEntryPointsResponse, - BUILDER_FILE_DESCRIPTOR_SET, -}; -use crate::server::{ - local::LocalBuilderHandle, remote::protos::DebugSendBundleNowSuccess, BuilderServer, + debug_send_bundle_now_response, debug_set_bundling_mode_response, BundlingMode, + DebugSendBundleNowRequest, DebugSendBundleNowResponse, DebugSetBundlingModeRequest, + DebugSetBundlingModeResponse, DebugSetBundlingModeSuccess, GetSupportedEntryPointsRequest, + GetSupportedEntryPointsResponse, BUILDER_FILE_DESCRIPTOR_SET, }; +use crate::server::{local::LocalBuilderHandle, remote::protos::DebugSendBundleNowSuccess}; /// Spawn a remote builder 
server pub(crate) async fn spawn_remote_builder_server( @@ -122,13 +121,14 @@ impl GrpcBuilder for GrpcBuilderServerImpl { &self, request: Request, ) -> tonic::Result> { - let resp = match self - .local_builder - .debug_set_bundling_mode(request.into_inner().mode.try_into().map_err(|e| { - Status::internal(format!("Failed to convert from proto reputation {e}")) - })?) - .await - { + let mode = BundlingMode::try_from(request.into_inner().mode).map_err(|e| { + Status::internal(format!("Failed to convert from proto reputation {e}")) + })?; + let mode = mode.try_into().map_err(|e| { + Status::internal(format!("Failed to convert from proto reputation {e}")) + })?; + + let resp = match self.local_builder.debug_set_bundling_mode(mode).await { Ok(()) => DebugSetBundlingModeResponse { result: Some(debug_set_bundling_mode_response::Result::Success( DebugSetBundlingModeSuccess {}, diff --git a/crates/builder/src/signer/mod.rs b/crates/builder/src/signer/mod.rs index 3a8c608c..ad24babe 100644 --- a/crates/builder/src/signer/mod.rs +++ b/crates/builder/src/signer/mod.rs @@ -63,7 +63,8 @@ pub(crate) async fn monitor_account_balance(addr: Address, provid // converting to u64. This keeps six decimal places. let eth_balance = (balance / 10_u64.pow(12)).as_u64() as f64 / 1e6; tracing::info!("account {addr:?} balance: {}", eth_balance); - metrics::gauge!("bundle_builder_account_balance", eth_balance, "addr" => format!("{addr:?}")); + metrics::gauge!("bundle_builder_account_balance", "addr" => format!("{addr:?}")) + .set(eth_balance); } Err(err) => { tracing::error!("Get account {addr:?} balance error {err:?}"); diff --git a/crates/builder/src/task.rs b/crates/builder/src/task.rs index 2228e318..92d02772 100644 --- a/crates/builder/src/task.rs +++ b/crates/builder/src/task.rs @@ -11,29 +11,28 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. 
-use std::{ - collections::HashMap, - net::SocketAddr, - sync::{atomic::AtomicBool, Arc}, - time::Duration, -}; +use std::{collections::HashMap, net::SocketAddr, sync::Arc, time::Duration}; use anyhow::{bail, Context}; use async_trait::async_trait; use ethers::{ - providers::{JsonRpcClient, Provider}, + providers::{JsonRpcClient, Provider as EthersProvider}, types::{Address, H256}, }; use ethers_signers::Signer; use futures::future; use futures_util::TryFutureExt; -use rundler_pool::PoolServer; +use rundler_provider::{EntryPointProvider, EthersEntryPointV0_6, EthersEntryPointV0_7}; use rundler_sim::{ - MempoolConfig, PriorityFeeMode, SimulateValidationTracerImpl, SimulationSettings, SimulatorImpl, + simulation::{self, UnsafeSimulator}, + MempoolConfig, PriorityFeeMode, SimulationSettings, Simulator, }; use rundler_task::Task; -use rundler_types::contracts::i_entry_point::IEntryPoint; -use rundler_utils::{emit::WithEntryPoint, eth, handle}; +use rundler_types::{ + chain::ChainSpec, pool::Pool, v0_6, v0_7, EntryPointVersion, UserOperation, + UserOperationVariant, +}; +use rundler_utils::{emit::WithEntryPoint, handle}; use rusoto_core::Region; use tokio::{ sync::{broadcast, mpsc}, @@ -45,9 +44,9 @@ use tracing::info; use crate::{ bundle_proposer::{self, BundleProposerImpl}, - bundle_sender::{self, BundleSender, BundleSenderImpl, SendBundleRequest}, + bundle_sender::{self, BundleSender, BundleSenderAction, BundleSenderImpl}, emit::BuilderEvent, - sender::TransactionSenderType, + sender::TransactionSenderArgs, server::{spawn_remote_builder_server, LocalBuilderBuilder}, signer::{BundlerSigner, KmsSigner, LocalSigner}, transaction_tracker::{self, TransactionTrackerImpl}, @@ -58,13 +57,15 @@ use rundler_types::hybrid_compute; /// Builder task arguments #[derive(Debug)] pub struct Args { + /// Chain spec + pub chain_spec: ChainSpec, /// Full node RPC url pub rpc_url: String, - /// Address of the entry point contract this builder targets - pub entry_point_address: Address, + 
/// True if using unsafe mode + pub unsafe_mode: bool, /// Private key to use for signing transactions - /// If not provided, AWS KMS will be used - pub private_key: Option, + /// If empty, AWS KMS will be used + pub private_keys: Vec, /// AWS KMS key ids to use for signing transactions /// Only used if private_key is not provided pub aws_kms_key_ids: Vec, @@ -74,44 +75,45 @@ pub struct Args { pub redis_uri: String, /// Redis lease TTL in milliseconds pub redis_lock_ttl_millis: u64, - /// Chain ID - pub chain_id: u64, /// Maximum bundle size in number of operations pub max_bundle_size: u64, /// Maximum bundle size in gas limit pub max_bundle_gas: u64, - /// URL to submit bundles too - pub submit_url: String, - /// Percentage to add to the the network priority fee for the bundle priority fee + /// Percentage to add to the network priority fee for the bundle priority fee pub bundle_priority_fee_overhead_percent: u64, /// Priority fee mode to use for operation priority fee minimums pub priority_fee_mode: PriorityFeeMode, /// Sender to be used by the builder - pub sender_type: TransactionSenderType, - /// RPC node poll interval - pub eth_poll_interval: Duration, + pub sender_args: TransactionSenderArgs, /// Operation simulation settings pub sim_settings: SimulationSettings, - /// Alt-mempool configs - pub mempool_configs: HashMap, /// Maximum number of blocks to wait for a transaction to be mined pub max_blocks_to_wait_for_mine: u64, /// Percentage to increase the fees by when replacing a bundle transaction pub replacement_fee_percent_increase: u64, - /// Maximum number of times to increase the fees when replacing a bundle transaction - pub max_fee_increases: u64, + /// Maximum number of times to increase the fee when cancelling a transaction + pub max_cancellation_fee_increases: u64, + /// Maximum amount of blocks to spend in a replacement underpriced state before moving to cancel + pub max_replacement_underpriced_blocks: u64, /// Address to bind the remote builder 
server to, if any. If none, no server is starter. pub remote_address: Option, - /// Optional Bloxroute auth header - /// - /// This is only used for Polygon. - /// - /// Checked ~after~ checking for conditional sender or Flashbots sender. - pub bloxroute_auth_header: Option, + /// Entry points to start builders for + pub entry_points: Vec, +} + +/// Builder settings for an entrypoint +#[derive(Debug)] +pub struct EntryPointBuilderSettings { + /// Entry point address + pub address: Address, + /// Entry point version + pub version: EntryPointVersion, /// Number of bundle builders to start pub num_bundle_builders: u64, /// Index offset for bundle builders pub bundle_builder_index_offset: u64, + /// Mempool configs + pub mempool_configs: HashMap, } /// Builder task @@ -126,27 +128,67 @@ pub struct BuilderTask

{ #[async_trait] impl

Task for BuilderTask

where - P: PoolServer + Clone, + P: Pool + Clone, { async fn run(mut self: Box, shutdown_token: CancellationToken) -> anyhow::Result<()> { - info!("Mempool config: {:?}", self.args.mempool_configs); + let provider = rundler_provider::new_provider(&self.args.rpc_url, None)?; + let submit_provider = if let TransactionSenderArgs::Raw(args) = &self.args.sender_args { + Some(rundler_provider::new_provider(&args.submit_url, None)?) + } else { + None + }; - let provider = eth::new_provider(&self.args.rpc_url, Some(self.args.eth_poll_interval))?; - let manual_bundling_mode = Arc::new(AtomicBool::new(false)); + let ep_v0_6 = EthersEntryPointV0_6::new( + self.args.chain_spec.entry_point_address_v0_6, + &self.args.chain_spec, + self.args.sim_settings.max_simulate_handle_ops_gas, + Arc::clone(&provider), + ); + let ep_v0_7 = EthersEntryPointV0_7::new( + self.args.chain_spec.entry_point_address_v0_7, + &self.args.chain_spec, + self.args.sim_settings.max_simulate_handle_ops_gas, + Arc::clone(&provider), + ); let mut sender_handles = vec![]; - let mut send_bundle_txs = vec![]; - for i in 0..self.args.num_bundle_builders { - let (spawn_guard, send_bundle_tx) = self - .create_bundle_builder( - i + self.args.bundle_builder_index_offset, - Arc::clone(&manual_bundling_mode), - Arc::clone(&provider), - ) - .await?; - sender_handles.push(spawn_guard); - send_bundle_txs.push(send_bundle_tx); + let mut bundle_sender_actions = vec![]; + let mut pk_iter = self.args.private_keys.clone().into_iter(); + + for ep in &self.args.entry_points { + match ep.version { + EntryPointVersion::V0_6 => { + let (handles, actions) = self + .create_builders_v0_6( + ep, + Arc::clone(&provider), + submit_provider.clone(), + ep_v0_6.clone(), + &mut pk_iter, + ) + .await?; + sender_handles.extend(handles); + bundle_sender_actions.extend(actions); + } + EntryPointVersion::V0_7 => { + let (handles, actions) = self + .create_builders_v0_7( + ep, + Arc::clone(&provider), + submit_provider.clone(), + ep_v0_7.clone(), 
+ &mut pk_iter, + ) + .await?; + sender_handles.extend(handles); + bundle_sender_actions.extend(actions); + } + EntryPointVersion::Unspecified => { + panic!("Unspecified entry point version") + } + } } + // flatten the senders handles to one handle, short-circuit on errors let sender_handle = tokio::spawn( future::try_join_all(sender_handles) @@ -156,9 +198,8 @@ where let builder_handle = self.builder_builder.get_handle(); let builder_runnder_handle = self.builder_builder.run( - manual_bundling_mode, - send_bundle_txs, - vec![self.args.entry_point_address], + bundle_sender_actions, + vec![self.args.chain_spec.entry_point_address_v0_6], shutdown_token.clone(), ); @@ -166,7 +207,7 @@ where Some(addr) => { spawn_remote_builder_server( addr, - self.args.chain_id, + self.args.chain_spec.id, builder_handle, shutdown_token, ) @@ -196,7 +237,7 @@ where impl

BuilderTask

where - P: PoolServer + Clone, + P: Pool + Clone, { /// Create a new builder task pub fn new( @@ -218,34 +259,161 @@ where Box::new(self) } - async fn create_bundle_builder( + async fn create_builders_v0_6( + &self, + ep: &EntryPointBuilderSettings, + provider: Arc>, + submit_provider: Option>>, + ep_v0_6: E, + pk_iter: &mut I, + ) -> anyhow::Result<( + Vec>>, + Vec>, + )> + where + C: JsonRpcClient + 'static, + E: EntryPointProvider + Clone, + I: Iterator, + { + info!("Mempool config for ep v0.6: {:?}", ep.mempool_configs); + let mut sender_handles = vec![]; + let mut bundle_sender_actions = vec![]; + for i in 0..ep.num_bundle_builders { + let (spawn_guard, bundle_sender_action) = if self.args.unsafe_mode { + self.create_bundle_builder( + i + ep.bundle_builder_index_offset, + Arc::clone(&provider), + submit_provider.clone(), + ep_v0_6.clone(), + UnsafeSimulator::new( + Arc::clone(&provider), + ep_v0_6.clone(), + self.args.sim_settings.clone(), + ), + pk_iter, + ) + .await? + } else { + self.create_bundle_builder( + i + ep.bundle_builder_index_offset, + Arc::clone(&provider), + submit_provider.clone(), + ep_v0_6.clone(), + simulation::new_v0_6_simulator( + Arc::clone(&provider), + ep_v0_6.clone(), + self.args.sim_settings.clone(), + ep.mempool_configs.clone(), + ), + pk_iter, + ) + .await? 
+ }; + sender_handles.push(spawn_guard); + bundle_sender_actions.push(bundle_sender_action); + } + Ok((sender_handles, bundle_sender_actions)) + } + + async fn create_builders_v0_7( + &self, + ep: &EntryPointBuilderSettings, + provider: Arc>, + submit_provider: Option>>, + ep_v0_7: E, + pk_iter: &mut I, + ) -> anyhow::Result<( + Vec>>, + Vec>, + )> + where + C: JsonRpcClient + 'static, + E: EntryPointProvider + Clone, + I: Iterator, + { + info!("Mempool config for ep v0.7: {:?}", ep.mempool_configs); + let mut sender_handles = vec![]; + let mut bundle_sender_actions = vec![]; + for i in 0..ep.num_bundle_builders { + let (spawn_guard, bundle_sender_action) = if self.args.unsafe_mode { + self.create_bundle_builder( + i + ep.bundle_builder_index_offset, + Arc::clone(&provider), + submit_provider.clone(), + ep_v0_7.clone(), + UnsafeSimulator::new( + Arc::clone(&provider), + ep_v0_7.clone(), + self.args.sim_settings.clone(), + ), + pk_iter, + ) + .await? + } else { + self.create_bundle_builder( + i + ep.bundle_builder_index_offset, + Arc::clone(&provider), + submit_provider.clone(), + ep_v0_7.clone(), + simulation::new_v0_7_simulator( + Arc::clone(&provider), + ep_v0_7.clone(), + self.args.sim_settings.clone(), + ep.mempool_configs.clone(), + ), + pk_iter, + ) + .await? 
+ }; + sender_handles.push(spawn_guard); + bundle_sender_actions.push(bundle_sender_action); + } + Ok((sender_handles, bundle_sender_actions)) + } + + async fn create_bundle_builder( &self, index: u64, - manual_bundling_mode: Arc, - provider: Arc>, + provider: Arc>, + submit_provider: Option>>, + entry_point: E, + simulator: S, + pk_iter: &mut I, ) -> anyhow::Result<( JoinHandle>, - mpsc::Sender, - )> { + mpsc::Sender, + )> + where + UO: UserOperation + From, + UserOperationVariant: AsRef, + E: EntryPointProvider + Clone, + S: Simulator, + C: JsonRpcClient + 'static, + I: Iterator, + { let (send_bundle_tx, send_bundle_rx) = mpsc::channel(1); - let signer = if let Some(pk) = &self.args.private_key { + let signer = if let Some(pk) = pk_iter.next() { info!("Using local signer"); BundlerSigner::Local( - LocalSigner::connect(Arc::clone(&provider), self.args.chain_id, pk.to_owned()) - .await?, + LocalSigner::connect( + Arc::clone(&provider), + self.args.chain_spec.id, + pk.to_owned(), + ) + .await?, ) } else { info!("Using AWS KMS signer"); let signer = time::timeout( - // timeout must be << than the lock TTL to avoid a + // timeout must be < than the lock TTL to avoid a // bug in the redis lock implementation that panics if connection // takes longer than the TTL. Generally the TLL should be on the order of 10s of seconds // so this should give ample time for the connection to establish. 
- Duration::from_millis(self.args.redis_lock_ttl_millis / 10), + Duration::from_millis(self.args.redis_lock_ttl_millis / 4), KmsSigner::connect( Arc::clone(&provider), - self.args.chain_id, + self.args.chain_spec.id, self.args.aws_kms_region.clone(), self.args.aws_kms_key_ids.clone(), self.args.redis_uri.clone(), @@ -260,9 +428,9 @@ where ret }; let beneficiary = signer.address(); - hybrid_compute::set_signer(signer.address()); + hybrid_compute::set_signer(signer.address()); let proposer_settings = bundle_proposer::Settings { - chain_id: self.args.chain_id, + chain_spec: self.args.chain_spec.clone(), max_bundle_size: self.args.max_bundle_size, max_bundle_gas: self.args.max_bundle_gas, beneficiary, @@ -270,31 +438,13 @@ where bundle_priority_fee_overhead_percent: self.args.bundle_priority_fee_overhead_percent, }; - let entry_point = IEntryPoint::new(self.args.entry_point_address, Arc::clone(&provider)); - let simulate_validation_tracer = - SimulateValidationTracerImpl::new(Arc::clone(&provider), entry_point.clone()); - let simulator = SimulatorImpl::new( + let transaction_sender = self.args.sender_args.clone().into_sender( Arc::clone(&provider), - entry_point.address(), - simulate_validation_tracer, - self.args.sim_settings, - self.args.mempool_configs.clone(), - ); - - let submit_provider = - eth::new_provider(&self.args.submit_url, Some(self.args.eth_poll_interval))?; - - let transaction_sender = self.args.sender_type.into_sender( submit_provider, signer, - self.args.chain_id, - self.args.eth_poll_interval, - &self.args.bloxroute_auth_header, )?; let tracker_settings = transaction_tracker::Settings { - poll_interval: self.args.eth_poll_interval, - max_blocks_to_wait_for_mine: self.args.max_blocks_to_wait_for_mine, replacement_fee_percent_increase: self.args.replacement_fee_percent_increase, }; @@ -302,12 +452,14 @@ where Arc::clone(&provider), transaction_sender, tracker_settings, + index, ) .await?; let builder_settings = bundle_sender::Settings { - 
replacement_fee_percent_increase: self.args.replacement_fee_percent_increase, - max_fee_increases: self.args.max_fee_increases, + max_replacement_underpriced_blocks: self.args.max_replacement_underpriced_blocks, + max_cancellation_fee_increases: self.args.max_cancellation_fee_increases, + max_blocks_to_wait_for_mine: self.args.max_blocks_to_wait_for_mine, }; let proposer = BundleProposerImpl::new( @@ -321,9 +473,8 @@ where ); let builder = BundleSenderImpl::new( index, - manual_bundling_mode.clone(), send_bundle_rx, - self.args.chain_id, + self.args.chain_spec.clone(), beneficiary, proposer, entry_point, diff --git a/crates/builder/src/transaction_tracker.rs b/crates/builder/src/transaction_tracker.rs index b123ad8f..16b402b6 100644 --- a/crates/builder/src/transaction_tracker.rs +++ b/crates/builder/src/transaction_tracker.rs @@ -11,16 +11,17 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. -use std::{sync::Arc, time::Duration}; +use std::sync::Arc; use anyhow::{bail, Context}; use async_trait::async_trait; -use ethers::types::{transaction::eip2718::TypedTransaction, H256, U256}; +use ethers::types::{transaction::eip2718::TypedTransaction, Address, H256, U256}; +#[cfg(test)] +use mockall::automock; use rundler_provider::Provider; use rundler_sim::ExpectedStorage; use rundler_types::GasFees; -use tokio::time; -use tracing::{info, warn}; +use tracing::{debug, info, warn}; use crate::sender::{TransactionSender, TxSenderError, TxStatus}; @@ -34,39 +35,67 @@ use crate::sender::{TransactionSender, TxSenderError, TxStatus}; /// succeeded (potentially not the most recent one) or whether circumstances /// have changed so that it is worth making another attempt. 
#[async_trait] +#[cfg_attr(test, automock)] pub(crate) trait TransactionTracker: Send + Sync + 'static { - fn get_nonce_and_required_fees(&self) -> anyhow::Result<(U256, Option)>; + /// Returns the current nonce and the required fees for the next transaction. + fn get_nonce_and_required_fees(&self) -> TransactionTrackerResult<(U256, Option)>; /// Sends the provided transaction and typically returns its transaction /// hash, but if the transaction failed to send because another transaction /// with the same nonce mined first, then returns information about that /// transaction instead. async fn send_transaction( - &self, + &mut self, tx: TypedTransaction, expected_stroage: &ExpectedStorage, - ) -> anyhow::Result; + ) -> TransactionTrackerResult; - /// Waits until one of the following occurs: + /// Cancel the abandoned transaction in the tracker. /// + /// Returns: An option containing the hash of the transaction that was used to cancel. If the option + /// is empty, then either no transaction was cancelled or the cancellation was a "soft-cancel." + async fn cancel_transaction( + &mut self, + to: Address, + estimated_fees: GasFees, + ) -> TransactionTrackerResult>; + + /// Checks: /// 1. One of our transactions mines (not necessarily the one just sent). /// 2. All our send transactions have dropped. /// 3. Our nonce has changed but none of our transactions mined. This means /// that a transaction from our account other than one of the ones we are /// tracking has mined. This should not normally happen. /// 4. Several new blocks have passed. - async fn wait_for_update(&self) -> anyhow::Result; + async fn check_for_update(&mut self) -> TransactionTrackerResult>; - /// Like `wait_for_update`, except it returns immediately if there is no - /// update rather than waiting for several new blocks. - async fn check_for_update_now(&self) -> anyhow::Result>; + /// Resets the tracker to its initial state + async fn reset(&mut self); + + /// Abandons the current transaction. 
+ /// The current transaction will still be tracked, but will no longer be considered during fee estimation + fn abandon(&mut self); + + /// Un-abandons the current transaction + fn unabandon(&mut self); } -pub(crate) enum SendResult { - TxHash(H256), - TrackerUpdate(TrackerUpdate), +/// Errors that can occur while using a `TransactionTracker`. +#[derive(Debug, thiserror::Error)] +pub(crate) enum TransactionTrackerError { + #[error("nonce too low")] + NonceTooLow, + #[error("replacement transaction underpriced")] + ReplacementUnderpriced, + #[error("storage slot value condition not met")] + ConditionNotMet, + /// All other errors + #[error(transparent)] + Other(#[from] anyhow::Error), } +pub(crate) type TransactionTrackerResult = std::result::Result; + #[derive(Debug)] #[allow(dead_code)] pub(crate) enum TrackerUpdate { @@ -77,27 +106,18 @@ pub(crate) enum TrackerUpdate { attempt_number: u64, gas_limit: Option, gas_used: Option, + gas_price: Option, }, - StillPendingAfterWait, LatestTxDropped { nonce: U256, }, NonceUsedForOtherTx { nonce: U256, }, - ReplacementUnderpriced, } #[derive(Debug)] -pub(crate) struct TransactionTrackerImpl( - tokio::sync::Mutex>, -) -where - P: Provider, - T: TransactionSender; - -#[derive(Debug)] -struct TransactionTrackerImplInner +pub(crate) struct TransactionTrackerImpl where P: Provider, T: TransactionSender, @@ -105,16 +125,15 @@ where provider: Arc

, sender: T, settings: Settings, + builder_index: u64, nonce: U256, transactions: Vec, - has_dropped: bool, + has_abandoned: bool, attempt_count: u64, } #[derive(Clone, Copy, Debug)] pub(crate) struct Settings { - pub(crate) poll_interval: Duration, - pub(crate) max_blocks_to_wait_for_mine: u64, pub(crate) replacement_fee_percent_increase: u64, } @@ -125,33 +144,6 @@ struct PendingTransaction { attempt_number: u64, } -#[async_trait] -impl TransactionTracker for TransactionTrackerImpl -where - P: Provider, - T: TransactionSender, -{ - fn get_nonce_and_required_fees(&self) -> anyhow::Result<(U256, Option)> { - Ok(self.inner()?.get_nonce_and_required_fees()) - } - - async fn send_transaction( - &self, - tx: TypedTransaction, - expected_storage: &ExpectedStorage, - ) -> anyhow::Result { - self.inner()?.send_transaction(tx, expected_storage).await - } - - async fn wait_for_update(&self) -> anyhow::Result { - self.inner()?.wait_for_update().await - } - - async fn check_for_update_now(&self) -> anyhow::Result> { - self.inner()?.check_for_update_now().await - } -} - impl TransactionTrackerImpl where P: Provider, @@ -161,26 +153,8 @@ where provider: Arc

, sender: T, settings: Settings, + builder_index: u64, ) -> anyhow::Result { - let inner = TransactionTrackerImplInner::new(provider, sender, settings).await?; - Ok(Self(tokio::sync::Mutex::new(inner))) - } - - fn inner( - &self, - ) -> anyhow::Result>> { - self.0 - .try_lock() - .context("tracker should not be called while waiting for a transaction") - } -} - -impl TransactionTrackerImplInner -where - P: Provider, - T: TransactionSender, -{ - async fn new(provider: Arc

, sender: T, settings: Settings) -> anyhow::Result { let nonce = provider .get_transaction_count(sender.address()) .await @@ -189,15 +163,94 @@ where provider, sender, settings, + builder_index, nonce, transactions: vec![], - has_dropped: false, + has_abandoned: false, attempt_count: 0, }) } - fn get_nonce_and_required_fees(&self) -> (U256, Option) { - let gas_fees = if self.has_dropped { + fn set_nonce_and_clear_state(&mut self, nonce: U256) { + self.nonce = nonce; + self.transactions.clear(); + self.attempt_count = 0; + self.has_abandoned = false; + self.update_metrics(); + } + + async fn get_external_nonce(&self) -> anyhow::Result { + self.provider + .get_transaction_count(self.sender.address()) + .await + .context("tracker should load current nonce from provider") + } + + fn validate_transaction(&self, tx: &TypedTransaction) -> anyhow::Result<()> { + let Some(&nonce) = tx.nonce() else { + bail!("transaction given to tracker should have nonce set"); + }; + let gas_fees = GasFees::from(tx); + let (required_nonce, required_gas_fees) = self.get_nonce_and_required_fees()?; + if nonce != required_nonce { + bail!("tried to send transaction with nonce {nonce}, but should match tracker's nonce of {required_nonce}"); + } + if let Some(required_gas_fees) = required_gas_fees { + if gas_fees.max_fee_per_gas < required_gas_fees.max_fee_per_gas + || gas_fees.max_priority_fee_per_gas < required_gas_fees.max_priority_fee_per_gas + { + bail!("new transaction's gas fees should be at least the required fees") + } + } + Ok(()) + } + + fn update_metrics(&self) { + TransactionTrackerMetrics::set_num_pending_transactions( + self.builder_index, + self.transactions.len(), + ); + TransactionTrackerMetrics::set_nonce(self.builder_index, self.nonce); + TransactionTrackerMetrics::set_attempt_count(self.builder_index, self.attempt_count); + if let Some(tx) = self.transactions.last() { + TransactionTrackerMetrics::set_current_fees(self.builder_index, Some(tx.gas_fees)); + } else { + 
TransactionTrackerMetrics::set_current_fees(self.builder_index, None); + } + } + + async fn get_mined_tx_gas_info( + &self, + tx_hash: H256, + ) -> anyhow::Result<(Option, Option, Option)> { + let (tx, tx_receipt) = tokio::try_join!( + self.provider.get_transaction(tx_hash), + self.provider.get_transaction_receipt(tx_hash), + )?; + println!("HC get_mined_tx_gas_info looking for hash {:?} got tx {:?} receipt {:?}", tx_hash, tx, tx_receipt); + let gas_limit = tx.map(|t| t.gas).or_else(|| { + warn!("failed to fetch transaction data for tx: {}", tx_hash); + None + }); + let (gas_used, gas_price) = match tx_receipt { + Some(r) => (r.gas_used, r.effective_gas_price), + None => { + warn!("failed to fetch transaction receipt for tx: {}", tx_hash); + (None, None) + } + }; + Ok((gas_limit, gas_used, gas_price)) + } +} + +#[async_trait] +impl TransactionTracker for TransactionTrackerImpl +where + P: Provider, + T: TransactionSender, +{ + fn get_nonce_and_required_fees(&self) -> TransactionTrackerResult<(U256, Option)> { + let gas_fees = if self.has_abandoned { None } else { self.transactions.last().map(|tx| { @@ -205,96 +258,129 @@ where .increase_by_percent(self.settings.replacement_fee_percent_increase) }) }; - (self.nonce, gas_fees) + Ok((self.nonce, gas_fees)) } async fn send_transaction( &mut self, tx: TypedTransaction, expected_storage: &ExpectedStorage, - ) -> anyhow::Result { + ) -> TransactionTrackerResult { self.validate_transaction(&tx)?; - let gas_fees = GasFees::from(&tx); println!("HC send_transaction will send tx {:?}", tx.clone()); - let send_result = self.sender.send_transaction(tx, expected_storage).await; - println!("HC send_transaction result {:?}", send_result); - let sent_tx = match send_result { - Ok(sent_tx) => sent_tx, - Err(error) => { - let tracker_update = self.handle_send_error(error).await?; - return Ok(SendResult::TrackerUpdate(tracker_update)); + let gas_fees = GasFees::from(&tx); + info!( + "Sending transaction with nonce: {:?} gas fees: {:?} 
gas limit: {:?}", + self.nonce, + gas_fees, + tx.gas() + ); + let sent_tx = self.sender.send_transaction(tx, expected_storage).await; + println!("HC send_transaction result {:?}", sent_tx); + + match sent_tx { + Ok(sent_tx) => { + info!( + "Sent transaction {:?} nonce: {:?}", + sent_tx.tx_hash, sent_tx.nonce + ); + self.transactions.push(PendingTransaction { + tx_hash: sent_tx.tx_hash, + gas_fees, + attempt_number: self.attempt_count, + }); + self.has_abandoned = false; + self.attempt_count += 1; + self.update_metrics(); + Ok(sent_tx.tx_hash) } + Err(e) if matches!(e, TxSenderError::ReplacementUnderpriced) => { + // Only can get into this state if there is an unknown pending transaction causing replacement + // underpriced errors, or if the last transaction was abandoned. + info!("Replacement underpriced: nonce: {:?}", self.nonce); + + // Store this transaction as pending if last is empty or if it has higher gas fees than last + // so that we can continue to increase fees. + if self.transactions.last().map_or(true, |t| { + gas_fees.max_fee_per_gas > t.gas_fees.max_fee_per_gas + && gas_fees.max_priority_fee_per_gas > t.gas_fees.max_priority_fee_per_gas + }) { + self.transactions.push(PendingTransaction { + tx_hash: H256::zero(), + gas_fees, + attempt_number: self.attempt_count, + }); + }; + + self.has_abandoned = false; + self.attempt_count += 1; + self.update_metrics(); + Err(e.into()) + } + Err(e) => Err(e.into()), + } + } + + async fn cancel_transaction( + &mut self, + to: Address, + estimated_fees: GasFees, + ) -> TransactionTrackerResult> { + let (tx_hash, gas_fees) = match self.transactions.last() { + Some(tx) => { + let increased_fees = tx + .gas_fees + .increase_by_percent(self.settings.replacement_fee_percent_increase); + let gas_fees = GasFees { + max_fee_per_gas: increased_fees + .max_fee_per_gas + .max(estimated_fees.max_fee_per_gas), + max_priority_fee_per_gas: increased_fees + .max_priority_fee_per_gas + .max(estimated_fees.max_priority_fee_per_gas), + 
}; + (tx.tx_hash, gas_fees) + } + None => (H256::zero(), estimated_fees), }; + + let cancel_info = self + .sender + .cancel_transaction(tx_hash, self.nonce, to, gas_fees) + .await?; + + if cancel_info.soft_cancelled { + // If the transaction was soft-cancelled. Reset internal state. + self.reset().await; + return Ok(None); + } + info!( - "Sent transaction {:?} nonce: {:?}", - sent_tx.tx_hash, sent_tx.nonce + "Sent cancellation tx {:?} fees: {:?}", + cancel_info.tx_hash, gas_fees ); + self.transactions.push(PendingTransaction { - tx_hash: sent_tx.tx_hash, + tx_hash: cancel_info.tx_hash, gas_fees, attempt_number: self.attempt_count, }); - self.has_dropped = false; + self.attempt_count += 1; self.update_metrics(); - Ok(SendResult::TxHash(sent_tx.tx_hash)) + Ok(Some(cancel_info.tx_hash)) } - /// When we fail to send a transaction, it may be because another - /// transaction has mined before it could be sent, invalidating the nonce. - /// Thus, do one last check for an update before returning the error. - async fn handle_send_error(&mut self, error: TxSenderError) -> anyhow::Result { - match &error { - TxSenderError::ReplacementUnderpriced => { - return Ok(TrackerUpdate::ReplacementUnderpriced) - } - TxSenderError::Other(_error) => {} - } - - let update = self.check_for_update_now().await?; - let Some(update) = update else { - return Err(error.into()); - }; - match &update { - TrackerUpdate::StillPendingAfterWait | TrackerUpdate::LatestTxDropped { .. 
} => { - Err(error.into()) - } - _ => Ok(update), - } - } - - async fn wait_for_update(&mut self) -> anyhow::Result { - let start_block_number = self - .provider - .get_block_number() - .await - .context("tracker should get starting block when waiting for update")?; - let end_block_number = start_block_number + self.settings.max_blocks_to_wait_for_mine; - loop { - let update = self.check_for_update_now().await?; - if let Some(update) = update { - println!("HC wait_for_update found {:?}", update); - return Ok(update); - } - let current_block_number = self - .provider - .get_block_number() - .await - .context("tracker should get current block when polling for updates")?; - println!("HC wait_for_update at {:?}/{:?}", current_block_number, end_block_number); - if end_block_number <= current_block_number { - return Ok(TrackerUpdate::StillPendingAfterWait); - } - time::sleep(self.settings.poll_interval).await; - } - } - - async fn check_for_update_now(&mut self) -> anyhow::Result> { + async fn check_for_update(&mut self) -> TransactionTrackerResult> { let external_nonce = self.get_external_nonce().await?; - println!("HC check_for_update_now at self.nonce {:?} external_nonce {:?}", self.nonce, external_nonce); if self.nonce < external_nonce { + println!("HC check_for_update_now at self.nonce {:?} external_nonce {:?}", self.nonce, external_nonce); // The nonce has changed. Check to see which of our transactions has // mined, if any. 
+ debug!( + "Nonce has changed from {:?} to {:?}", + self.nonce, external_nonce + ); let mut out = TrackerUpdate::NonceUsedForOtherTx { nonce: self.nonce }; for tx in self.transactions.iter().rev() { @@ -303,9 +389,11 @@ where .get_transaction_status(tx.tx_hash) .await .context("tracker should check transaction status when the nonce changes")?; - println!("HC check_for_update_now status after nonce change {:?}", status); + println!("HC check_for_update_now status after nonce change {:?}", status); + info!("Status of tx {:?}: {:?}", tx.tx_hash, status); if let TxStatus::Mined { block_number } = status { - let (gas_limit, gas_used) = self.get_mined_tx_gas_info(tx.tx_hash).await?; + let (gas_limit, gas_used, gas_price) = + self.get_mined_tx_gas_info(tx.tx_hash).await?; out = TrackerUpdate::Mined { tx_hash: tx.tx_hash, nonce: self.nonce, @@ -313,6 +401,7 @@ where attempt_number: tx.attempt_number, gas_limit, gas_used, + gas_price, }; break; } @@ -320,31 +409,30 @@ where self.set_nonce_and_clear_state(external_nonce); return Ok(Some(out)); } - // The nonce has not changed. Check to see if the latest transaction has - // dropped. - if self.has_dropped { - println!("HC check_for_update_now self.has_dropped"); - // has_dropped being true means that no new transactions have been - // added since the last time we checked, hence no update. - return Ok(None); - } + let Some(&last_tx) = self.transactions.last() else { - //println!("HC check_for_update_now no update"); // If there are no pending transactions, there's no update either. return Ok(None); }; + + if last_tx.tx_hash == H256::zero() { + // If the last transaction was a replacement that failed to send, we + // don't need to check for updates. 
+ return Ok(None); + } + let status = self .sender .get_transaction_status(last_tx.tx_hash) .await - .context("tracker should check for dropped transactions")?; - println!("HC check_for_update_now status {:?}", status); + .context("tracker should check for transaction status")?; Ok(match status { - TxStatus::Pending | TxStatus::Dropped => None, + TxStatus::Pending => None, TxStatus::Mined { block_number } => { let nonce = self.nonce; self.set_nonce_and_clear_state(nonce + 1); - let (gas_limit, gas_used) = self.get_mined_tx_gas_info(last_tx.tx_hash).await?; + let (gas_limit, gas_used, gas_price) = + self.get_mined_tx_gas_info(last_tx.tx_hash).await?; Some(TrackerUpdate::Mined { tx_hash: last_tx.tx_hash, nonce, @@ -352,113 +440,69 @@ where attempt_number: last_tx.attempt_number, gas_limit, gas_used, + gas_price, }) - } // TODO(#295): dropped status is often incorrect, for now just assume its still pending - // TxStatus::Dropped => { - // self.has_dropped = true; - // Some(TrackerUpdate::LatestTxDropped { nonce: self.nonce }) - // } + } + TxStatus::Dropped => Some(TrackerUpdate::LatestTxDropped { nonce: self.nonce }), }) } - fn set_nonce_and_clear_state(&mut self, nonce: U256) { - self.nonce = nonce; - self.transactions.clear(); - self.has_dropped = false; - self.attempt_count = 0; - self.update_metrics(); + async fn reset(&mut self) { + let nonce = self.get_external_nonce().await.unwrap_or(self.nonce); + self.set_nonce_and_clear_state(nonce); } - async fn get_external_nonce(&self) -> anyhow::Result { - self.provider - .get_transaction_count(self.sender.address()) - .await - .context("tracker should load current nonce from provider") - } - - fn validate_transaction(&self, tx: &TypedTransaction) -> anyhow::Result<()> { - let Some(&nonce) = tx.nonce() else { - bail!("transaction given to tracker should have nonce set"); - }; - let gas_fees = GasFees::from(tx); - let (required_nonce, required_gas_fees) = self.get_nonce_and_required_fees(); - if nonce != required_nonce { 
- bail!("tried to send transaction with nonce {nonce}, but should match tracker's nonce of {required_nonce}"); - } - if let Some(required_gas_fees) = required_gas_fees { - if gas_fees.max_fee_per_gas < required_gas_fees.max_fee_per_gas - || gas_fees.max_priority_fee_per_gas < required_gas_fees.max_priority_fee_per_gas - { - bail!("new transaction's gas fees should be at least the required fees") - } - } - Ok(()) + fn abandon(&mut self) { + self.has_abandoned = true; + self.attempt_count = 0; + // remember the transaction in case we need to cancel it } - fn update_metrics(&self) { - TransactionTrackerMetrics::set_num_pending_transactions(self.transactions.len()); - TransactionTrackerMetrics::set_nonce(self.nonce); - TransactionTrackerMetrics::set_attempt_count(self.attempt_count); - if let Some(tx) = self.transactions.last() { - TransactionTrackerMetrics::set_current_fees(Some(tx.gas_fees)); - } else { - TransactionTrackerMetrics::set_current_fees(None); - } + fn unabandon(&mut self) { + self.has_abandoned = false; } +} - async fn get_mined_tx_gas_info( - &self, - tx_hash: H256, - ) -> anyhow::Result<(Option, Option)> { - let (tx, tx_receipt) = tokio::try_join!( - self.provider.get_transaction(tx_hash), - self.provider.get_transaction_receipt(tx_hash), - )?; - println!("HC get_mined_tx_gas_info looking for hash {:?} got tx {:?} receipt {:?}", tx_hash, tx, tx_receipt); - let gas_limit = tx.map(|t| t.gas).or_else(|| { - warn!("failed to fetch transaction data for tx: {}", tx_hash); - None - }); - let gas_used = match tx_receipt { - Some(r) => r.gas_used, - None => { - warn!("failed to fetch transaction receipt for tx: {}", tx_hash); - None +impl From for TransactionTrackerError { + fn from(value: TxSenderError) -> Self { + match value { + TxSenderError::NonceTooLow => TransactionTrackerError::NonceTooLow, + TxSenderError::ReplacementUnderpriced => { + TransactionTrackerError::ReplacementUnderpriced } - }; - Ok((gas_limit, gas_used)) + TxSenderError::ConditionNotMet => 
TransactionTrackerError::ConditionNotMet, + TxSenderError::SoftCancelFailed => { + TransactionTrackerError::Other(anyhow::anyhow!("soft cancel failed")) + } + TxSenderError::Other(e) => TransactionTrackerError::Other(e), + } } } struct TransactionTrackerMetrics {} impl TransactionTrackerMetrics { - fn set_num_pending_transactions(num_pending_transactions: usize) { - metrics::gauge!( - "builder_tracker_num_pending_transactions", - num_pending_transactions as f64 - ); + fn set_num_pending_transactions(builder_index: u64, num_pending_transactions: usize) { + metrics::gauge!("builder_tracker_num_pending_transactions", "builder_index" => builder_index.to_string()) + .set(num_pending_transactions as f64); } - fn set_nonce(nonce: U256) { - metrics::gauge!("builder_tracker_nonce", nonce.as_u64() as f64); + fn set_nonce(builder_index: u64, nonce: U256) { + metrics::gauge!("builder_tracker_nonce", "builder_index" => builder_index.to_string()) + .set(nonce.as_u64() as f64); } - fn set_attempt_count(attempt_count: u64) { - metrics::gauge!("builder_tracker_attempt_count", attempt_count as f64); + fn set_attempt_count(builder_index: u64, attempt_count: u64) { + metrics::gauge!("builder_tracker_attempt_count", "builder_index" => builder_index.to_string()).set(attempt_count as f64); } - fn set_current_fees(current_fees: Option) { + fn set_current_fees(builder_index: u64, current_fees: Option) { let fees = current_fees.unwrap_or_default(); - metrics::gauge!( - "builder_tracker_current_max_fee_per_gas", - fees.max_fee_per_gas.as_u64() as f64 - ); - metrics::gauge!( - "builder_tracker_current_max_priority_fee_per_gas", - fees.max_priority_fee_per_gas.as_u64() as f64 - ); + metrics::gauge!("builder_tracker_current_max_fee_per_gas", "builder_index" => builder_index.to_string()) + .set(fees.max_fee_per_gas.as_u64() as f64); + metrics::gauge!("builder_tracker_current_max_priority_fee_per_gas", "builder_index" => builder_index.to_string()) + .set(fees.max_priority_fee_per_gas.as_u64() as 
f64); } } @@ -485,13 +529,11 @@ mod tests { provider: MockProvider, ) -> TransactionTrackerImpl { let settings = Settings { - poll_interval: Duration::from_secs(0), - max_blocks_to_wait_for_mine: 3, replacement_fee_percent_increase: 5, }; let tracker: TransactionTrackerImpl = - TransactionTrackerImpl::new(Arc::new(provider), sender, settings) + TransactionTrackerImpl::new(Arc::new(provider), sender, settings, 0) .await .unwrap(); @@ -515,7 +557,7 @@ mod tests { .expect_get_transaction_count() .returning(move |_a| Ok(U256::from(0))); - let tracker = create_tracker(sender, provider).await; + let mut tracker = create_tracker(sender, provider).await; let tx = Eip1559TransactionRequest::new() .nonce(0) @@ -539,50 +581,46 @@ mod tests { ); } - // TODO(#295): fix dropped status - // #[tokio::test] - // async fn test_nonce_and_fees_dropped() { - // let (mut sender, mut provider) = create_base_config(); - // sender.expect_address().return_const(Address::zero()); + #[tokio::test] + async fn test_nonce_and_fees_abandoned() { + let (mut sender, mut provider) = create_base_config(); + sender.expect_address().return_const(Address::zero()); - // sender - // .expect_get_transaction_status() - // .returning(move |_a| Box::pin(async { Ok(TxStatus::Dropped) })); + sender + .expect_get_transaction_status() + .returning(move |_a| Box::pin(async { Ok(TxStatus::Pending) })); - // sender.expect_send_transaction().returning(move |_a, _b| { - // Box::pin(async { - // Ok(SentTxInfo { - // nonce: U256::from(0), - // tx_hash: H256::zero(), - // }) - // }) - // }); + sender.expect_send_transaction().returning(move |_a, _b| { + Box::pin(async { + Ok(SentTxInfo { + nonce: U256::from(0), + tx_hash: H256::zero(), + }) + }) + }); - // provider - // .expect_get_transaction_count() - // .returning(move |_a| Ok(U256::from(0))); + provider + .expect_get_transaction_count() + .returning(move |_a| Ok(U256::from(0))); - // provider - // .expect_get_block_number() - // .returning(move || Ok(1)) - // 
.times(1); + let mut tracker = create_tracker(sender, provider).await; - // let tracker = create_tracker(sender, provider).await; + let tx = Eip1559TransactionRequest::new() + .nonce(0) + .gas(10000) + .max_fee_per_gas(10000); + let exp = ExpectedStorage::default(); - // let tx = Eip1559TransactionRequest::new() - // .nonce(0) - // .gas(10000) - // .max_fee_per_gas(10000); - // let exp = ExpectedStorage::default(); + // send dummy transaction + let _sent = tracker.send_transaction(tx.into(), &exp).await; + let _tracker_update = tracker.check_for_update().await.unwrap(); - // // send dummy transaction - // let _sent = tracker.send_transaction(tx.into(), &exp).await; - // let _tracker_update = tracker.wait_for_update().await.unwrap(); + tracker.abandon(); - // let nonce_and_fees = tracker.get_nonce_and_required_fees().unwrap(); + let nonce_and_fees = tracker.get_nonce_and_required_fees().unwrap(); - // assert_eq!((U256::from(0), None), nonce_and_fees); - // } + assert_eq!((U256::from(0), None), nonce_and_fees); + } #[tokio::test] async fn test_send_transaction_without_nonce() { @@ -601,7 +639,7 @@ mod tests { .expect_get_transaction_count() .returning(move |_a| Ok(U256::from(2))); - let tracker = create_tracker(sender, provider).await; + let mut tracker = create_tracker(sender, provider).await; let tx = Eip1559TransactionRequest::new(); let exp = ExpectedStorage::default(); @@ -628,7 +666,7 @@ mod tests { .expect_get_transaction_count() .returning(move |_a| Ok(U256::from(2))); - let tracker = create_tracker(sender, provider).await; + let mut tracker = create_tracker(sender, provider).await; let tx = Eip1559TransactionRequest::new().nonce(0); let exp = ExpectedStorage::default(); @@ -654,41 +692,11 @@ mod tests { .expect_get_transaction_count() .returning(move |_a| Ok(U256::from(0))); - let tracker = create_tracker(sender, provider).await; + let mut tracker = create_tracker(sender, provider).await; let tx = Eip1559TransactionRequest::new().nonce(0); let exp = 
ExpectedStorage::default(); - let sent_transaction = tracker.send_transaction(tx.into(), &exp).await.unwrap(); - - assert!(matches!(sent_transaction, SendResult::TxHash(..))); - } - - #[tokio::test] - async fn test_wait_for_update_still_pending() { - let (mut sender, mut provider) = create_base_config(); - sender.expect_address().return_const(Address::zero()); - - let mut s = Sequence::new(); - - provider - .expect_get_transaction_count() - .returning(move |_a| Ok(U256::from(0))); - - for block_number in 1..=4 { - provider - .expect_get_block_number() - .returning(move || Ok(block_number)) - .times(1) - .in_sequence(&mut s); - } - - let tracker = create_tracker(sender, provider).await; - let tracker_update = tracker.wait_for_update().await.unwrap(); - - assert!(matches!( - tracker_update, - TrackerUpdate::StillPendingAfterWait - )); + tracker.send_transaction(tx.into(), &exp).await.unwrap(); } // TODO(#295): fix dropped status @@ -730,7 +738,7 @@ mod tests { // } #[tokio::test] - async fn test_wait_for_update_nonce_used() { + async fn test_check_for_update_nonce_used() { let (mut sender, mut provider) = create_base_config(); sender.expect_address().return_const(Address::zero()); @@ -743,14 +751,9 @@ mod tests { .in_sequence(&mut provider_seq); } - provider - .expect_get_block_number() - .returning(move || Ok(1)) - .times(1); - - let tracker = create_tracker(sender, provider).await; + let mut tracker = create_tracker(sender, provider).await; - let tracker_update = tracker.wait_for_update().await.unwrap(); + let tracker_update = tracker.check_for_update().await.unwrap().unwrap(); assert!(matches!( tracker_update, @@ -759,7 +762,7 @@ mod tests { } #[tokio::test] - async fn test_wait_for_update_mined() { + async fn test_check_for_update_mined() { let (mut sender, mut provider) = create_base_config(); sender.expect_address().return_const(Address::zero()); sender @@ -770,7 +773,7 @@ mod tests { Box::pin(async { Ok(SentTxInfo { nonce: U256::from(0), - tx_hash: 
H256::zero(), + tx_hash: H256::random(), }) }) }); @@ -779,11 +782,6 @@ mod tests { .expect_get_transaction_count() .returning(move |_a| Ok(U256::from(0))); - provider - .expect_get_block_number() - .returning(move || Ok(1)) - .times(1); - provider.expect_get_transaction().returning(|_: H256| { Ok(Some(Transaction { gas: U256::from(0), @@ -800,14 +798,14 @@ mod tests { })) }); - let tracker = create_tracker(sender, provider).await; + let mut tracker = create_tracker(sender, provider).await; let tx = Eip1559TransactionRequest::new().nonce(0); let exp = ExpectedStorage::default(); // send dummy transaction let _sent = tracker.send_transaction(tx.into(), &exp).await; - let tracker_update = tracker.wait_for_update().await.unwrap(); + let tracker_update = tracker.check_for_update().await.unwrap().unwrap(); assert!(matches!(tracker_update, TrackerUpdate::Mined { .. })); } diff --git a/crates/dev/src/lib.rs b/crates/dev/src/lib.rs index 81739c7b..0d70c972 100644 --- a/crates/dev/src/lib.rs +++ b/crates/dev/src/lib.rs @@ -42,11 +42,11 @@ use ethers::{ utils::{self, hex, keccak256}, }; use rundler_types::{ - contracts::{ - entry_point::EntryPoint, simple_account::SimpleAccount, + contracts::v0_6::{ + i_entry_point::IEntryPoint, simple_account::SimpleAccount, simple_account_factory::SimpleAccountFactory, verifying_paymaster::VerifyingPaymaster, }, - UserOperation, + v0_6, UserOperation, }; /// Chain ID used by Geth in --dev mode. @@ -185,14 +185,14 @@ pub fn test_signing_key_bytes(test_account_id: u8) -> [u8; 32] { } /// An alternative to the default user op with gas values prefilled. 
-pub fn base_user_op() -> UserOperation { - UserOperation { +pub fn base_user_op() -> v0_6::UserOperation { + v0_6::UserOperation { call_gas_limit: 1_000_000.into(), verification_gas_limit: 1_000_000.into(), pre_verification_gas: 1_000_000.into(), max_fee_per_gas: 100.into(), max_priority_fee_per_gas: 5.into(), - ..UserOperation::default() + ..v0_6::UserOperation::default() } } @@ -281,7 +281,7 @@ pub async fn deploy_dev_contracts(entry_point_bytecode: &str) -> anyhow::Result< let entry_point_address = deterministic_deploy .deploy_bytecode(entry_point_bytecode, 0) .await?; - let entry_point = EntryPoint::new(entry_point_address, Arc::clone(&deployer_client)); + let entry_point = IEntryPoint::new(entry_point_address, Arc::clone(&deployer_client)); // TODO use deterministic deployment // account factory @@ -315,12 +315,12 @@ pub async fn deploy_dev_contracts(entry_point_bytecode: &str) -> anyhow::Result< factory.create_account(wallet_owner_eoa.address(), salt), ); - let mut op = UserOperation { + let mut op = v0_6::UserOperation { sender: wallet_address, init_code, ..base_user_op() }; - let op_hash = op.op_hash(entry_point.address(), DEV_CHAIN_ID); + let op_hash = op.hash(entry_point.address(), DEV_CHAIN_ID); let signature = wallet_owner_eoa .sign_message(op_hash) .await @@ -348,7 +348,7 @@ pub struct DevClients { /// The client used by the bundler. pub bundler_client: Arc, /// The entry point contract. - pub entry_point: EntryPoint, + pub entry_point: IEntryPoint, /// The account factory contract. pub factory: SimpleAccountFactory>, /// The wallet contract. 
@@ -373,7 +373,7 @@ impl DevClients { let provider = new_local_provider(); let bundler_client = new_test_client(Arc::clone(&provider), BUNDLER_ACCOUNT_ID); let wallet_owner_client = new_test_client(Arc::clone(&provider), WALLET_OWNER_ACCOUNT_ID); - let entry_point = EntryPoint::new(entry_point_address, Arc::clone(&bundler_client)); + let entry_point = IEntryPoint::new(entry_point_address, Arc::clone(&bundler_client)); let factory = SimpleAccountFactory::new(factory_address, Arc::clone(&provider)); let wallet = SimpleAccount::new(wallet_address, Arc::clone(&provider)); let paymaster = VerifyingPaymaster::new(paymaster_address, Arc::clone(&provider)); @@ -400,7 +400,7 @@ impl DevClients { /// Adds a signature to a user operation. pub async fn add_signature( &self, - op: &mut UserOperation, + op: &mut v0_6::UserOperation, use_paymaster: bool, ) -> anyhow::Result<()> { if use_paymaster { @@ -426,7 +426,7 @@ impl DevClients { paymaster_and_data.extend(paymaster_signature.to_vec()); op.paymaster_and_data = paymaster_and_data.into() } - let op_hash = op.op_hash(self.entry_point.address(), DEV_CHAIN_ID); + let op_hash = op.hash(self.entry_point.address(), DEV_CHAIN_ID); let signature = self .wallet_owner_signer .sign_message(op_hash) @@ -441,7 +441,7 @@ impl DevClients { &self, call: ContractCall, value: U256, - ) -> anyhow::Result { + ) -> anyhow::Result { self.new_wallet_op_internal(call, value, false).await } @@ -450,7 +450,7 @@ impl DevClients { &self, call: ContractCall, value: U256, - ) -> anyhow::Result { + ) -> anyhow::Result { self.new_wallet_op_internal(call, value, true).await } @@ -459,7 +459,7 @@ impl DevClients { call: ContractCall, value: U256, use_paymaster: bool, - ) -> anyhow::Result { + ) -> anyhow::Result { let tx = &call.tx; let inner_call_data = Bytes::clone( tx.data() @@ -480,7 +480,7 @@ impl DevClients { .data() .context("wallet execute should have call data")?, ); - let mut op = UserOperation { + let mut op = v0_6::UserOperation { sender: 
self.wallet.address(), call_data, nonce, diff --git a/crates/pool/Cargo.toml b/crates/pool/Cargo.toml index a4d9f9b7..85ee659a 100644 --- a/crates/pool/Cargo.toml +++ b/crates/pool/Cargo.toml @@ -19,10 +19,12 @@ async-trait.workspace = true ethers.workspace = true futures.workspace = true futures-util.workspace = true -itertools = "0.11.0" +itertools.workspace = true metrics.workspace = true parking_lot = "0.12.1" prost.workspace = true +serde.workspace = true +strum.workspace = true thiserror.workspace = true tokio.workspace = true tokio-stream = { version = "0.1.12", features = ["sync"] } @@ -31,8 +33,6 @@ tonic.workspace = true tonic-health.workspace = true tonic-reflection.workspace = true tracing.workspace = true -serde.workspace = true -strum.workspace = true url.workspace = true mockall = {workspace = true, optional = true } @@ -44,6 +44,3 @@ rundler-provider = { path = "../provider", features = ["test-utils"] } [build-dependencies] tonic-build.workspace = true - -[features] -test-utils = [ "mockall" ] diff --git a/crates/pool/proto/op_pool/op_pool.proto b/crates/pool/proto/op_pool/op_pool.proto index d3050526..32d1dfa5 100644 --- a/crates/pool/proto/op_pool/op_pool.proto +++ b/crates/pool/proto/op_pool/op_pool.proto @@ -17,9 +17,16 @@ syntax = "proto3"; package op_pool; +message UserOperation { + oneof uo { + UserOperationV06 v06 = 1; + UserOperationV07 v07 = 2; + } +} + // Protocol Buffer representation of an ERC-4337 UserOperation. 
See the official // specification at https://eips.ethereum.org/EIPS/eip-4337#definitions -message UserOperation { +message UserOperationV06 { // The account making the operation bytes sender = 1; // Anti-replay parameter (see “Semi-abstracted Nonce Support” ) @@ -43,11 +50,48 @@ message UserOperation { // Address of paymaster sponsoring the transaction, followed by extra data to // send to the paymaster (empty for self-sponsored transaction) bytes paymaster_and_data = 10; - // Data passed into the account along with the nonce during the verification - // step + // Signature over the hash of the packed representation of the user operation bytes signature = 11; } +message UserOperationV07 { + // The account making the operation + bytes sender = 1; + // Anti-replay parameter (see “Semi-abstracted Nonce Support” ) + bytes nonce = 2; + // The data to pass to the sender during the main execution call + bytes call_data = 3; + // The amount of gas to allocate the main execution call + bytes call_gas_limit = 4; + // The amount of gas to allocate for the verification step + bytes verification_gas_limit = 5; + // The amount of gas to pay for to compensate the bundler for pre-verification + // execution and calldata + bytes pre_verification_gas = 6; + // Maximum fee per gas (similar to EIP-1559 max_fee_per_gas) + bytes max_fee_per_gas = 7; + // Maximum priority fee per gas (similar to EIP-1559 max_priority_fee_per_gas) + bytes max_priority_fee_per_gas = 8; + // Signature over the hash of the packed representation of the user operation + bytes signature = 9; + // Address of paymaster sponsoring the transaction, empty if none + bytes paymaster = 10; + // Extra data to send to the paymaster, zero if no paymaster + bytes paymaster_data = 11; + // Paymaster verification gas limit, zero if no paymaster + bytes paymaster_verification_gas_limit = 12; + // Paymaster post-op gas limit, zero if no paymaster + bytes paymaster_post_op_gas_limit = 13; + // Address of the factory to use to 
create the sender account, empty if none + bytes factory = 14; + // Extra data to send to the factory, empty if no factory + bytes factory_data = 15; + + // Extra data to compute the hash of the user operation + bytes entry_point = 16; + uint64 chain_id = 17; +} + enum EntityType { ENTITY_TYPE_UNSPECIFIED = 0; ENTITY_TYPE_ACCOUNT = 1; @@ -101,17 +145,14 @@ message MempoolOp { // See rule (5) here: // https://github.com/ethereum/EIPs/blob/master/EIPS/eip-4337.md#specification-1 bytes expected_code_hash = 5; - // A list of all entities that require stake to process this UO as identified in - // validation before entering the mempool - repeated EntityType entities_needing_stake = 6; // Block hash at which the UserOperation was simulated - bytes sim_block_hash = 7; + bytes sim_block_hash = 6; // Indicates if the account is staked. Staked accounts are allowed to have // multiple UserOperations in the mempool, otherwise just one UserOperation is // permitted - bool account_is_staked = 8; + bool account_is_staked = 7; // The entry point address of this operation - bytes entry_point = 9; + bytes entry_point = 8; } // Defines the gRPC endpoints for a UserOperation mempool service @@ -133,6 +174,9 @@ service OpPool { // Removes UserOperations from the mempool rpc RemoveOps(RemoveOpsRequest) returns (RemoveOpsResponse); + // Remove a UserOperation by its id + rpc RemoveOpById(RemoveOpByIdRequest) returns (RemoveOpByIdResponse); + // Handles a list of updates to be performed on entities rpc UpdateEntities(UpdateEntitiesRequest) returns (UpdateEntitiesResponse); @@ -150,6 +194,9 @@ service OpPool { // debug_bundler_setReputation rpc DebugDumpReputation(DebugDumpReputationRequest) returns (DebugDumpReputationResponse); + // Dumps the paymaster balances + rpc DebugDumpPaymasterBalances(DebugDumpPaymasterBalancesRequest) returns (DebugDumpPaymasterBalancesResponse); + // Get reputation status of address rpc GetReputationStatus(GetReputationStatusRequest) returns 
(GetReputationStatusResponse); @@ -159,6 +206,9 @@ service OpPool { // Streaming API to subscribe to be updated upon a new block being added to (or reorged onto) // the chain. rpc SubscribeNewHeads(SubscribeNewHeadsRequest) returns (stream SubscribeNewHeadsResponse); + + // Clears the bundler mempool and reputation data of paymasters/accounts/factories/aggregators + rpc AdminSetTracking(AdminSetTrackingRequest) returns (AdminSetTrackingResponse); } message GetSupportedEntryPointsRequest {} @@ -275,6 +325,21 @@ message RemoveOpsResponse { } message RemoveOpsSuccess {} +message RemoveOpByIdRequest { + bytes entry_point = 1; + bytes sender = 2; + bytes nonce = 3; +} +message RemoveOpByIdResponse { + oneof result { + RemoveOpByIdSuccess success = 1; + MempoolError failure = 2; + } +} +message RemoveOpByIdSuccess { + bytes hash = 1; +} + message UpdateEntitiesRequest { // The serilaized entry point address bytes entry_point = 1; @@ -293,6 +358,7 @@ message UpdateEntitiesSuccess {} message DebugClearStateRequest { bool clear_mempool = 1; bool clear_reputation = 2; + bool clear_paymaster = 3; } message DebugClearStateResponse { oneof result { @@ -343,6 +409,24 @@ message DebugDumpReputationSuccess { repeated Reputation reputations = 1; } +message DebugDumpPaymasterBalancesRequest { + bytes entry_point = 1; +} +message DebugDumpPaymasterBalancesResponse { + oneof result { + DebugDumpPaymasterBalancesSuccess success = 1; + MempoolError failure = 2; + } +} +message DebugDumpPaymasterBalancesSuccess { + repeated PaymasterBalance balances = 1; +} +message PaymasterBalance { + bytes address = 1; + bytes pending_balance = 2; + bytes confirmed_balance = 3; +} + message SubscribeNewHeadsRequest {} message SubscribeNewHeadsResponse { // The new chain head @@ -355,6 +439,20 @@ message NewHead { uint64 block_number = 2; } +message AdminSetTrackingRequest { + // The serialized entry point address via which the UserOperation is being submitted + bytes entry_point = 1; + bool paymaster 
= 2; + bool reputation = 3; +} +message AdminSetTrackingResponse { + oneof result { + AdminSetTrackingSuccess success = 1; + MempoolError failure = 2; + } +} +message AdminSetTrackingSuccess {} + message Reputation { // The (serialized) address to set the reputation for bytes address = 1; @@ -389,6 +487,7 @@ message MempoolError { SenderAddressUsedAsAlternateEntity sender_address_used_as_alternate_entity = 13; AssociatedStorageIsAlternateSender associated_storage_is_alternate_sender = 14; PaymasterBalanceTooLow paymaster_balance_too_low = 15; + OperationDropTooSoon operation_drop_too_soon = 16; } } @@ -412,7 +511,7 @@ message PaymasterBalanceTooLow { message MaxOperationsReachedError { uint64 num_ops = 1; - bytes entity_address = 2; + Entity entity = 2; } message EntityThrottledError { @@ -435,23 +534,27 @@ message UnsupportedAggregatorError { message InvalidSignatureError {} +message OperationDropTooSoon { + uint64 added_at = 1; + uint64 attempted_at = 2; + uint64 must_wait = 3; +} + // PRECHECK VIOLATIONS message PrecheckViolationError { oneof violation { - InitCodeTooShort init_code_too_short = 1; - SenderIsNotContractAndNoInitCode sender_is_not_contract_and_no_init_code = 2; - ExistingSenderWithInitCode existing_sender_with_init_code = 3; - FactoryIsNotContract factory_is_not_contract = 4; - TotalGasLimitTooHigh total_gas_limit_too_high = 5; - VerificationGasLimitTooHigh verification_gas_limit_too_high = 6; - PreVerificationGasTooLow pre_verification_gas_too_low = 7; - PaymasterTooShort paymaster_too_short = 8; - PaymasterIsNotContract paymaster_is_not_contract = 9; - PaymasterDepositTooLow paymaster_deposit_too_low = 10; - SenderFundsTooLow sender_funds_too_low = 11; - MaxFeePerGasTooLow max_fee_per_gas_too_low = 12; - MaxPriorityFeePerGasTooLow max_priority_fee_per_gas_too_low = 13; - CallGasLimitTooLow call_gas_limit_too_low = 14; + SenderIsNotContractAndNoInitCode sender_is_not_contract_and_no_init_code = 1; + ExistingSenderWithInitCode 
existing_sender_with_init_code = 2; + FactoryIsNotContract factory_is_not_contract = 3; + TotalGasLimitTooHigh total_gas_limit_too_high = 4; + VerificationGasLimitTooHigh verification_gas_limit_too_high = 5; + PreVerificationGasTooLow pre_verification_gas_too_low = 6; + PaymasterIsNotContract paymaster_is_not_contract = 7; + PaymasterDepositTooLow paymaster_deposit_too_low = 8; + SenderFundsTooLow sender_funds_too_low = 9; + MaxFeePerGasTooLow max_fee_per_gas_too_low = 10; + MaxPriorityFeePerGasTooLow max_priority_fee_per_gas_too_low = 11; + CallGasLimitTooLow call_gas_limit_too_low = 12; } } @@ -540,15 +643,31 @@ message SimulationViolationError { AggregatorValidationFailed aggregator_validation_failed = 16; UnstakedPaymasterContext unstaked_paymaster_context = 17; UnstakedAggregator unstaked_aggregator = 18; + VerificationGasLimitBufferTooLow verification_gas_limit_buffer_too_low = 19; + ValidationRevert validation_revert = 20; + InvalidAccountSignature invalid_account_signature = 21; + InvalidPaymasterSignature invalid_paymaster_signature = 22; + AssociatedStorageDuringDeploy associated_storage_during_deploy = 23; + InvalidTimeRange invalid_time_range = 24; + AccessedUnsupportedContractType accessed_unsupported_contract_type = 25; } } message InvalidSignature {} +message InvalidAccountSignature {} + +message InvalidPaymasterSignature {} + message UnstakedAggregator {} message UnstakedPaymasterContext {} +message InvalidTimeRange { + uint64 valid_until = 1; + uint64 valud_after = 2; +} + message UnintendedRevertWithMessage { Entity entity = 1; string reason = 2; @@ -576,13 +695,20 @@ message InvalidStorageAccess { bytes slot = 3; } -message NotStaked { +message AssociatedStorageDuringDeploy { Entity entity = 1; - bytes accessed_address = 2; + bytes contract_address = 2; + bytes slot = 3; +} + +message NotStaked { + Entity needs_stake = 1; + EntityType accessing_entity = 2; EntityType accessed_entity = 3; - bytes slot = 4; - bytes min_stake = 5; - bytes 
min_unstake_delay = 6; + bytes accessed_address = 4; + bytes slot = 5; + bytes min_stake = 6; + bytes min_unstake_delay = 7; } message UnintendedRevert { @@ -616,3 +742,31 @@ message CodeHashChanged {} message AggregatorValidationFailed {} +message VerificationGasLimitBufferTooLow { + bytes limit = 1; + bytes needed = 2; +} + +message ValidationRevert { + oneof revert { + EntryPointRevert entry_point = 1; + OperationRevert operation = 2; + UnknownRevert unknown = 3; + } +} +message EntryPointRevert { + string reason = 1; +} +message OperationRevert { + string entry_point_reason = 1; + bytes inner_revert_data = 2; + string inner_revert_reason = 3; +} +message UnknownRevert { + bytes revert_bytes = 1; +} + +message AccessedUnsupportedContractType { + string contract_type = 1; + bytes contract_address = 2; +} diff --git a/crates/pool/src/chain.rs b/crates/pool/src/chain.rs index a83e15c8..e570ee59 100644 --- a/crates/pool/src/chain.rs +++ b/crates/pool/src/chain.rs @@ -12,14 +12,14 @@ // If not, see https://www.gnu.org/licenses/. 
use std::{ - collections::{HashSet, VecDeque}, + collections::{HashMap, HashSet, VecDeque}, sync::Arc, time::Duration, }; use anyhow::{ensure, Context}; use ethers::{ - contract, + contract::EthLogDecode, prelude::EthEvent, types::{Address, Block, Filter, Log, H256, U256}, }; @@ -27,16 +27,17 @@ use futures::future; use rundler_provider::Provider; use rundler_task::block_watcher; use rundler_types::{ - contracts::{entry_point::DepositedFilter, i_entry_point::UserOperationEventFilter}, - Timestamp, UserOperationId, + contracts::{v0_6::i_entry_point as entry_point_v0_6, v0_7::i_entry_point as entry_point_v0_7}, + EntryPointVersion, Timestamp, UserOperationId, }; use tokio::{ select, sync::{broadcast, Semaphore}, task::JoinHandle, + time, }; use tokio_util::sync::CancellationToken; -use tracing::{error, info, warn}; +use tracing::{debug, info, warn}; const MAX_LOAD_OPS_CONCURRENCY: usize = 64; @@ -54,6 +55,8 @@ pub(crate) struct Chain { blocks: VecDeque, /// Semaphore to limit the number of concurrent `eth_getLogs` calls. 
load_ops_semaphore: Semaphore, + /// Filter template + filter_template: Filter, } #[derive(Default, Debug, Eq, PartialEq)] @@ -67,10 +70,10 @@ pub struct ChainUpdate { pub reorg_depth: u64, pub mined_ops: Vec, pub unmined_ops: Vec, - /// List of on-chain entity deposits made in the most recent block - pub entity_deposits: Vec, - /// List of entity deposits that have been unmined due to a reorg - pub unmined_entity_deposits: Vec, + /// List of on-chain entity balance updates made in the most recent block + pub entity_balance_updates: Vec, + /// List of entity balance updates that have been unmined due to a reorg + pub unmined_entity_balance_updates: Vec, /// Boolean to state if the most recent chain update had a reorg /// that was larger than the existing history that has been tracked pub reorg_larger_than_history: bool, @@ -87,10 +90,11 @@ pub struct MinedOp { } #[derive(Clone, Copy, Debug, Eq, PartialEq)] -pub struct DepositInfo { +pub struct BalanceUpdate { pub address: Address, pub entrypoint: Address, pub amount: U256, + pub is_addition: bool, } impl MinedOp { @@ -106,7 +110,8 @@ impl MinedOp { pub(crate) struct Settings { pub(crate) history_size: u64, pub(crate) poll_interval: Duration, - pub(crate) entry_point_addresses: Vec

, + pub(crate) entry_point_addresses: HashMap, + pub(crate) max_sync_retries: u64, } #[derive(Debug)] @@ -116,18 +121,51 @@ struct BlockSummary { timestamp: Timestamp, parent_hash: H256, ops: Vec, - entity_deposits: Vec, + entity_balance_updates: Vec, } impl Chain

{ pub(crate) fn new(provider: Arc

, settings: Settings) -> Self { let history_size = settings.history_size as usize; assert!(history_size > 0, "history size should be positive"); + + let mut events = vec![]; + + if settings + .entry_point_addresses + .values() + .any(|v| *v == EntryPointVersion::V0_6) + { + events.push(entry_point_v0_6::UserOperationEventFilter::abi_signature()); + events.push(entry_point_v0_6::DepositedFilter::abi_signature()); + events.push(entry_point_v0_6::WithdrawnFilter::abi_signature()); + } + if settings + .entry_point_addresses + .values() + .any(|v| *v == EntryPointVersion::V0_7) + { + events.push(entry_point_v0_7::UserOperationEventFilter::abi_signature()); + events.push(entry_point_v0_7::DepositedFilter::abi_signature()); + events.push(entry_point_v0_7::WithdrawnFilter::abi_signature()); + } + + let filter_template = Filter::new() + .address( + settings + .entry_point_addresses + .keys() + .cloned() + .collect::>(), + ) + .events(events.iter().map(|e| e.as_ref())); + Self { provider, settings, blocks: VecDeque::new(), load_ops_semaphore: Semaphore::new(MAX_LOAD_OPS_CONCURRENCY), + filter_template, } } @@ -165,13 +203,28 @@ impl Chain

{ ) .await; block_hash = hash; - let update = self.sync_to_block(block).await; - match update { - Ok(update) => return update, - Err(error) => { - error!("Failed to update chain at block {block_hash:?}. Will try again at next block. {error:?}"); + + for i in 0..=self.settings.max_sync_retries { + if i > 0 { + ChainMetrics::increment_sync_retries(); + } + + let update = self.sync_to_block(block.clone()).await; + match update { + Ok(update) => return update, + Err(error) => { + debug!("Failed to update chain at block {block_hash:?}: {error:?}"); + } } + + time::sleep(self.settings.poll_interval).await; } + + warn!( + "Failed to update chain at block {:?} after {} retries. Abandoning sync.", + block_hash, self.settings.max_sync_retries + ); + ChainMetrics::increment_sync_abandoned(); } } @@ -219,13 +272,14 @@ impl Chain

{ .copied() .collect(); - let entity_deposits: Vec<_> = self + let entity_balance_updates: Vec<_> = self .blocks .iter() - .flat_map(|block| &block.entity_deposits) + .flat_map(|block| &block.entity_balance_updates) .copied() .collect(); - Ok(self.new_update(0, mined_ops, vec![], entity_deposits, vec![], false)) + + Ok(self.new_update(0, mined_ops, vec![], entity_balance_updates, vec![], false)) } /// Given a collection of blocks to add to the chain, whose numbers may @@ -242,11 +296,12 @@ impl Chain

{ .copied() .collect(); - let entity_deposits: Vec<_> = added_blocks + let entity_balance_updates: Vec<_> = added_blocks .iter() - .flat_map(|block| &block.entity_deposits) + .flat_map(|block| &block.entity_balance_updates) .copied() .collect(); + let reorg_depth = current_block_number + 1 - added_blocks[0].number; let unmined_ops: Vec<_> = self .blocks @@ -256,11 +311,11 @@ impl Chain

{ .copied() .collect(); - let unmined_entity_deposits: Vec<_> = self + let unmined_entity_balance_updates: Vec<_> = self .blocks .iter() .skip(self.blocks.len() - reorg_depth as usize) - .flat_map(|block| &block.entity_deposits) + .flat_map(|block| &block.entity_balance_updates) .copied() .collect(); @@ -284,8 +339,8 @@ impl Chain

{ reorg_depth, mined_ops, unmined_ops, - entity_deposits, - unmined_entity_deposits, + entity_balance_updates, + unmined_entity_balance_updates, is_reorg_larger_than_history, ) } @@ -379,9 +434,9 @@ impl Chain

{ let opses = future::try_join_all(future_opses) .await .context("should load ops for new blocks")?; - for (i, (ops, deposits)) in opses.into_iter().enumerate() { + for (i, (ops, balance_updates)) in opses.into_iter().enumerate() { blocks[i].ops = ops; - blocks[i].entity_deposits = deposits; + blocks[i].entity_balance_updates = balance_updates; } Ok(()) } @@ -389,76 +444,126 @@ impl Chain

{ async fn load_ops_in_block_with_hash( &self, block_hash: H256, - ) -> anyhow::Result<(Vec, Vec)> { + ) -> anyhow::Result<(Vec, Vec)> { let _permit = self .load_ops_semaphore .acquire() .await .expect("semaphore should not be closed"); - let deposit = DepositedFilter::abi_signature(); - let uo_filter = UserOperationEventFilter::abi_signature(); - let events: Vec<&str> = vec![&deposit, &uo_filter]; - - let filter = Filter::new() - .address(self.settings.entry_point_addresses.clone()) - .events(events) - .at_block_hash(block_hash); + let filter = self.filter_template.clone().at_block_hash(block_hash); let logs = self .provider .get_logs(&filter) .await .context("chain state should load user operation events")?; - let deposits = self.load_entity_deposits(&logs); - let mined_ops = self.load_mined_ops(&logs); - - Ok((mined_ops, deposits)) - } - - fn load_mined_ops(&self, logs: &Vec) -> Vec { let mut mined_ops = vec![]; + let mut entity_balance_updates = vec![]; for log in logs { - let entry_point = log.address; - if let Ok(event) = contract::parse_log::(log.clone()) { - let paymaster = if event.paymaster.is_zero() { - None - } else { - Some(event.paymaster) - }; - - let mined = MinedOp { - hash: event.user_op_hash.into(), - entry_point, - sender: event.sender, - nonce: event.nonce, - actual_gas_cost: event.actual_gas_cost, - paymaster, - }; - - mined_ops.push(mined); + match self.settings.entry_point_addresses.get(&log.address) { + Some(EntryPointVersion::V0_6) => { + Self::load_v0_6(log, &mut mined_ops, &mut entity_balance_updates) + } + Some(EntryPointVersion::V0_7) => { + Self::load_v0_7(log, &mut mined_ops, &mut entity_balance_updates) + } + Some(EntryPointVersion::Unspecified) | None => { + warn!( + "Log with unknown entry point address: {:?}. 
Ignoring.", + log.address + ); + } } } - mined_ops + Ok((mined_ops, entity_balance_updates)) } - fn load_entity_deposits(&self, logs: &Vec) -> Vec { - let mut deposits = vec![]; - for log in logs { - let entrypoint = log.address; - if let Ok(event) = contract::parse_log::(log.clone()) { - let info = DepositInfo { - entrypoint, - address: event.account, - amount: event.total_deposit, - }; - - deposits.push(info); + fn load_v0_6(log: Log, mined_ops: &mut Vec, balance_updates: &mut Vec) { + let address = log.address; + if let Ok(event) = entry_point_v0_6::IEntryPointEvents::decode_log(&log.into()) { + match event { + entry_point_v0_6::IEntryPointEvents::UserOperationEventFilter(event) => { + let paymaster = if event.paymaster.is_zero() { + None + } else { + Some(event.paymaster) + }; + let mined = MinedOp { + hash: event.user_op_hash.into(), + entry_point: address, + sender: event.sender, + nonce: event.nonce, + actual_gas_cost: event.actual_gas_cost, + paymaster, + }; + mined_ops.push(mined); + } + entry_point_v0_6::IEntryPointEvents::DepositedFilter(event) => { + let info = BalanceUpdate { + entrypoint: address, + address: event.account, + amount: event.total_deposit, + is_addition: true, + }; + balance_updates.push(info); + } + entry_point_v0_6::IEntryPointEvents::WithdrawnFilter(event) => { + let info = BalanceUpdate { + entrypoint: address, + address: event.account, + amount: event.amount, + is_addition: false, + }; + balance_updates.push(info); + } + _ => {} } } + } - deposits + fn load_v0_7(log: Log, mined_ops: &mut Vec, balance_updates: &mut Vec) { + let address = log.address; + if let Ok(event) = entry_point_v0_7::IEntryPointEvents::decode_log(&log.into()) { + match event { + entry_point_v0_7::IEntryPointEvents::UserOperationEventFilter(event) => { + let paymaster = if event.paymaster.is_zero() { + None + } else { + Some(event.paymaster) + }; + let mined = MinedOp { + hash: event.user_op_hash.into(), + entry_point: address, + sender: event.sender, + nonce: 
event.nonce, + actual_gas_cost: event.actual_gas_cost, + paymaster, + }; + mined_ops.push(mined); + } + entry_point_v0_7::IEntryPointEvents::DepositedFilter(event) => { + let info = BalanceUpdate { + entrypoint: address, + address: event.account, + amount: event.total_deposit, + is_addition: true, + }; + balance_updates.push(info); + } + entry_point_v0_7::IEntryPointEvents::WithdrawnFilter(event) => { + let info = BalanceUpdate { + entrypoint: address, + address: event.account, + amount: event.amount, + is_addition: false, + }; + balance_updates.push(info); + } + _ => {} + } + } } fn block_with_number(&self, number: u64) -> Option<&BlockSummary> { @@ -469,13 +574,14 @@ impl Chain

{ self.blocks.get((number - earliest_number) as usize) } + #[allow(clippy::too_many_arguments)] fn new_update( &self, reorg_depth: u64, mined_ops: Vec, unmined_ops: Vec, - entity_deposits: Vec, - unmined_entity_deposits: Vec, + entity_balance_updates: Vec, + unmined_entity_balance_updates: Vec, reorg_larger_than_history: bool, ) -> ChainUpdate { let latest_block = self @@ -490,8 +596,8 @@ impl Chain

{ reorg_depth, mined_ops, unmined_ops, - entity_deposits, - unmined_entity_deposits, + entity_balance_updates, + unmined_entity_balance_updates, reorg_larger_than_history, } } @@ -527,7 +633,7 @@ impl BlockSummary { timestamp: block.timestamp.as_u64().into(), parent_hash: block.parent_hash, ops: Vec::new(), - entity_deposits: Vec::new(), + entity_balance_updates: Vec::new(), }) } } @@ -566,15 +672,23 @@ struct ChainMetrics {} impl ChainMetrics { fn set_block_height(block_height: u64) { - metrics::gauge!("op_pool_chain_block_height", block_height as f64); + metrics::gauge!("op_pool_chain_block_height").set(block_height as f64); } fn increment_reorgs_detected() { - metrics::increment_counter!("op_pool_chain_reorgs_detected"); + metrics::counter!("op_pool_chain_reorgs_detected").increment(1); } fn increment_total_reorg_depth(depth: u64) { - metrics::counter!("op_pool_chain_total_reorg_depth", depth); + metrics::counter!("op_pool_chain_total_reorg_depth").increment(depth); + } + + fn increment_sync_retries() { + metrics::counter!("op_pool_chain_sync_retries").increment(1); + } + + fn increment_sync_abandoned() { + metrics::counter!("op_pool_chain_sync_abandoned").increment(1); } } @@ -594,22 +708,45 @@ mod tests { use super::*; const HISTORY_SIZE: u64 = 3; - const ENTRY_POINT_ADDRESS: Address = H160(*b"01234567890123456789"); + const ENTRY_POINT_ADDRESS_V0_6: Address = H160(*b"01234567890123456789"); + const ENTRY_POINT_ADDRESS_V0_7: Address = H160(*b"98765432109876543210"); #[derive(Clone, Debug)] struct MockBlock { hash: H256, + events: Vec, + } + + #[derive(Clone, Debug, Default)] + struct MockEntryPointEvents { + address: Address, op_hashes: Vec, deposit_addresses: Vec

, + withdrawal_addresses: Vec
, } impl MockBlock { - fn new(hash: H256, op_hashes: Vec, deposit_addresses: Vec
) -> Self { + fn new(hash: H256) -> Self { Self { hash, + events: vec![], + } + } + + fn add_ep( + mut self, + address: Address, + op_hashes: Vec, + deposit_addresses: Vec
, + withdrawal_addresses: Vec
, + ) -> Self { + self.events.push(MockEntryPointEvents { + address, op_hashes, deposit_addresses, - } + withdrawal_addresses, + }); + self } } @@ -656,14 +793,44 @@ mod tests { }; let mut joined_logs: Vec = Vec::new(); - joined_logs.extend(block.op_hashes.iter().copied().map(fake_log)); - joined_logs.extend( - block - .deposit_addresses - .iter() - .copied() - .map(fake_deposit_log), - ); + + for events in &block.events { + if events.address == ENTRY_POINT_ADDRESS_V0_6 { + joined_logs.extend(events.op_hashes.iter().copied().map(fake_mined_log_v0_6)); + joined_logs.extend( + events + .deposit_addresses + .iter() + .copied() + .map(fake_deposit_log_v0_6), + ); + joined_logs.extend( + events + .withdrawal_addresses + .iter() + .copied() + .map(fake_withdrawal_log_v0_6), + ); + } else if events.address == ENTRY_POINT_ADDRESS_V0_7 { + joined_logs.extend(events.op_hashes.iter().copied().map(fake_mined_log_v0_7)); + joined_logs.extend( + events + .deposit_addresses + .iter() + .copied() + .map(fake_deposit_log_v0_7), + ); + joined_logs.extend( + events + .withdrawal_addresses + .iter() + .copied() + .map(fake_withdrawal_log_v0_7), + ); + } else { + panic!("Unknown entry point address: {:?}", events.address); + } + } joined_logs } @@ -673,10 +840,25 @@ mod tests { async fn test_initial_load() { let (mut chain, controller) = new_chain(); controller.set_blocks(vec![ - MockBlock::new(hash(0), vec![hash(101), hash(102)], vec![]), - MockBlock::new(hash(1), vec![hash(103)], vec![]), - MockBlock::new(hash(2), vec![], vec![]), - MockBlock::new(hash(3), vec![hash(104), hash(105)], vec![]), + MockBlock::new(hash(0)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(101), hash(102)], + vec![], + vec![], + ), + MockBlock::new(hash(1)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(103)], + vec![], + vec![], + ), + MockBlock::new(hash(2)).add_ep(ENTRY_POINT_ADDRESS_V0_6, vec![], vec![], vec![]), + MockBlock::new(hash(3)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(104), hash(105)], 
+ vec![], + vec![], + ), ]); let update = chain.sync_to_block(controller.get_head()).await.unwrap(); // With a history size of 3, we should get updates from all blocks except the first one. @@ -688,10 +870,14 @@ mod tests { latest_block_timestamp: 0.into(), earliest_remembered_block_number: 1, reorg_depth: 0, - mined_ops: vec![fake_mined_op(103), fake_mined_op(104), fake_mined_op(105),], + mined_ops: vec![ + fake_mined_op(103, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_op(104, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_op(105, ENTRY_POINT_ADDRESS_V0_6), + ], unmined_ops: vec![], - entity_deposits: vec![], - unmined_entity_deposits: vec![], + entity_balance_updates: vec![], + unmined_entity_balance_updates: vec![], reorg_larger_than_history: false, } ); @@ -701,15 +887,35 @@ mod tests { async fn test_simple_advance() { let (mut chain, controller) = new_chain(); controller.set_blocks(vec![ - MockBlock::new(hash(0), vec![hash(101), hash(102)], vec![]), - MockBlock::new(hash(1), vec![hash(103)], vec![]), - MockBlock::new(hash(2), vec![], vec![]), - MockBlock::new(hash(3), vec![hash(104), hash(105)], vec![]), + MockBlock::new(hash(0)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(101), hash(102)], + vec![], + vec![], + ), + MockBlock::new(hash(1)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(103)], + vec![], + vec![], + ), + MockBlock::new(hash(2)).add_ep(ENTRY_POINT_ADDRESS_V0_6, vec![], vec![], vec![]), + MockBlock::new(hash(3)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(104), hash(105)], + vec![], + vec![], + ), ]); chain.sync_to_block(controller.get_head()).await.unwrap(); controller .get_blocks_mut() - .push(MockBlock::new(hash(4), vec![hash(106)], vec![])); + .push(MockBlock::new(hash(4)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(106)], + vec![], + vec![], + )); let update = chain.sync_to_block(controller.get_head()).await.unwrap(); assert_eq!( update, @@ -719,10 +925,10 @@ mod tests { latest_block_timestamp: 0.into(), earliest_remembered_block_number: 
2, reorg_depth: 0, - mined_ops: vec![fake_mined_op(106)], + mined_ops: vec![fake_mined_op(106, ENTRY_POINT_ADDRESS_V0_6)], unmined_ops: vec![], - entity_deposits: vec![], - unmined_entity_deposits: vec![], + entity_balance_updates: vec![], + unmined_entity_balance_updates: vec![], reorg_larger_than_history: false, } ); @@ -732,9 +938,24 @@ mod tests { async fn test_forward_reorg() { let (mut chain, controller) = new_chain(); controller.set_blocks(vec![ - MockBlock::new(hash(0), vec![hash(100)], vec![]), - MockBlock::new(hash(1), vec![hash(101)], vec![]), - MockBlock::new(hash(2), vec![hash(102)], vec![Address::zero()]), + MockBlock::new(hash(0)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(100)], + vec![], + vec![], + ), + MockBlock::new(hash(1)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(101)], + vec![], + vec![], + ), + MockBlock::new(hash(2)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(102)], + vec![Address::zero()], + vec![addr(1)], + ), ]); chain.sync_to_block(controller.get_head()).await.unwrap(); { @@ -742,9 +963,24 @@ mod tests { let mut blocks = controller.get_blocks_mut(); blocks.pop(); blocks.extend([ - MockBlock::new(hash(12), vec![hash(112)], vec![]), - MockBlock::new(hash(13), vec![hash(113)], vec![]), - MockBlock::new(hash(14), vec![hash(114)], vec![]), + MockBlock::new(hash(12)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(112)], + vec![], + vec![], + ), + MockBlock::new(hash(13)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(113)], + vec![], + vec![], + ), + MockBlock::new(hash(14)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(114)], + vec![], + vec![addr(3)], + ), ]); } let update = chain.sync_to_block(controller.get_head()).await.unwrap(); @@ -756,10 +992,22 @@ mod tests { latest_block_timestamp: 0.into(), earliest_remembered_block_number: 2, reorg_depth: 1, - mined_ops: vec![fake_mined_op(112), fake_mined_op(113), fake_mined_op(114)], - unmined_ops: vec![fake_mined_op(102)], - entity_deposits: vec![], - 
unmined_entity_deposits: vec![fake_mined_deposit(Address::zero(), 0.into())], + mined_ops: vec![ + fake_mined_op(112, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_op(113, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_op(114, ENTRY_POINT_ADDRESS_V0_6) + ], + unmined_ops: vec![fake_mined_op(102, ENTRY_POINT_ADDRESS_V0_6)], + entity_balance_updates: vec![fake_mined_balance_update( + addr(3), + 0.into(), + false, + ENTRY_POINT_ADDRESS_V0_6 + )], + unmined_entity_balance_updates: vec![ + fake_mined_balance_update(addr(0), 0.into(), true, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_balance_update(addr(1), 0.into(), false, ENTRY_POINT_ADDRESS_V0_6), + ], reorg_larger_than_history: false, } ); @@ -769,9 +1017,24 @@ mod tests { async fn test_sideways_reorg() { let (mut chain, controller) = new_chain(); controller.set_blocks(vec![ - MockBlock::new(hash(0), vec![hash(100)], vec![]), - MockBlock::new(hash(1), vec![hash(101)], vec![addr(1)]), - MockBlock::new(hash(2), vec![hash(102)], vec![]), + MockBlock::new(hash(0)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(100)], + vec![], + vec![], + ), + MockBlock::new(hash(1)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(101)], + vec![addr(1)], + vec![addr(9)], + ), + MockBlock::new(hash(2)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(102)], + vec![], + vec![], + ), ]); chain.sync_to_block(controller.get_head()).await.unwrap(); { @@ -780,23 +1043,47 @@ mod tests { blocks.pop(); blocks.pop(); blocks.extend([ - MockBlock::new(hash(11), vec![hash(111)], vec![addr(2)]), - MockBlock::new(hash(12), vec![hash(112)], vec![]), + MockBlock::new(hash(11)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(111)], + vec![addr(2)], + vec![], + ), + MockBlock::new(hash(12)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(112)], + vec![], + vec![], + ), ]); } let update = chain.sync_to_block(controller.get_head()).await.unwrap(); assert_eq!( update, ChainUpdate { - entity_deposits: vec![fake_mined_deposit(addr(2), 0.into())], + entity_balance_updates: 
vec![fake_mined_balance_update( + addr(2), + 0.into(), + true, + ENTRY_POINT_ADDRESS_V0_6 + )], latest_block_number: 2, latest_block_hash: hash(12), latest_block_timestamp: 0.into(), earliest_remembered_block_number: 0, reorg_depth: 2, - mined_ops: vec![fake_mined_op(111), fake_mined_op(112)], - unmined_ops: vec![fake_mined_op(101), fake_mined_op(102)], - unmined_entity_deposits: vec![fake_mined_deposit(addr(1), 0.into())], + mined_ops: vec![ + fake_mined_op(111, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_op(112, ENTRY_POINT_ADDRESS_V0_6) + ], + unmined_ops: vec![ + fake_mined_op(101, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_op(102, ENTRY_POINT_ADDRESS_V0_6) + ], + unmined_entity_balance_updates: vec![ + fake_mined_balance_update(addr(1), 0.into(), true, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_balance_update(addr(9), 0.into(), false, ENTRY_POINT_ADDRESS_V0_6), + ], reorg_larger_than_history: false, } ); @@ -806,9 +1093,24 @@ mod tests { async fn test_backwards_reorg() { let (mut chain, controller) = new_chain(); controller.set_blocks(vec![ - MockBlock::new(hash(0), vec![hash(100)], vec![]), - MockBlock::new(hash(1), vec![hash(101)], vec![]), - MockBlock::new(hash(2), vec![hash(102)], vec![]), + MockBlock::new(hash(0)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(100)], + vec![], + vec![], + ), + MockBlock::new(hash(1)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(101)], + vec![], + vec![], + ), + MockBlock::new(hash(2)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(102)], + vec![], + vec![], + ), ]); chain.sync_to_block(controller.get_head()).await.unwrap(); { @@ -816,21 +1118,34 @@ mod tests { let mut blocks = controller.get_blocks_mut(); blocks.pop(); blocks.pop(); - blocks.push(MockBlock::new(hash(11), vec![hash(111)], vec![addr(1)])); + blocks.push(MockBlock::new(hash(11)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(111)], + vec![addr(1)], + vec![], + )); } let update = chain.sync_to_block(controller.get_head()).await.unwrap(); assert_eq!( update, 
ChainUpdate { latest_block_number: 1, - entity_deposits: vec![fake_mined_deposit(addr(1), 0.into())], + entity_balance_updates: vec![fake_mined_balance_update( + addr(1), + 0.into(), + true, + ENTRY_POINT_ADDRESS_V0_6 + )], latest_block_hash: hash(11), latest_block_timestamp: 0.into(), earliest_remembered_block_number: 0, reorg_depth: 2, - mined_ops: vec![fake_mined_op(111)], - unmined_ops: vec![fake_mined_op(101), fake_mined_op(102)], - unmined_entity_deposits: vec![], + mined_ops: vec![fake_mined_op(111, ENTRY_POINT_ADDRESS_V0_6)], + unmined_ops: vec![ + fake_mined_op(101, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_op(102, ENTRY_POINT_ADDRESS_V0_6) + ], + unmined_entity_balance_updates: vec![], reorg_larger_than_history: false, } ); @@ -840,32 +1155,80 @@ mod tests { async fn test_reorg_longer_than_history() { let (mut chain, controller) = new_chain(); controller.set_blocks(vec![ - MockBlock::new(hash(0), vec![hash(100)], vec![]), - MockBlock::new(hash(1), vec![hash(101)], vec![]), - MockBlock::new(hash(2), vec![hash(102)], vec![]), - MockBlock::new(hash(3), vec![hash(103)], vec![]), + MockBlock::new(hash(0)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(100)], + vec![], + vec![], + ), + MockBlock::new(hash(1)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(101)], + vec![], + vec![], + ), + MockBlock::new(hash(2)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(102)], + vec![], + vec![], + ), + MockBlock::new(hash(3)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(103)], + vec![], + vec![], + ), ]); chain.sync_to_block(controller.get_head()).await.unwrap(); // The history has size 3, so after this update it's completely unrecognizable. 
controller.set_blocks(vec![ - MockBlock::new(hash(0), vec![hash(100)], vec![]), - MockBlock::new(hash(11), vec![hash(111)], vec![]), - MockBlock::new(hash(12), vec![hash(112)], vec![]), - MockBlock::new(hash(13), vec![hash(113)], vec![]), + MockBlock::new(hash(0)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(100)], + vec![], + vec![], + ), + MockBlock::new(hash(11)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(111)], + vec![], + vec![], + ), + MockBlock::new(hash(12)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(112)], + vec![], + vec![], + ), + MockBlock::new(hash(13)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(113)], + vec![], + vec![], + ), ]); let update = chain.sync_to_block(controller.get_head()).await.unwrap(); assert_eq!( update, ChainUpdate { - entity_deposits: vec![], latest_block_number: 3, latest_block_hash: hash(13), latest_block_timestamp: 0.into(), earliest_remembered_block_number: 1, reorg_depth: 3, - mined_ops: vec![fake_mined_op(111), fake_mined_op(112), fake_mined_op(113)], - unmined_ops: vec![fake_mined_op(101), fake_mined_op(102), fake_mined_op(103)], - unmined_entity_deposits: vec![], + mined_ops: vec![ + fake_mined_op(111, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_op(112, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_op(113, ENTRY_POINT_ADDRESS_V0_6) + ], + unmined_ops: vec![ + fake_mined_op(101, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_op(102, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_op(103, ENTRY_POINT_ADDRESS_V0_6) + ], + entity_balance_updates: vec![], + unmined_entity_balance_updates: vec![], reorg_larger_than_history: true, } ); @@ -875,15 +1238,35 @@ mod tests { async fn test_advance_larger_than_history_size() { let (mut chain, controller) = new_chain(); controller.set_blocks(vec![ - MockBlock::new(hash(0), vec![hash(100)], vec![]), - MockBlock::new(hash(1), vec![hash(101)], vec![]), - MockBlock::new(hash(2), vec![hash(102)], vec![]), + MockBlock::new(hash(0)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(100)], + vec![], + 
vec![], + ), + MockBlock::new(hash(1)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(101)], + vec![], + vec![], + ), + MockBlock::new(hash(2)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(102)], + vec![], + vec![], + ), ]); chain.sync_to_block(controller.get_head()).await.unwrap(); { let mut blocks = controller.get_blocks_mut(); for i in 3..7 { - blocks.push(MockBlock::new(hash(10 + i), vec![hash(100 + i)], vec![])); + blocks.push(MockBlock::new(hash(10 + i)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(100 + i)], + vec![], + vec![], + )); } } let update = chain.sync_to_block(controller.get_head()).await.unwrap(); @@ -895,10 +1278,14 @@ mod tests { latest_block_timestamp: 0.into(), earliest_remembered_block_number: 4, reorg_depth: 0, - entity_deposits: vec![], - mined_ops: vec![fake_mined_op(104), fake_mined_op(105), fake_mined_op(106)], + entity_balance_updates: vec![], + unmined_entity_balance_updates: vec![], + mined_ops: vec![ + fake_mined_op(104, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_op(105, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_op(106, ENTRY_POINT_ADDRESS_V0_6) + ], unmined_ops: vec![], - unmined_entity_deposits: vec![], reorg_larger_than_history: false, } ); @@ -909,8 +1296,18 @@ mod tests { async fn test_latest_block_number_smaller_than_history_size() { let (mut chain, controller) = new_chain(); let blocks = vec![ - MockBlock::new(hash(0), vec![hash(101), hash(102)], vec![]), - MockBlock::new(hash(1), vec![hash(103)], vec![]), + MockBlock::new(hash(0)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(101), hash(102)], + vec![], + vec![], + ), + MockBlock::new(hash(1)).add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(103)], + vec![], + vec![], + ), ]; controller.set_blocks(blocks); let update = chain.sync_to_block(controller.get_head()).await.unwrap(); @@ -922,10 +1319,62 @@ mod tests { latest_block_timestamp: 0.into(), earliest_remembered_block_number: 0, reorg_depth: 0, - entity_deposits: vec![], - mined_ops: vec![fake_mined_op(101), 
fake_mined_op(102), fake_mined_op(103),], + mined_ops: vec![ + fake_mined_op(101, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_op(102, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_op(103, ENTRY_POINT_ADDRESS_V0_6), + ], unmined_ops: vec![], - unmined_entity_deposits: vec![], + entity_balance_updates: vec![], + unmined_entity_balance_updates: vec![], + reorg_larger_than_history: false, + } + ); + } + + #[tokio::test] + async fn test_mixed_event_types() { + let (mut chain, controller) = new_chain(); + controller.set_blocks(vec![MockBlock::new(hash(0)) + .add_ep( + ENTRY_POINT_ADDRESS_V0_6, + vec![hash(101), hash(102)], + vec![addr(1), addr(2)], + vec![addr(3), addr(4)], + ) + .add_ep( + ENTRY_POINT_ADDRESS_V0_7, + vec![hash(201), hash(202)], + vec![addr(5), addr(6)], + vec![addr(7), addr(8)], + )]); + let update = chain.sync_to_block(controller.get_head()).await.unwrap(); + assert_eq!( + update, + ChainUpdate { + latest_block_number: 0, + latest_block_hash: hash(0), + latest_block_timestamp: 0.into(), + earliest_remembered_block_number: 0, + reorg_depth: 0, + mined_ops: vec![ + fake_mined_op(101, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_op(102, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_op(201, ENTRY_POINT_ADDRESS_V0_7), + fake_mined_op(202, ENTRY_POINT_ADDRESS_V0_7), + ], + unmined_ops: vec![], + entity_balance_updates: vec![ + fake_mined_balance_update(addr(1), 0.into(), true, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_balance_update(addr(2), 0.into(), true, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_balance_update(addr(3), 0.into(), false, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_balance_update(addr(4), 0.into(), false, ENTRY_POINT_ADDRESS_V0_6), + fake_mined_balance_update(addr(5), 0.into(), true, ENTRY_POINT_ADDRESS_V0_7), + fake_mined_balance_update(addr(6), 0.into(), true, ENTRY_POINT_ADDRESS_V0_7), + fake_mined_balance_update(addr(7), 0.into(), false, ENTRY_POINT_ADDRESS_V0_7), + fake_mined_balance_update(addr(8), 0.into(), false, ENTRY_POINT_ADDRESS_V0_7), + ], + 
unmined_entity_balance_updates: vec![], reorg_larger_than_history: false, } ); @@ -938,7 +1387,11 @@ mod tests { Settings { history_size: HISTORY_SIZE, poll_interval: Duration::from_secs(250), // Not used in tests. - entry_point_addresses: vec![ENTRY_POINT_ADDRESS], + entry_point_addresses: HashMap::from([ + (ENTRY_POINT_ADDRESS_V0_6, EntryPointVersion::V0_6), + (ENTRY_POINT_ADDRESS_V0_7, EntryPointVersion::V0_7), + ]), + max_sync_retries: 1, }, ); (chain, controller) @@ -968,12 +1421,12 @@ mod tests { (provider, controller) } - fn fake_log(op_hash: H256) -> Log { + fn fake_mined_log_v0_6(op_hash: H256) -> Log { Log { - address: ENTRY_POINT_ADDRESS, + address: ENTRY_POINT_ADDRESS_V0_6, topics: vec![ H256::from(utils::keccak256( - UserOperationEventFilter::abi_signature().as_bytes(), + entry_point_v0_6::UserOperationEventFilter::abi_signature().as_bytes(), )), op_hash, H256::zero(), // sender @@ -990,12 +1443,12 @@ mod tests { } } - fn fake_deposit_log(deposit_address: Address) -> Log { + fn fake_deposit_log_v0_6(deposit_address: Address) -> Log { Log { - address: ENTRY_POINT_ADDRESS, + address: ENTRY_POINT_ADDRESS_V0_6, topics: vec![ H256::from(utils::keccak256( - DepositedFilter::abi_signature().as_bytes(), + entry_point_v0_6::DepositedFilter::abi_signature().as_bytes(), )), H256::from(deposit_address), ], @@ -1007,10 +1460,85 @@ mod tests { } } - fn fake_mined_op(n: u8) -> MinedOp { + fn fake_withdrawal_log_v0_6(withdrawal_address: Address) -> Log { + Log { + address: ENTRY_POINT_ADDRESS_V0_6, + topics: vec![ + H256::from(utils::keccak256( + entry_point_v0_6::WithdrawnFilter::abi_signature().as_bytes(), + )), + H256::from(withdrawal_address), + ], + data: AbiEncode::encode(( + Address::zero(), // withdrawAddress + U256::zero(), // amount + )) + .into(), + ..Default::default() + } + } + + fn fake_mined_log_v0_7(op_hash: H256) -> Log { + Log { + address: ENTRY_POINT_ADDRESS_V0_7, + topics: vec![ + H256::from(utils::keccak256( + 
entry_point_v0_7::UserOperationEventFilter::abi_signature().as_bytes(), + )), + op_hash, + H256::zero(), // sender + H256::zero(), // paymaster + ], + data: AbiEncode::encode(( + U256::zero(), // nonce + true, // success + U256::zero(), // actual_gas_cost + U256::zero(), // actual_gas_used + )) + .into(), + ..Default::default() + } + } + + fn fake_deposit_log_v0_7(deposit_address: Address) -> Log { + Log { + address: ENTRY_POINT_ADDRESS_V0_7, + topics: vec![ + H256::from(utils::keccak256( + entry_point_v0_7::DepositedFilter::abi_signature().as_bytes(), + )), + H256::from(deposit_address), + ], + data: AbiEncode::encode(( + U256::zero(), // totalDeposits + )) + .into(), + ..Default::default() + } + } + + fn fake_withdrawal_log_v0_7(withdrawal_address: Address) -> Log { + Log { + address: ENTRY_POINT_ADDRESS_V0_7, + topics: vec![ + H256::from(utils::keccak256( + entry_point_v0_7::WithdrawnFilter::abi_signature().as_bytes(), + )), + H256::from(withdrawal_address), + ], + data: AbiEncode::encode(( + Address::zero(), // withdrawAddress + U256::zero(), // amount + )) + .into(), + ..Default::default() + } + } + + fn fake_mined_op(n: u8, ep: Address) -> MinedOp { MinedOp { hash: hash(n), - entry_point: ENTRY_POINT_ADDRESS, + entry_point: ep, sender: Address::zero(), nonce: U256::zero(), actual_gas_cost: U256::zero(), @@ -1018,11 +1546,17 @@ mod tests { } } - fn fake_mined_deposit(address: Address, amount: U256) -> DepositInfo { - DepositInfo { + fn fake_mined_balance_update( + address: Address, + amount: U256, + is_addition: bool, + ep: Address, + ) -> BalanceUpdate { + BalanceUpdate { address, - entrypoint: ENTRY_POINT_ADDRESS, + entrypoint: ep, amount, + is_addition, } } diff --git a/crates/pool/src/emit.rs b/crates/pool/src/emit.rs index 520a1d4b..40b4c87e 100644 --- a/crates/pool/src/emit.rs +++ b/crates/pool/src/emit.rs @@ -14,7 +14,7 @@ use std::fmt::Display; use ethers::types::{Address, H256}; -use rundler_types::{Entity, EntityType, Timestamp, UserOperation}; +use 
rundler_types::{Entity, EntityType, Timestamp, UserOperation, UserOperationVariant}; use rundler_utils::strs; use crate::mempool::OperationOrigin; @@ -27,7 +27,7 @@ pub enum OpPoolEvent { /// Operation hash op_hash: H256, /// The full operation - op: UserOperation, + op: UserOperationVariant, /// Block number the operation was added to the pool block_number: u64, /// Operation origin @@ -51,6 +51,11 @@ pub enum OpPoolEvent { /// The removed entity entity: Entity, }, + /// An Entity was throttled + ThrottledEntity { + /// The throttled entity + entity: Entity, + }, } /// Summary of the entities associated with an operation @@ -113,6 +118,11 @@ pub enum OpRemovalReason { /// The removed entity entity: Entity, }, + /// Op was removed because an associated entity was throttled + EntityThrottled { + /// The throttled entity + entity: Entity, + }, /// Op was removed because it expired Expired { /// Op was valid until this timestamp @@ -157,8 +167,8 @@ impl Display for OpPoolEvent { format_entity_status("Factory", entities.factory.as_ref()), format_entity_status("Paymaster", entities.paymaster.as_ref()), format_entity_status("Aggregator", entities.aggregator.as_ref()), - op.max_fee_per_gas, - op.max_priority_fee_per_gas, + op.max_fee_per_gas(), + op.max_priority_fee_per_gas(), ) } OpPoolEvent::RemovedOp { op_hash, reason } => { @@ -179,6 +189,9 @@ impl Display for OpPoolEvent { entity, ) } + OpPoolEvent::ThrottledEntity { entity } => { + write!(f, concat!("Throttled entity.", " Entity: {}",), entity,) + } } } } diff --git a/crates/pool/src/lib.rs b/crates/pool/src/lib.rs index aab69baf..8dc63e07 100644 --- a/crates/pool/src/lib.rs +++ b/crates/pool/src/lib.rs @@ -25,16 +25,12 @@ mod emit; pub use emit::OpPoolEvent as PoolEvent; mod mempool; -pub use mempool::{ - MempoolError, PoolConfig, PoolOperation, Reputation, ReputationStatus, StakeStatus, -}; +pub use mempool::PoolConfig; mod server; #[cfg(feature = "test-utils")] pub use server::MockPoolServer; -pub use server::{ - 
LocalPoolBuilder, LocalPoolHandle, PoolResult, PoolServer, PoolServerError, RemotePoolClient, -}; +pub use server::{LocalPoolBuilder, LocalPoolHandle, RemotePoolClient}; mod task; pub use task::{Args as PoolTaskArgs, PoolTask}; diff --git a/crates/pool/src/mempool/error.rs b/crates/pool/src/mempool/error.rs deleted file mode 100644 index 5fed9ab6..00000000 --- a/crates/pool/src/mempool/error.rs +++ /dev/null @@ -1,111 +0,0 @@ -// This file is part of Rundler. -// -// Rundler is free software: you can redistribute it and/or modify it under the -// terms of the GNU Lesser General Public License as published by the Free Software -// Foundation, either version 3 of the License, or (at your option) any later version. -// -// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; -// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License along with Rundler. -// If not, see https://www.gnu.org/licenses/. - -use std::mem; - -use ethers::{abi::Address, types::U256}; -use rundler_sim::{ - PrecheckError, PrecheckViolation, SimulationError, SimulationViolation, ViolationError, -}; -use rundler_types::Entity; - -/// Mempool result type. -pub(crate) type MempoolResult = std::result::Result; - -/// Mempool error type. -#[derive(Debug, thiserror::Error)] -pub enum MempoolError { - /// Some other error occurred - #[error(transparent)] - Other(#[from] anyhow::Error), - /// Operation with the same hash already in pool - #[error("Operation already known")] - OperationAlreadyKnown, - /// Operation with same sender/nonce already in pool - /// and the replacement operation has lower gas price. - #[error("Replacement operation underpriced. Existing priority fee: {0}. 
Existing fee: {1}")] - ReplacementUnderpriced(U256, U256), - /// Max operations reached for unstaked sender [UREP-010] or unstaked non-sender entity [UREP-020] - #[error("Max operations ({0}) reached for entity {1}")] - MaxOperationsReached(usize, Address), - /// Multiple roles violation - /// Spec rule: STO-040 - #[error("A {} at {} in this UserOperation is used as a sender entity in another UserOperation currently in mempool.", .0.kind, .0.address)] - MultipleRolesViolation(Entity), - /// An associated storage slot that is accessed in the UserOperation is being used as a sender by another UserOperation in the mempool. - /// Spec rule: STO-041 - #[error("An associated storage slot that is accessed in the UserOperation is being used as a sender by another UserOperation in the mempool")] - AssociatedStorageIsAlternateSender, - /// Sender address used as different entity in another UserOperation currently in the mempool. - /// Spec rule: STO-040 - #[error("The sender address {0} is used as a different entity in another UserOperation currently in mempool")] - SenderAddressUsedAsAlternateEntity(Address), - /// An entity associated with the operation is throttled/banned. - #[error("Entity {0} is throttled/banned")] - EntityThrottled(Entity), - /// Operation was discarded on inserting due to size limit - #[error("Operation was discarded on inserting")] - DiscardedOnInsert, - /// Paymaster balance too low - /// Spec rule: EREP-010 - #[error("Paymaster balance too low. Required balance: {0}. 
Current balance {1}")] - PaymasterBalanceTooLow(U256, U256), - /// Operation was rejected due to a precheck violation - #[error("Operation violation during precheck {0}")] - PrecheckViolation(PrecheckViolation), - /// Operation was rejected due to a simulation violation - #[error("Operation violation during simulation {0}")] - SimulationViolation(SimulationViolation), - /// Operation was rejected because it used an unsupported aggregator - #[error("Unsupported aggregator {0}")] - UnsupportedAggregator(Address), - /// An unknown entry point was specified - #[error("Unknown entry point {0}")] - UnknownEntryPoint(Address), -} - -impl From for MempoolError { - fn from(mut error: SimulationError) -> Self { - let SimulationError { - violation_error, .. - } = &mut error; - let ViolationError::Violations(violations) = violation_error else { - return Self::Other((*violation_error).clone().into()); - }; - - let Some(violation) = violations.iter_mut().min() else { - return Self::Other((*violation_error).clone().into()); - }; - - // extract violation and replace with dummy - Self::SimulationViolation(mem::replace(violation, SimulationViolation::DidNotRevert)) - } -} - -impl From for MempoolError { - fn from(mut error: PrecheckError) -> Self { - let PrecheckError::Violations(violations) = &mut error else { - return Self::Other(error.into()); - }; - - let Some(violation) = violations.iter_mut().min() else { - return Self::Other(error.into()); - }; - - // extract violation and replace with dummy - Self::PrecheckViolation(mem::replace( - violation, - PrecheckViolation::InitCodeTooShort(0), - )) - } -} diff --git a/crates/pool/src/mempool/mod.rs b/crates/pool/src/mempool/mod.rs index 036e26e8..d6b57e7b 100644 --- a/crates/pool/src/mempool/mod.rs +++ b/crates/pool/src/mempool/mod.rs @@ -11,20 +11,16 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. 
-mod error; -pub use error::MempoolError; - mod entity_tracker; mod pool; mod reputation; -pub(crate) use reputation::{HourlyMovingAverageReputation, ReputationParams}; -pub use reputation::{Reputation, ReputationStatus}; -use rundler_provider::ProviderResult; +pub(crate) use reputation::{AddressReputation, ReputationParams}; mod size; mod paymaster; +pub(crate) use paymaster::{PaymasterConfig, PaymasterTracker}; mod uo_pool; use std::{ @@ -32,17 +28,23 @@ use std::{ sync::Arc, }; -use ethers::types::{Address, H256, U256}; +use ethers::types::{Address, H256}; #[cfg(test)] use mockall::automock; -use rundler_sim::{EntityInfos, MempoolConfig, PrecheckSettings, SimulationSettings}; -use rundler_types::{Entity, EntityType, EntityUpdate, UserOperation, ValidTimeRange}; +use rundler_sim::{MempoolConfig, PrecheckSettings, SimulationSettings}; +use rundler_types::{ + pool::{ + MempoolError, PaymasterMetadata, PoolOperation, Reputation, ReputationStatus, StakeStatus, + }, + EntityUpdate, EntryPointVersion, UserOperationId, UserOperationVariant, +}; use tonic::async_trait; pub(crate) use uo_pool::UoPool; -use self::error::MempoolResult; use super::chain::ChainUpdate; +pub(crate) type MempoolResult = std::result::Result; + #[cfg_attr(test, automock)] #[async_trait] /// In-memory operation pool @@ -53,22 +55,25 @@ pub trait Mempool: Send + Sync + 'static { /// Returns the entry point address this pool targets. fn entry_point(&self) -> Address; + /// Returns the entry point version this pool targets. + fn entry_point_version(&self) -> EntryPointVersion; + /// Adds a user operation to the pool async fn add_operation( &self, origin: OperationOrigin, - op: UserOperation, + op: UserOperationVariant, ) -> MempoolResult; /// Removes a set of operations from the pool. fn remove_operations(&self, hashes: &[H256]); + /// Removes an operation from the pool by its ID. + fn remove_op_by_id(&self, id: &UserOperationId) -> MempoolResult>; + /// Updates the reputation of an entity. 
fn update_entity(&self, entity_update: EntityUpdate); - /// Returns current paymaster balance - async fn paymaster_balance(&self, paymaster: Address) -> ProviderResult; - /// Returns the best operations from the pool. /// /// Returns the best operations from the pool based on their gas bids up to @@ -92,11 +97,14 @@ pub trait Mempool: Send + Sync + 'static { /// Debug methods /// Clears the mempool of UOs or reputation of all addresses - fn clear_state(&self, clear_mempool: bool, clear_reputation: bool); + fn clear_state(&self, clear_mempool: bool, clear_paymaster: bool, clear_reputation: bool); /// Dumps the mempool's reputation tracking fn dump_reputation(&self) -> Vec; + /// Dumps the mempool's paymaster balance cache + fn dump_paymaster_balances(&self) -> Vec; + /// Dumps the mempool's reputation tracking fn get_reputation_status(&self, address: Address) -> ReputationStatus; @@ -106,8 +114,11 @@ pub trait Mempool: Send + Sync + 'static { /// Get stake status for address async fn get_stake_status(&self, address: Address) -> MempoolResult; - /// Reset paymater state + /// Reset paymaster state async fn reset_confirmed_paymaster_balances(&self) -> MempoolResult<()>; + + /// Turns on and off tracking errors + fn set_tracking(&self, paymaster: bool, reputation: bool); } /// Config for the mempool @@ -115,6 +126,8 @@ pub trait Mempool: Send + Sync + 'static { pub struct PoolConfig { /// Address of the entry point this pool targets pub entry_point: Address, + /// Version of the entry point this pool targets + pub entry_point_version: EntryPointVersion, /// Chain ID this pool targets pub chain_id: u64, /// The maximum number of operations an unstaked sender can have in the mempool @@ -143,23 +156,14 @@ pub struct PoolConfig { pub throttled_entity_mempool_count: u64, /// The maximum number of blocks a user operation with a throttled entity can stay in the mempool pub throttled_entity_live_blocks: u64, -} - -/// Stake status structure -#[derive(Debug, Clone, Copy)] -pub 
struct StakeStatus { - /// Address is staked - pub is_staked: bool, - /// Stake information about address - pub stake_info: StakeInfo, -} - -#[derive(Debug, Clone, Copy)] -pub struct StakeInfo { - /// Stake ammount - pub stake: u128, - /// Unstake delay in seconds - pub unstake_delay_sec: u32, + /// Boolean field used to toggle the operation of the paymaster tracker + pub paymaster_tracking_enabled: bool, + /// Number of paymaster balances to cache + pub paymaster_cache_length: u32, + /// Boolean field used to toggle the operation of the reputation tracker + pub reputation_tracking_enabled: bool, + /// The minimum number of blocks a user operation must be in the mempool before it can be dropped + pub drop_min_num_blocks: u64, } /// Origin of an operation. @@ -175,109 +179,11 @@ pub enum OperationOrigin { ReturnedAfterReorg, } -/// A user operation with additional metadata from validation. -#[derive(Debug, Default, Clone, Eq, PartialEq)] -pub struct PoolOperation { - /// The user operation stored in the pool - pub uo: UserOperation, - /// The entry point address for this operation - pub entry_point: Address, - /// The aggregator address for this operation, if any. - pub aggregator: Option
, - /// The valid time range for this operation. - pub valid_time_range: ValidTimeRange, - /// The expected code hash for all contracts accessed during validation for this operation. - pub expected_code_hash: H256, - /// The block hash simulation was completed at - pub sim_block_hash: H256, - /// The block number simulation was completed at - pub sim_block_number: u64, - /// List of entities that need to stake for this operation. - pub entities_needing_stake: Vec, - /// Whether the account is staked. - pub account_is_staked: bool, - /// Staking information about all the entities. - pub entity_infos: EntityInfos, -} - -#[derive(Debug, Default, Clone, Eq, PartialEq, Copy)] -pub struct PaymasterMetadata { - /// Paymaster address - pub address: Address, - /// The on-chain balance of the paymaster - pub confirmed_balance: U256, - /// The pending balance is the confirm balance subtracted by - /// the max cost of all the pending user operations that use the paymaster - pub pending_balance: U256, -} - -impl PoolOperation { - /// Returns true if the operation contains the given entity. - pub fn contains_entity(&self, entity: &Entity) -> bool { - if let Some(e) = self.entity_infos.get(entity.kind) { - e.address == entity.address - } else { - false - } - } - - /// Returns true if the operation requires the given entity to stake. - /// - /// For non-accounts, its possible that the entity is staked, but doesn't - /// _need_ to stake for this operation. For example, if the operation does not - /// access any storage slots that require staking. In that case this function - /// will return false. - /// - /// For staked accounts, this function will always return true. Staked accounts - /// are able to circumvent the mempool operation limits always need their reputation - /// checked to prevent them from filling the pool. 
- pub fn requires_stake(&self, entity: EntityType) -> bool { - match entity { - EntityType::Account => self.account_is_staked, - _ => self.entities_needing_stake.contains(&entity), - } - } - - /// Returns an iterator over all entities that are included in this operation. - pub fn entities(&'_ self) -> impl Iterator + '_ { - self.entity_infos - .entities() - .map(|(t, entity)| Entity::new(t, entity.address)) - } - - /// Returns an iterator over all entities that need stake in this operation. This can be a subset of entities that are staked in the operation. - pub fn entities_requiring_stake(&'_ self) -> impl Iterator + '_ { - self.entity_infos.entities().filter_map(|(t, entity)| { - if self.requires_stake(t) { - Entity::new(t, entity.address).into() - } else { - None - } - }) - } - - /// Return all the unstaked entities that are used in this operation. - pub fn unstaked_entities(&'_ self) -> impl Iterator + '_ { - self.entity_infos.entities().filter_map(|(t, entity)| { - if entity.is_staked { - None - } else { - Entity::new(t, entity.address).into() - } - }) - } - - /// Compute the amount of heap memory the PoolOperation takes up. 
- pub fn mem_size(&self) -> usize { - std::mem::size_of::() - + self.uo.heap_size() - + self.entities_needing_stake.len() * std::mem::size_of::() - } -} - #[cfg(test)] mod tests { - use rundler_sim::EntityInfo; + use rundler_types::{ + v0_6::UserOperation, Entity, EntityInfo, EntityInfos, EntityType, ValidTimeRange, + }; use super::*; @@ -294,40 +200,35 @@ mod tests { paymaster_and_data: paymaster.as_fixed_bytes().into(), init_code: factory.as_fixed_bytes().into(), ..Default::default() - }, + } + .into(), entry_point: Address::random(), aggregator: Some(aggregator), valid_time_range: ValidTimeRange::all_time(), expected_code_hash: H256::random(), sim_block_hash: H256::random(), sim_block_number: 0, - entities_needing_stake: vec![EntityType::Account, EntityType::Aggregator], account_is_staked: true, entity_infos: EntityInfos { factory: Some(EntityInfo { - address: factory, + entity: Entity::factory(factory), is_staked: false, }), sender: EntityInfo { - address: sender, + entity: Entity::account(sender), is_staked: false, }, paymaster: Some(EntityInfo { - address: paymaster, + entity: Entity::paymaster(paymaster), is_staked: false, }), aggregator: Some(EntityInfo { - address: aggregator, + entity: Entity::aggregator(aggregator), is_staked: false, }), }, }; - assert!(po.requires_stake(EntityType::Account)); - assert!(!po.requires_stake(EntityType::Paymaster)); - assert!(!po.requires_stake(EntityType::Factory)); - assert!(po.requires_stake(EntityType::Aggregator)); - let entities = po.entities().collect::>(); assert_eq!(entities.len(), 4); for e in entities { diff --git a/crates/pool/src/mempool/paymaster.rs b/crates/pool/src/mempool/paymaster.rs index cef98bb0..af2a8a48 100644 --- a/crates/pool/src/mempool/paymaster.rs +++ b/crates/pool/src/mempool/paymaster.rs @@ -17,50 +17,276 @@ use std::collections::HashMap; use anyhow::Context; use ethers::{abi::Address, types::U256}; -use rundler_types::UserOperationId; +use parking_lot::RwLock; +use 
rundler_provider::EntryPoint; +use rundler_types::{ + pool::{MempoolError, PaymasterMetadata, PoolOperation, StakeStatus}, + StakeInfo, UserOperation, UserOperationId, UserOperationVariant, +}; +use rundler_utils::cache::LruMap; -use super::{error::MempoolResult, PaymasterMetadata}; -use crate::{chain::MinedOp, MempoolError, PoolOperation}; +use super::MempoolResult; +use crate::chain::MinedOp; /// Keeps track of current and pending paymaster balances -#[derive(Debug, Clone, PartialEq, Eq, Default)] -pub(crate) struct PaymasterTracker { - /// map for userop based on id +#[derive(Debug)] +pub(crate) struct PaymasterTracker { + entry_point: E, + state: RwLock, + config: PaymasterConfig, +} + +#[derive(Debug)] +pub(crate) struct PaymasterConfig { + min_stake_value: u128, + min_unstake_delay: u32, + tracker_enabled: bool, + cache_length: u32, +} + +impl PaymasterConfig { + pub(crate) fn new( + min_stake_value: u128, + min_unstake_delay: u32, + tracker_enabled: bool, + cache_length: u32, + ) -> Self { + Self { + min_stake_value, + min_unstake_delay, + tracker_enabled, + cache_length, + } + } +} + +impl PaymasterTracker +where + E: EntryPoint, +{ + pub(crate) fn new(entry_point: E, config: PaymasterConfig) -> Self { + Self { + entry_point, + state: RwLock::new(PaymasterTrackerInner::new( + config.tracker_enabled, + config.cache_length, + )), + config, + } + } + + pub(crate) async fn get_stake_status(&self, address: Address) -> MempoolResult { + let deposit_info = self.entry_point.get_deposit_info(address).await?; + + let is_staked = deposit_info.stake.ge(&self.config.min_stake_value) + && deposit_info + .unstake_delay_sec + .ge(&self.config.min_unstake_delay); + + let stake_status = StakeStatus { + stake_info: StakeInfo { + stake: deposit_info.stake.into(), + unstake_delay_sec: deposit_info.unstake_delay_sec.into(), + }, + is_staked, + }; + + Ok(stake_status) + } + + pub(crate) async fn paymaster_balance( + &self, + paymaster: Address, + ) -> MempoolResult { + if 
self.state.read().paymaster_exists(paymaster) { + let meta = self + .state + .read() + .paymaster_metadata(paymaster) + .context("Paymaster balance should not be empty if address exists in pool")?; + + return Ok(meta); + } + + let balance = self + .entry_point + .balance_of(paymaster, None) + .await + .context("Paymaster balance should not be empty if address exists in pool")?; + + let paymaster_meta = PaymasterMetadata { + address: paymaster, + pending_balance: balance, + confirmed_balance: balance, + }; + + // Save paymaster balance after first lookup + self.state + .write() + .add_new_paymaster(paymaster, balance, 0.into()); + + Ok(paymaster_meta) + } + + pub(crate) async fn check_operation_cost( + &self, + op: &UserOperationVariant, + ) -> MempoolResult<()> { + if let Some(paymaster) = op.paymaster() { + let balance = self.paymaster_balance(paymaster).await?; + self.state.read().check_operation_cost(op, &balance)? + } + + Ok(()) + } + + pub(crate) fn clear(&self) { + self.state.write().clear(); + } + + pub(crate) fn dump_paymaster_metadata(&self) -> Vec { + self.state.read().dump_paymaster_metadata() + } + + pub(crate) fn set_tracking(&self, tracking_enabled: bool) { + self.state.write().set_tracking(tracking_enabled); + } + + pub(crate) async fn reset_confirmed_balances_for( + &self, + addresses: &[Address], + ) -> MempoolResult<()> { + let balances = self.entry_point.get_balances(addresses.to_vec()).await?; + + self.state + .write() + .set_confimed_balances(addresses, &balances); + + Ok(()) + } + + pub(crate) async fn reset_confirmed_balances(&self) -> MempoolResult<()> { + let paymaster_addresses = self.paymaster_addresses(); + + let balances = self + .entry_point + .get_balances(paymaster_addresses.clone()) + .await?; + + self.state + .write() + .set_confimed_balances(&paymaster_addresses, &balances); + + Ok(()) + } + + pub(crate) fn update_paymaster_balance_from_mined_op(&self, mined_op: &MinedOp) { + self.state + .write() + 
.update_paymaster_balance_from_mined_op(mined_op); + } + + pub(crate) fn remove_operation(&self, id: &UserOperationId) { + self.state.write().remove_operation(id); + } + + pub(crate) fn paymaster_addresses(&self) -> Vec
{ + self.state.read().paymaster_addresses() + } + + pub(crate) fn unmine_actual_cost(&self, paymaster: &Address, actual_cost: U256) { + self.state + .write() + .unmine_actual_cost(paymaster, actual_cost); + } + + pub(crate) async fn add_or_update_balance(&self, po: &PoolOperation) -> MempoolResult<()> { + if let Some(paymaster) = po.uo.paymaster() { + let paymaster_metadata = self.paymaster_balance(paymaster).await?; + return self + .state + .write() + .add_or_update_balance(po, &paymaster_metadata); + } + + Ok(()) + } +} + +// Keeps track of current and pending paymaster balances +#[derive(Debug)] +struct PaymasterTrackerInner { + // map for userop based on id user_op_fees: HashMap, - /// map for paymaster balance status - paymaster_balances: HashMap, + // map for paymaster balance status + paymaster_balances: LruMap, + // boolean for operation of tracker + tracker_enabled: bool, } -impl PaymasterTracker { - pub(crate) fn new() -> Self { +impl PaymasterTrackerInner { + fn new(tracker_enabled: bool, cache_size: u32) -> Self { Self { - ..Default::default() + user_op_fees: HashMap::new(), + tracker_enabled, + paymaster_balances: LruMap::new(cache_size), } } - pub(crate) fn paymaster_exists(&self, paymaster: Address) -> bool { - self.paymaster_balances.contains_key(&paymaster) + fn paymaster_exists(&self, paymaster: Address) -> bool { + self.paymaster_balances.peek(&paymaster).is_some() + } + + fn set_tracking(&mut self, tracking_enabled: bool) { + self.tracker_enabled = tracking_enabled; + } + + fn check_operation_cost( + &self, + op: &UserOperationVariant, + paymaster_metadata: &PaymasterMetadata, + ) -> MempoolResult<()> { + let max_op_cost = op.max_gas_cost(); + + if let Some(prev) = self.user_op_fees.get(&op.id()) { + let reset_balance = paymaster_metadata + .pending_balance + .saturating_add(prev.max_op_cost); + + if reset_balance.lt(&max_op_cost) { + return Err(MempoolError::PaymasterBalanceTooLow( + max_op_cost, + reset_balance, + )); + } + } else if 
paymaster_metadata.pending_balance.lt(&max_op_cost) { + return Err(MempoolError::PaymasterBalanceTooLow( + max_op_cost, + paymaster_metadata.pending_balance, + )); + } + + Ok(()) } - pub(crate) fn clear(&mut self) { + fn clear(&mut self) { self.user_op_fees.clear(); self.paymaster_balances.clear(); } - pub(crate) fn set_confimed_balances(&mut self, addresses: &[Address], balances: &[U256]) { + fn set_confimed_balances(&mut self, addresses: &[Address], balances: &[U256]) { for (i, address) in addresses.iter().enumerate() { - if let Some(paymaster_balance) = self.paymaster_balances.get_mut(address) { + if let Some(paymaster_balance) = self.paymaster_balances.get(address) { paymaster_balance.confirmed = balances[i]; } } } - //TODO track if paymaster has become stale and can be removed from the pool - pub(crate) fn update_paymaster_balance_from_mined_op(&mut self, mined_op: &MinedOp) { + fn update_paymaster_balance_from_mined_op(&mut self, mined_op: &MinedOp) { let id = mined_op.id(); if let Some(op_fee) = self.user_op_fees.get(&id) { - if let Some(paymaster_balance) = self.paymaster_balances.get_mut(&op_fee.paymaster) { + if let Some(paymaster_balance) = self.paymaster_balances.get(&op_fee.paymaster) { paymaster_balance.confirmed = paymaster_balance .confirmed .saturating_sub(mined_op.actual_gas_cost); @@ -73,9 +299,9 @@ impl PaymasterTracker { } } - pub(crate) fn remove_operation(&mut self, id: &UserOperationId) { + fn remove_operation(&mut self, id: &UserOperationId) { if let Some(op_fee) = self.user_op_fees.get(id) { - if let Some(paymaster_balance) = self.paymaster_balances.get_mut(&op_fee.paymaster) { + if let Some(paymaster_balance) = self.paymaster_balances.get(&op_fee.paymaster) { paymaster_balance.pending = paymaster_balance.pending.saturating_sub(op_fee.max_op_cost); } @@ -84,36 +310,14 @@ impl PaymasterTracker { } } - pub(crate) fn paymaster_addresses(&self) -> Vec
{ - let keys: Vec
= self.paymaster_balances.keys().cloned().collect(); + fn paymaster_addresses(&self) -> Vec
{ + let keys: Vec
= self.paymaster_balances.iter().map(|(k, _)| *k).collect(); keys } - pub(crate) fn update_paymaster_balance_after_deposit_reorg( - &mut self, - paymaster: Address, - deposit_amount: U256, - ) { - if let Some(paymaster_balance) = self.paymaster_balances.get_mut(&paymaster) { - paymaster_balance.confirmed = - paymaster_balance.confirmed.saturating_sub(deposit_amount); - } - } - - pub(crate) fn update_paymaster_balance_from_deposit( - &mut self, - paymaster: Address, - deposit_amount: U256, - ) { - if let Some(paymaster_balance) = self.paymaster_balances.get_mut(&paymaster) { - paymaster_balance.confirmed = - paymaster_balance.confirmed.saturating_add(deposit_amount); - } - } - - pub(crate) fn paymaster_metadata(&self, paymaster: Address) -> Option { - if let Some(paymaster_balance) = self.paymaster_balances.get(&paymaster) { + fn paymaster_metadata(&self, paymaster: Address) -> Option { + if let Some(paymaster_balance) = self.paymaster_balances.peek(&paymaster) { return Some(PaymasterMetadata { pending_balance: paymaster_balance.pending_balance(), confirmed_balance: paymaster_balance.confirmed, @@ -124,13 +328,24 @@ impl PaymasterTracker { None } - pub(crate) fn unmine_actual_cost(&mut self, paymaster: &Address, actual_cost: U256) { - if let Some(paymaster_balance) = self.paymaster_balances.get_mut(paymaster) { + fn dump_paymaster_metadata(&self) -> Vec { + self.paymaster_balances + .iter() + .map(|(address, balance)| PaymasterMetadata { + pending_balance: balance.pending_balance(), + confirmed_balance: balance.confirmed, + address: *address, + }) + .collect() + } + + fn unmine_actual_cost(&mut self, paymaster: &Address, actual_cost: U256) { + if let Some(paymaster_balance) = self.paymaster_balances.get(paymaster) { paymaster_balance.confirmed = paymaster_balance.confirmed.saturating_add(actual_cost); } } - pub(crate) fn add_or_update_balance( + fn add_or_update_balance( &mut self, po: &PoolOperation, paymaster_metadata: &PaymasterMetadata, @@ -138,7 +353,8 @@ impl 
PaymasterTracker { let id = po.uo.id(); let max_op_cost = po.uo.max_gas_cost(); - if paymaster_metadata.pending_balance.lt(&max_op_cost) { + // Only return an error if tracking is enabled + if paymaster_metadata.pending_balance.lt(&max_op_cost) && self.tracker_enabled { return Err(MempoolError::PaymasterBalanceTooLow( max_op_cost, paymaster_metadata.pending_balance, @@ -162,17 +378,10 @@ impl PaymasterTracker { &mut self, paymaster: &Address, previous_max_op_cost: U256, - ) -> MempoolResult<()> { - let prev_paymaster_balance = self - .paymaster_balances - .get_mut(paymaster) - .context("Previous paymaster must be valid to update")?; - - prev_paymaster_balance.pending = prev_paymaster_balance - .pending - .saturating_sub(previous_max_op_cost); - - Ok(()) + ) { + if let Some(pb) = self.paymaster_balances.get(paymaster) { + pb.pending = pb.pending.saturating_sub(previous_max_op_cost); + }; } fn replace_existing_user_op( @@ -191,15 +400,13 @@ impl PaymasterTracker { *existing_user_op = UserOpFees::new(paymaster_metadata.address, max_op_cost); - if let Some(paymaster_balance) = - self.paymaster_balances.get_mut(&paymaster_metadata.address) - { + if let Some(paymaster_balance) = self.paymaster_balances.get(&paymaster_metadata.address) { // check to see if paymaster has changed if prev_paymaster.ne(&paymaster_metadata.address) { paymaster_balance.pending = paymaster_balance.pending.saturating_add(max_op_cost); //remove previous limit from data - self.decrement_previous_paymaster_balance(&prev_paymaster, prev_max_op_cost)?; + self.decrement_previous_paymaster_balance(&prev_paymaster, prev_max_op_cost); } else { paymaster_balance.pending = paymaster_balance .pending @@ -210,12 +417,13 @@ impl PaymasterTracker { // check to see if paymaster has changed if prev_paymaster.ne(&paymaster_metadata.address) { //remove previous limit from data - self.decrement_previous_paymaster_balance(&prev_paymaster, prev_max_op_cost)?; + 
self.decrement_previous_paymaster_balance(&prev_paymaster, prev_max_op_cost); } - self.paymaster_balances.insert( + self.add_new_paymaster( paymaster_metadata.address, - PaymasterBalance::new(paymaster_metadata.confirmed_balance, max_op_cost), + paymaster_metadata.confirmed_balance, + max_op_cost, ); } @@ -233,17 +441,28 @@ impl PaymasterTracker { UserOpFees::new(paymaster_metadata.address, max_op_cost), ); - if let Some(paymaster_balance) = - self.paymaster_balances.get_mut(&paymaster_metadata.address) - { + if let Some(paymaster_balance) = self.paymaster_balances.get(&paymaster_metadata.address) { paymaster_balance.pending = paymaster_balance.pending.saturating_add(max_op_cost); } else { - self.paymaster_balances.insert( + self.add_new_paymaster( paymaster_metadata.address, - PaymasterBalance::new(paymaster_metadata.confirmed_balance, max_op_cost), + paymaster_metadata.confirmed_balance, + max_op_cost, ); } } + + fn add_new_paymaster( + &mut self, + address: Address, + confirmed_balance: U256, + inital_pending_balance: U256, + ) { + self.paymaster_balances.insert( + address, + PaymasterBalance::new(confirmed_balance, inital_pending_balance), + ); + } } #[derive(Debug, Clone, PartialEq, Eq, Default)] @@ -280,158 +499,215 @@ impl PaymasterBalance { #[cfg(test)] mod tests { use ethers::types::{Address, H256, U256}; - use rundler_sim::EntityInfos; - use rundler_types::{UserOperation, UserOperationId, ValidTimeRange}; - - use crate::{ - mempool::{ - paymaster::{PaymasterBalance, PaymasterTracker, UserOpFees}, - PaymasterMetadata, - }, - PoolOperation, + use rundler_provider::{DepositInfo, MockEntryPointV0_6}; + use rundler_types::{ + pool::{PaymasterMetadata, PoolOperation}, + v0_6::UserOperation, + EntityInfos, UserOperation as UserOperationTrait, UserOperationId, ValidTimeRange, }; + use super::*; + use crate::mempool::paymaster::PaymasterTracker; + fn demo_pool_op(uo: UserOperation) -> PoolOperation { PoolOperation { - uo, + uo: uo.into(), entry_point: 
Address::random(), aggregator: None, valid_time_range: ValidTimeRange::all_time(), expected_code_hash: H256::random(), sim_block_hash: H256::random(), - entities_needing_stake: vec![], account_is_staked: true, entity_infos: EntityInfos::default(), sim_block_number: 0, } } - #[test] - fn new_uo_unused_paymaster() { - let mut paymaster_tracker = PaymasterTracker::new(); + #[tokio::test] + async fn new_uo_unused_paymaster() { + let paymaster_tracker = new_paymaster_tracker(); let paymaster = Address::random(); let sender = Address::random(); - let paymaster_balance = U256::from(100000000); - let confirmed_balance = U256::from(100000000); let uo = UserOperation { sender, call_gas_limit: 10.into(), pre_verification_gas: 10.into(), + paymaster_and_data: paymaster.as_bytes().to_vec().into(), verification_gas_limit: 10.into(), max_fee_per_gas: 1.into(), ..Default::default() }; let uo_max_cost = uo.clone().max_gas_cost(); - let paymaster_meta = PaymasterMetadata { - address: paymaster, - pending_balance: paymaster_balance, - confirmed_balance, - }; let po = demo_pool_op(uo); - let res = paymaster_tracker.add_or_update_balance(&po, &paymaster_meta); + let res = paymaster_tracker.add_or_update_balance(&po).await; assert!(res.is_ok()); + let balance = paymaster_tracker + .paymaster_balance(paymaster) + .await + .unwrap(); + + assert_eq!(balance.confirmed_balance, 1000.into(),); + assert_eq!( - paymaster_tracker - .paymaster_balances - .get(&paymaster) - .unwrap() - .confirmed, - paymaster_balance, - ); - assert_eq!( - paymaster_tracker - .paymaster_balances - .get(&paymaster) - .unwrap() - .pending, - uo_max_cost, + balance.pending_balance, + balance.confirmed_balance.saturating_sub(uo_max_cost), ); } - #[test] - fn new_uo_not_enough_balance() { - let mut paymaster_tracker = PaymasterTracker::new(); + #[tokio::test] + async fn new_uo_not_enough_balance() { + let paymaster_tracker = new_paymaster_tracker(); let paymaster = Address::random(); let sender = Address::random(); let 
paymaster_balance = U256::from(5); let confirmed_balance = U256::from(5); + + paymaster_tracker.add_new_paymaster(paymaster, confirmed_balance, paymaster_balance); + let uo = UserOperation { sender, call_gas_limit: 10.into(), + paymaster_and_data: paymaster.as_bytes().to_vec().into(), pre_verification_gas: 10.into(), verification_gas_limit: 10.into(), max_fee_per_gas: 1.into(), ..Default::default() }; - let paymaster_meta = PaymasterMetadata { - address: paymaster, - pending_balance: paymaster_balance, - confirmed_balance, + let po = demo_pool_op(uo); + + let res = paymaster_tracker.add_or_update_balance(&po).await; + + assert!(res.is_err()); + } + + #[tokio::test] + async fn new_uo_not_enough_balance_tracking_disabled() { + let paymaster_tracker = new_paymaster_tracker(); + paymaster_tracker.set_tracking(false); + + let paymaster = Address::random(); + let sender = Address::random(); + let pending_op_cost = U256::from(5); + let confirmed_balance = U256::from(5); + let uo = UserOperation { + sender, + call_gas_limit: 10.into(), + pre_verification_gas: 10.into(), + verification_gas_limit: 10.into(), + max_fee_per_gas: 1.into(), + ..Default::default() }; let po = demo_pool_op(uo); - let res = paymaster_tracker.add_or_update_balance(&po, &paymaster_meta); - assert!(res.is_err()); + paymaster_tracker.add_new_paymaster(paymaster, confirmed_balance, pending_op_cost); + + let res = paymaster_tracker.add_or_update_balance(&po).await; + assert!(res.is_ok()); } - #[test] - fn new_uo_not_enough_balance_existing_paymaster() { - let mut paymaster_tracker = PaymasterTracker::new(); + #[tokio::test] + async fn new_uo_not_enough_balance_existing_paymaster() { + let paymaster_tracker = new_paymaster_tracker(); let paymaster = Address::random(); let sender = Address::random(); let paymaster_balance = U256::from(100); let pending_paymaster_balance = U256::from(10); - paymaster_tracker.paymaster_balances.insert( + paymaster_tracker.add_new_paymaster( paymaster, - PaymasterBalance { - 
pending: pending_paymaster_balance, - confirmed: paymaster_balance, - }, + paymaster_balance, + pending_paymaster_balance, ); let uo = UserOperation { sender, call_gas_limit: 100.into(), + paymaster_and_data: paymaster.as_bytes().to_vec().into(), pre_verification_gas: 100.into(), verification_gas_limit: 100.into(), max_fee_per_gas: 1.into(), ..Default::default() }; - let paymaster_meta = PaymasterMetadata { - address: paymaster, - pending_balance: paymaster_balance, - confirmed_balance: paymaster_balance, - }; - let po = demo_pool_op(uo); - let res = paymaster_tracker.add_or_update_balance(&po, &paymaster_meta); + let res = paymaster_tracker.add_or_update_balance(&po).await; assert!(res.is_err()); } - #[test] - fn new_uo_existing_paymaster_valid_balance() { - let mut paymaster_tracker = PaymasterTracker::new(); + #[tokio::test] + async fn test_reset_balances() { + let paymaster_tracker = new_paymaster_tracker(); + + let paymaster_0 = Address::random(); + let paymaster_0_confimed = 1000.into(); + + paymaster_tracker.add_new_paymaster(paymaster_0, paymaster_0_confimed, 0.into()); + + let balance_0 = paymaster_tracker + .paymaster_balance(paymaster_0) + .await + .unwrap(); + + assert_eq!(balance_0.confirmed_balance, 1000.into()); + + let _ = paymaster_tracker.reset_confirmed_balances().await; + + let balance_0 = paymaster_tracker + .paymaster_balance(paymaster_0) + .await + .unwrap(); + + assert_eq!(balance_0.confirmed_balance, 50.into()); + } + + #[tokio::test] + async fn test_reset_balances_for() { + let paymaster_tracker = new_paymaster_tracker(); + + let paymaster_0 = Address::random(); + let paymaster_0_confimed = 1000.into(); + + paymaster_tracker.add_new_paymaster(paymaster_0, paymaster_0_confimed, 0.into()); + + let balance_0 = paymaster_tracker + .paymaster_balance(paymaster_0) + .await + .unwrap(); + + assert_eq!(balance_0.confirmed_balance, 1000.into()); + + let _ = paymaster_tracker + .reset_confirmed_balances_for(&[paymaster_0]) + .await; + + let 
balance_0 = paymaster_tracker + .paymaster_balance(paymaster_0) + .await + .unwrap(); + + assert_eq!(balance_0.confirmed_balance, 50.into()); + } + + #[tokio::test] + async fn new_uo_existing_paymaster_valid_balance() { + let paymaster_tracker = new_paymaster_tracker(); let paymaster = Address::random(); let paymaster_balance = U256::from(100000000); let pending_paymaster_balance = U256::from(10); - paymaster_tracker.paymaster_balances.insert( + paymaster_tracker.add_new_paymaster( paymaster, - PaymasterBalance { - pending: pending_paymaster_balance, - confirmed: paymaster_balance, - }, + paymaster_balance, + pending_paymaster_balance, ); let sender = Address::random(); @@ -440,51 +716,35 @@ mod tests { call_gas_limit: 10.into(), pre_verification_gas: 10.into(), verification_gas_limit: 10.into(), + paymaster_and_data: paymaster.as_bytes().to_vec().into(), max_fee_per_gas: 1.into(), ..Default::default() }; let uo_max_cost = uo.clone().max_gas_cost(); - let paymaster_meta = PaymasterMetadata { - address: paymaster, - pending_balance: paymaster_balance, - confirmed_balance: paymaster_balance, - }; - let po = demo_pool_op(uo); - let res = paymaster_tracker.add_or_update_balance(&po, &paymaster_meta); + let res = paymaster_tracker.add_or_update_balance(&po).await; assert!(res.is_ok()); + + let remaining = paymaster_tracker + .paymaster_balance(paymaster) + .await + .unwrap(); + + assert_eq!(remaining.confirmed_balance, paymaster_balance); assert_eq!( - paymaster_tracker - .paymaster_balances - .get(&paymaster) - .unwrap() - .confirmed, - paymaster_balance, - ); - assert_eq!( - paymaster_tracker - .paymaster_balances - .get(&paymaster) - .unwrap() - .pending, - pending_paymaster_balance.saturating_add(uo_max_cost), - ); - assert_eq!( - paymaster_tracker - .paymaster_balances - .get(&paymaster) - .unwrap() - .pending_balance(), - paymaster_balance.saturating_sub(uo_max_cost.saturating_add(pending_paymaster_balance)), + remaining.pending_balance, + paymaster_balance + 
.saturating_sub(pending_paymaster_balance) + .saturating_sub(uo_max_cost), ); } - #[test] - fn replacement_uo_new_paymaster() { - let mut paymaster_tracker = PaymasterTracker::new(); + #[tokio::test] + async fn replacement_uo_new_paymaster() { + let paymaster_tracker = new_paymaster_tracker(); let paymaster_0 = Address::random(); let paymaster_1 = Address::random(); @@ -496,6 +756,7 @@ mod tests { sender, call_gas_limit: 10.into(), pre_verification_gas: 10.into(), + paymaster_and_data: paymaster_0.as_bytes().to_vec().into(), verification_gas_limit: 10.into(), max_fee_per_gas: 1.into(), ..Default::default() @@ -503,30 +764,27 @@ mod tests { let mut uo_1 = uo.clone(); uo_1.max_fee_per_gas = 2.into(); + uo_1.paymaster_and_data = paymaster_1.as_bytes().to_vec().into(); let max_op_cost_0 = uo.max_gas_cost(); let max_op_cost_1 = uo_1.max_gas_cost(); - let paymaster_meta_0 = PaymasterMetadata { - address: paymaster_0, - pending_balance: paymaster_balance_0, - confirmed_balance: paymaster_balance_0, - }; - - let paymaster_meta_1 = PaymasterMetadata { - address: paymaster_1, - pending_balance: paymaster_balance_1, - confirmed_balance: paymaster_balance_1, - }; + paymaster_tracker.add_new_paymaster(paymaster_0, paymaster_balance_0, 0.into()); + paymaster_tracker.add_new_paymaster(paymaster_1, paymaster_balance_1, 0.into()); let po_0 = demo_pool_op(uo); + // Update first paymaster balance with first uo paymaster_tracker - .add_or_update_balance(&po_0, &paymaster_meta_0) + .add_or_update_balance(&po_0) + .await .unwrap(); assert_eq!( - paymaster_tracker.paymaster_metadata(paymaster_0).unwrap(), + paymaster_tracker + .paymaster_balance(paymaster_0) + .await + .unwrap(), PaymasterMetadata { address: paymaster_0, confirmed_balance: paymaster_balance_0, @@ -537,12 +795,16 @@ mod tests { let po_1 = demo_pool_op(uo_1); // send same uo with updated fees and new paymaster paymaster_tracker - .add_or_update_balance(&po_1, &paymaster_meta_1) + .add_or_update_balance(&po_1) + .await 
.unwrap(); // check previous paymaster goes back to normal balance assert_eq!( - paymaster_tracker.paymaster_metadata(paymaster_0).unwrap(), + paymaster_tracker + .paymaster_balance(paymaster_0) + .await + .unwrap(), PaymasterMetadata { address: paymaster_0, confirmed_balance: paymaster_balance_0, @@ -552,7 +814,10 @@ mod tests { // check that new paymaster has been updated correctly assert_eq!( - paymaster_tracker.paymaster_metadata(paymaster_1).unwrap(), + paymaster_tracker + .paymaster_balance(paymaster_1) + .await + .unwrap(), PaymasterMetadata { address: paymaster_1, confirmed_balance: paymaster_balance_1, @@ -561,9 +826,9 @@ mod tests { ); } - #[test] - fn replacement_uo_same_paymaster() { - let mut paymaster_tracker = PaymasterTracker::new(); + #[tokio::test] + async fn replacement_uo_same_paymaster() { + let paymaster_tracker = new_paymaster_tracker(); let sender = Address::random(); let paymaster = Address::random(); let paymaster_balance = U256::from(100000000); @@ -572,22 +837,19 @@ mod tests { let existing_id = UserOperationId { sender, nonce }; - paymaster_tracker.paymaster_balances.insert( + // add paymaster + paymaster_tracker.add_new_paymaster( paymaster, - PaymasterBalance { - pending: pending_paymaster_balance, - confirmed: paymaster_balance, - }, + paymaster_balance, + pending_paymaster_balance, ); - // existing fee - paymaster_tracker.user_op_fees.insert( - existing_id, - UserOpFees { - paymaster, - max_op_cost: 30.into(), - }, - ); + let meta = paymaster_tracker + .paymaster_balance(paymaster) + .await + .unwrap(); + + paymaster_tracker.add_new_user_op(&existing_id, &meta, 30.into()); // replacement_uo let uo = UserOperation { @@ -596,45 +858,116 @@ mod tests { call_gas_limit: 100.into(), pre_verification_gas: 100.into(), verification_gas_limit: 100.into(), + paymaster_and_data: paymaster.as_bytes().to_vec().into(), max_fee_per_gas: 1.into(), ..Default::default() }; - let paymaster_meta = PaymasterMetadata { - address: paymaster, - 
pending_balance: paymaster_balance, - confirmed_balance: paymaster_balance, - }; - let max_op_cost = uo.clone().max_gas_cost(); let po = demo_pool_op(uo); - let res = paymaster_tracker.add_or_update_balance(&po, &paymaster_meta); + let res = paymaster_tracker.add_or_update_balance(&po).await; assert!(res.is_ok()); assert_eq!( paymaster_tracker - .paymaster_balances - .get(&paymaster) + .paymaster_balance(paymaster) + .await .unwrap() - .confirmed, + .confirmed_balance, paymaster_balance, ); assert_eq!( paymaster_tracker - .paymaster_balances - .get(&paymaster) - .unwrap() - .pending, - max_op_cost, - ); - assert_eq!( - paymaster_tracker - .paymaster_balances - .get(&paymaster) + .paymaster_balance(paymaster) + .await .unwrap() - .pending_balance(), - paymaster_balance.saturating_sub(max_op_cost), + .pending_balance, + paymaster_balance + .saturating_sub(pending_paymaster_balance) + .saturating_sub(max_op_cost), ); } + + #[tokio::test] + async fn test_stake_status_staked() { + let tracker = new_paymaster_tracker(); + + let status = tracker.get_stake_status(Address::random()).await.unwrap(); + + assert!(status.is_staked); + } + + #[test] + fn test_inner_cache_full() { + let mut inner = PaymasterTrackerInner::new(true, 2); + + let paymaster_0 = Address::random(); + let paymaster_1 = Address::random(); + let paymaster_2 = Address::random(); + + let confirmed_balance = U256::from(1000); + let pending_balance = U256::from(100); + + inner.add_new_paymaster(paymaster_0, confirmed_balance, pending_balance); + inner.add_new_paymaster(paymaster_1, confirmed_balance, pending_balance); + inner.add_new_paymaster(paymaster_2, confirmed_balance, pending_balance); + + assert_eq!(inner.paymaster_balances.len(), 2); + assert!(!inner.paymaster_exists(paymaster_0)); + assert!(inner.paymaster_exists(paymaster_1)); + assert!(inner.paymaster_exists(paymaster_2)); + } + + fn new_paymaster_tracker() -> PaymasterTracker { + let mut entrypoint = MockEntryPointV0_6::new(); + + 
entrypoint.expect_get_deposit_info().returning(|_| { + Ok(DepositInfo { + deposit: 1000.into(), + staked: true, + stake: 10000, + unstake_delay_sec: 100, + withdraw_time: 10, + }) + }); + + entrypoint + .expect_get_balances() + .returning(|_| Ok(vec![50.into()])); + + entrypoint + .expect_balance_of() + .returning(|_, _| Ok(U256::from(1000))); + + let config = PaymasterConfig::new(1001, 99, true, u32::MAX); + + PaymasterTracker::new(entrypoint, config) + } + + impl PaymasterTracker { + fn add_new_user_op( + &self, + id: &UserOperationId, + paymaster_metadata: &PaymasterMetadata, + max_op_cost: U256, + ) { + self.state + .write() + .add_new_user_op(id, paymaster_metadata, max_op_cost) + } + + fn add_new_paymaster( + &self, + address: Address, + confirmed_balance: U256, + inital_pending_balance: U256, + ) { + self.state.write().add_new_paymaster( + address, + confirmed_balance, + inital_pending_balance, + ); + } + } } diff --git a/crates/pool/src/mempool/pool.rs b/crates/pool/src/mempool/pool.rs index 5bb38ee7..b8a5e3a2 100644 --- a/crates/pool/src/mempool/pool.rs +++ b/crates/pool/src/mempool/pool.rs @@ -12,9 +12,10 @@ // If not, see https://www.gnu.org/licenses/. 
use std::{ - cmp::Ordering, + cmp::{self, Ordering}, collections::{hash_map::Entry, BTreeSet, HashMap, HashSet}, sync::Arc, + time::{Duration, SystemTime, UNIX_EPOCH}, }; use anyhow::Context; @@ -22,18 +23,15 @@ use ethers::{ abi::Address, types::{H256, U256}, }; -use rundler_types::{Entity, EntityType, Timestamp, UserOperation, UserOperationId}; -use rundler_utils::math; -use tracing::info; - -use super::{ - entity_tracker::EntityCounter, - error::{MempoolError, MempoolResult}, - paymaster::PaymasterTracker, - size::SizeTracker, - PaymasterMetadata, PoolConfig, PoolOperation, +use rundler_types::{ + pool::{MempoolError, PoolOperation}, + Entity, EntityType, GasFees, Timestamp, UserOperation, UserOperationId, UserOperationVariant, }; -use crate::chain::{DepositInfo, MinedOp}; +use rundler_utils::math; +use tracing::{info, warn}; + +use super::{entity_tracker::EntityCounter, size::SizeTracker, MempoolResult, PoolConfig}; +use crate::chain::MinedOp; #[derive(Debug, Clone)] pub(crate) struct PoolInnerConfig { @@ -69,6 +67,8 @@ pub(crate) struct PoolInner { by_id: HashMap, /// Best operations, sorted by gas price best: BTreeSet, + /// Time to mine info + time_to_mine: HashMap, /// Removed operations, temporarily kept around in case their blocks are /// reorged away. Stored along with the block number at which it was /// removed. 
@@ -80,12 +80,14 @@ pub(crate) struct PoolInner { count_by_address: HashMap, /// Submission ID counter submission_id: u64, - /// A field that keeps track of paymaster balances across the mempool - paymaster_balances: PaymasterTracker, /// keeps track of the size of the pool in bytes pool_size: SizeTracker, /// keeps track of the size of the removed cache in bytes cache_size: SizeTracker, + /// The time of the previous block + prev_sys_block_time: Duration, + /// The number of the previous block + prev_block_number: u64, } impl PoolInner { @@ -95,22 +97,27 @@ impl PoolInner { by_hash: HashMap::new(), by_id: HashMap::new(), best: BTreeSet::new(), - paymaster_balances: PaymasterTracker::new(), + time_to_mine: HashMap::new(), mined_at_block_number_by_hash: HashMap::new(), mined_hashes_with_block_numbers: BTreeSet::new(), count_by_address: HashMap::new(), submission_id: 0, pool_size: SizeTracker::default(), cache_size: SizeTracker::default(), + prev_sys_block_time: Duration::default(), + prev_block_number: 0, } } /// Returns hash of operation to replace if operation is a replacement - pub(crate) fn check_replacement(&self, op: &UserOperation) -> MempoolResult> { + pub(crate) fn check_replacement( + &self, + op: &UserOperationVariant, + ) -> MempoolResult> { // Check if operation already known if self .by_hash - .contains_key(&op.op_hash(self.config.entry_point, self.config.chain_id)) + .contains_key(&op.hash(self.config.entry_point, self.config.chain_id)) { return Err(MempoolError::OperationAlreadyKnown); } @@ -119,69 +126,88 @@ impl PoolInner { let (replacement_priority_fee, replacement_fee) = self.get_min_replacement_fees(pool_op.uo()); - if op.max_priority_fee_per_gas < replacement_priority_fee - || op.max_fee_per_gas < replacement_fee + if op.max_priority_fee_per_gas() < replacement_priority_fee + || op.max_fee_per_gas() < replacement_fee { return Err(MempoolError::ReplacementUnderpriced( - pool_op.uo().max_priority_fee_per_gas, - pool_op.uo().max_fee_per_gas, + 
pool_op.uo().max_priority_fee_per_gas(), + pool_op.uo().max_fee_per_gas(), )); } Ok(Some( pool_op .uo() - .op_hash(self.config.entry_point, self.config.chain_id), + .hash(self.config.entry_point, self.config.chain_id), )) } else { Ok(None) } } - pub(crate) fn add_operation( - &mut self, - op: PoolOperation, - paymaster_meta: Option, - ) -> MempoolResult { + pub(crate) fn add_operation(&mut self, op: PoolOperation) -> MempoolResult { println!("HC pool add_operation {:?}", op); - let ret = self.add_operation_internal(Arc::new(op), None, paymaster_meta); + let ret = self.add_operation_internal(Arc::new(op), None); self.update_metrics(); ret } - pub(crate) fn paymaster_addresses(&self) -> Vec
{ - self.paymaster_balances.paymaster_addresses() - } - - pub(crate) fn set_confirmed_paymaster_balances( - &mut self, - addresses: &[Address], - balances: &[U256], - ) { - self.paymaster_balances - .set_confimed_balances(addresses, balances); - } - pub(crate) fn best_operations(&self) -> impl Iterator> { self.best.clone().into_iter().map(|v| v.po) } - /// Removes all operations using the given entity, returning the hashes of the removed operations. + /// Does maintenance on the pool. + /// + /// 1) Removes all operations using the given entity, returning the hashes of the removed operations. + /// 2) Updates time to mine stats for all operations in the pool. /// /// NOTE: This method is O(n) where n is the number of operations in the pool. /// It should be called sparingly (e.g. when a block is mined). - pub(crate) fn remove_expired(&mut self, expire_before: Timestamp) -> Vec<(H256, Timestamp)> { + pub(crate) fn do_maintenance( + &mut self, + block_number: u64, + block_timestamp: Timestamp, + candidate_gas_fees: GasFees, + base_fee: U256, + ) -> Vec<(H256, Timestamp)> { + let sys_block_time = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("time should be after epoch"); + + let block_delta_time = sys_block_time.saturating_sub(self.prev_sys_block_time); + let block_delta_height = block_number.saturating_sub(self.prev_block_number); + let candidate_gas_price = base_fee + candidate_gas_fees.max_priority_fee_per_gas; let mut expired = Vec::new(); - for (hash, op) in &self.by_hash { - if op.po.valid_time_range.valid_until < expire_before { + let mut num_candidates = 0; + + for (hash, op) in &mut self.by_hash { + if op.po.valid_time_range.valid_until < block_timestamp { expired.push((*hash, op.po.valid_time_range.valid_until)); } + + let uo_gas_price = cmp::min( + op.uo().max_fee_per_gas(), + op.uo().max_priority_fee_per_gas() + base_fee, + ); + + num_candidates += if uo_gas_price >= candidate_gas_price { + if let Some(ttm) = self.time_to_mine.get_mut(hash) { 
+ ttm.increase(block_delta_time, block_delta_height); + } + 1 + } else { + 0 + }; } for (hash, _) in &expired { self.remove_operation_by_hash(*hash); } + PoolMetrics::set_num_candidates(num_candidates, self.config.entry_point); + self.prev_block_number = block_number; + self.prev_sys_block_time = sys_block_time; + expired } @@ -197,6 +223,10 @@ impl PoolInner { self.by_hash.get(&hash).map(|o| o.po.clone()) } + pub(crate) fn get_operation_by_id(&self, id: &UserOperationId) -> Option> { + self.by_id.get(id).map(|o| o.po.clone()) + } + pub(crate) fn remove_operation_by_hash(&mut self, hash: H256) -> Option> { let ret = self.remove_operation_internal(hash, None); self.update_metrics(); @@ -204,10 +234,15 @@ impl PoolInner { } // STO-040 - pub(crate) fn check_multiple_roles_violation(&self, uo: &UserOperation) -> MempoolResult<()> { - if let Some(ec) = self.count_by_address.get(&uo.sender) { + pub(crate) fn check_multiple_roles_violation( + &self, + uo: &UserOperationVariant, + ) -> MempoolResult<()> { + if let Some(ec) = self.count_by_address.get(&uo.sender()) { if ec.includes_non_sender() { - return Err(MempoolError::SenderAddressUsedAsAlternateEntity(uo.sender)); + return Err(MempoolError::SenderAddressUsedAsAlternateEntity( + uo.sender(), + )); } } @@ -231,11 +266,11 @@ impl PoolInner { pub(crate) fn check_associated_storage( &self, accessed_storage: &HashSet
, - uo: &UserOperation, + uo: &UserOperationVariant, ) -> MempoolResult<()> { for storage_address in accessed_storage { if let Some(ec) = self.count_by_address.get(storage_address) { - if ec.sender().gt(&0) && storage_address.ne(&uo.sender) { + if ec.sender().gt(&0) && storage_address.ne(&uo.sender()) { // Reject UO if the sender is also an entity in another UO in the mempool for entity in uo.entities() { if storage_address.eq(&entity.address) { @@ -256,12 +291,22 @@ impl PoolInner { ) -> Option> { let tx_in_pool = self.by_id.get(&mined_op.id())?; + // TODO(danc): there is a bug here with replacements. + // UO1 is replaced by UO2, they both have the same ID. + // UO1 was bundled before UO2 replaced it, and eventually UO1 gets mined. + // UO2 should be removed from the pool, but since the hashes don't match, it will + // stay in the pool forever as `remove_operation_internal` is hash based. + // Time to mine will also fail because UO1's hash was removed from the pool. + + if let Some(time_to_mine) = self.time_to_mine.get(&mined_op.hash) { + PoolMetrics::record_time_to_mine(time_to_mine, mined_op.entry_point); + } else { + warn!("Could not find time to mine for {:?}", mined_op.hash); + } + let hash = tx_in_pool .uo() - .op_hash(mined_op.entry_point, self.config.chain_id); - - self.paymaster_balances - .update_paymaster_balance_from_mined_op(mined_op); + .hash(mined_op.entry_point, self.config.chain_id); let ret = self.remove_operation_internal(hash, Some(block_number)); @@ -275,7 +320,7 @@ impl PoolInner { self.mined_hashes_with_block_numbers .remove(&(block_number, hash)); - if let Err(error) = self.put_back_unmined_operation(op.clone(), mined_op) { + if let Err(error) = self.put_back_unmined_operation(op.clone()) { info!("Could not put back unmined operation: {error}"); }; self.update_metrics(); @@ -306,10 +351,7 @@ impl PoolInner { } false }) - .map(|o| { - o.po.uo - .op_hash(self.config.entry_point, self.config.chain_id) - }) + .map(|o| 
o.po.uo.hash(self.config.entry_point, self.config.chain_id)) .collect::>(); for &hash in &to_remove { self.remove_operation_internal(hash, None); @@ -348,38 +390,11 @@ impl PoolInner { self.update_metrics(); } - pub(crate) fn paymaster_metadata(&self, paymaster: Address) -> Option { - self.paymaster_balances.paymaster_metadata(paymaster) - } - - pub(crate) fn paymaster_exists(&self, paymaster: Address) -> bool { - self.paymaster_balances.paymaster_exists(paymaster) - } - - pub(crate) fn update_paymaster_balances_after_update( - &mut self, - deposits: &Vec, - unmined_entity_deposits: &Vec, - ) { - for deposit in deposits { - self.paymaster_balances - .update_paymaster_balance_from_deposit(deposit.address, deposit.amount) - } - - for unmined_deposit in unmined_entity_deposits { - self.paymaster_balances - .update_paymaster_balance_after_deposit_reorg( - unmined_deposit.address, - unmined_deposit.amount, - ) - } - } - pub(crate) fn clear(&mut self) { self.by_hash.clear(); self.by_id.clear(); - self.paymaster_balances.clear(); self.best.clear(); + self.time_to_mine.clear(); self.mined_at_block_number_by_hash.clear(); self.mined_hashes_with_block_numbers.clear(); self.count_by_address.clear(); @@ -395,7 +410,7 @@ impl PoolInner { if let Some(worst) = self.best.pop_last() { let hash = worst .uo() - .op_hash(self.config.entry_point, self.config.chain_id); + .hash(self.config.entry_point, self.config.chain_id); let _ = self .remove_operation_internal(hash, None) @@ -408,27 +423,14 @@ impl PoolInner { Ok(removed) } - fn put_back_unmined_operation( - &mut self, - op: OrderedPoolOperation, - mined_op: &MinedOp, - ) -> MempoolResult { - let mut paymaster_meta = None; - if let Some(paymaster) = op.uo().paymaster() { - self.paymaster_balances - .unmine_actual_cost(&paymaster, mined_op.actual_gas_cost); - - paymaster_meta = self.paymaster_metadata(paymaster); - } - - self.add_operation_internal(op.po, Some(op.submission_id), paymaster_meta) + fn put_back_unmined_operation(&mut 
self, op: OrderedPoolOperation) -> MempoolResult { + self.add_operation_internal(op.po, Some(op.submission_id)) } fn add_operation_internal( &mut self, op: Arc, submission_id: Option, - paymaster_meta: Option, ) -> MempoolResult { // Check if operation already known or replacing an existing operation // if replacing, remove the existing operation @@ -436,12 +438,6 @@ impl PoolInner { self.remove_operation_by_hash(hash); } - // check or update paymaster balance - if let Some(paymaster_meta) = paymaster_meta { - self.paymaster_balances - .add_or_update_balance(&op, &paymaster_meta)?; - } - let pool_op = OrderedPoolOperation { po: op, submission_id: submission_id.unwrap_or_else(|| self.next_submission_id()), @@ -458,11 +454,12 @@ impl PoolInner { // create and insert ordered operation let hash = pool_op .uo() - .op_hash(self.config.entry_point, self.config.chain_id); + .hash(self.config.entry_point, self.config.chain_id); self.pool_size += pool_op.mem_size(); self.by_hash.insert(hash, pool_op.clone()); self.by_id.insert(pool_op.uo().id(), pool_op.clone()); self.best.insert(pool_op); + self.time_to_mine.insert(hash, TimeToMineInfo::new()); // TODO(danc): This silently drops UOs from the pool without reporting let removed = self @@ -485,7 +482,7 @@ impl PoolInner { let id = &op.po.uo.id(); self.by_id.remove(id); self.best.remove(&op); - self.paymaster_balances.remove_operation(id); + self.time_to_mine.remove(&hash); if let Some(block_number) = block_number { self.cache_size += op.mem_size(); @@ -518,13 +515,13 @@ impl PoolInner { id } - fn get_min_replacement_fees(&self, op: &UserOperation) -> (U256, U256) { + fn get_min_replacement_fees(&self, op: &UserOperationVariant) -> (U256, U256) { let replacement_priority_fee = math::increase_by_percent( - op.max_priority_fee_per_gas, + op.max_priority_fee_per_gas(), self.config.min_replacement_fee_increase_percentage, ); let replacement_fee = math::increase_by_percent( - op.max_fee_per_gas, + op.max_fee_per_gas(), 
self.config.min_replacement_fee_increase_percentage, ); (replacement_priority_fee, replacement_fee) @@ -553,12 +550,12 @@ struct OrderedPoolOperation { } impl OrderedPoolOperation { - fn uo(&self) -> &UserOperation { + fn uo(&self) -> &UserOperationVariant { &self.po.uo } fn mem_size(&self) -> usize { - std::mem::size_of::() + self.po.mem_size() + std::mem::size_of::() + self.po.mem_size() } } @@ -569,8 +566,8 @@ impl Ord for OrderedPoolOperation { // Sort by gas price descending then by id ascending other .uo() - .max_fee_per_gas - .cmp(&self.uo().max_fee_per_gas) + .max_fee_per_gas() + .cmp(&self.uo().max_fee_per_gas()) .then_with(|| self.submission_id.cmp(&other.submission_id)) } } @@ -587,22 +584,69 @@ impl PartialEq for OrderedPoolOperation { } } +#[derive(Debug, Clone)] +struct TimeToMineInfo { + candidate_for_blocks: u64, + candidate_for_time: Duration, +} + +impl TimeToMineInfo { + fn new() -> Self { + Self { + candidate_for_blocks: 0, + candidate_for_time: Duration::default(), + } + } + + fn increase(&mut self, block_delta_time: Duration, block_delta_height: u64) { + self.candidate_for_blocks += block_delta_height; + self.candidate_for_time += block_delta_time; + } +} + struct PoolMetrics {} impl PoolMetrics { fn set_pool_metrics(num_ops: usize, size_bytes: isize, entry_point: Address) { - metrics::gauge!("op_pool_num_ops_in_pool", num_ops as f64, "entrypoint_addr" => entry_point.to_string()); - metrics::gauge!("op_pool_size_bytes", size_bytes as f64, "entrypoint_addr" => entry_point.to_string()); + metrics::gauge!("op_pool_num_ops_in_pool", "entry_point" => entry_point.to_string()) + .set(num_ops as f64); + metrics::gauge!("op_pool_size_bytes", "entry_point" => entry_point.to_string()) + .set(size_bytes as f64); } + fn set_cache_metrics(num_ops: usize, size_bytes: isize, entry_point: Address) { - metrics::gauge!("op_pool_num_ops_in_cache", num_ops as f64, "entrypoint_addr" => entry_point.to_string()); - metrics::gauge!("op_pool_cache_size_bytes", 
size_bytes as f64, "entrypoint_addr" => entry_point.to_string()); + metrics::gauge!("op_pool_num_ops_in_cache", "entry_point" => entry_point.to_string()) + .set(num_ops as f64); + metrics::gauge!("op_pool_cache_size_bytes", "entry_point" => entry_point.to_string()) + .set(size_bytes as f64); + } + + // Set the number of candidates in the pool, only changes on block boundaries + fn set_num_candidates(num_candidates: usize, entry_point: Address) { + metrics::gauge!("op_pool_num_candidates", "entry_point" => entry_point.to_string()) + .set(num_candidates as f64); + } + + fn record_time_to_mine(time_to_mine: &TimeToMineInfo, entry_point: Address) { + metrics::histogram!( + "op_pool_time_to_mine", + "entry_point" => entry_point.to_string() + ) + .record(time_to_mine.candidate_for_time.as_millis() as f64); + metrics::histogram!( + "op_pool_blocks_to_mine", + "entry_point" => entry_point.to_string() + ) + .record(time_to_mine.candidate_for_blocks as f64); } } #[cfg(test)] mod tests { - use rundler_sim::{EntityInfo, EntityInfos}; + use rundler_types::{ + v0_6::UserOperation, EntityInfo, EntityInfos, UserOperation as UserOperationTrait, + ValidTimeRange, + }; use super::*; @@ -610,13 +654,43 @@ mod tests { fn add_single_op() { let mut pool = PoolInner::new(conf()); let op = create_op(Address::random(), 0, 1); - let hash = pool.add_operation(op.clone(), None).unwrap(); + let hash = pool.add_operation(op.clone()).unwrap(); check_map_entry(pool.by_hash.get(&hash), Some(&op)); check_map_entry(pool.by_id.get(&op.uo.id()), Some(&op)); check_map_entry(pool.best.iter().next(), Some(&op)); } + #[test] + fn test_get_by_hash() { + let mut pool = PoolInner::new(conf()); + let op = create_op(Address::random(), 0, 1); + let hash = pool.add_operation(op.clone()).unwrap(); + + let get_op = pool.get_operation_by_hash(hash).unwrap(); + assert_eq!(op, *get_op); + + assert_eq!(pool.get_operation_by_hash(H256::random()), None); + } + + #[test] + fn test_get_by_id() { + let mut pool = 
PoolInner::new(conf()); + let op = create_op(Address::random(), 0, 1); + pool.add_operation(op.clone()).unwrap(); + let id = op.uo.id(); + + let get_op = pool.get_operation_by_id(&id).unwrap(); + assert_eq!(op, *get_op); + + let bad_id = UserOperationId { + sender: Address::random(), + nonce: 0.into(), + }; + + assert_eq!(pool.get_operation_by_id(&bad_id), None); + } + #[test] fn add_multiple_ops() { let mut pool = PoolInner::new(conf()); @@ -628,7 +702,7 @@ mod tests { let mut hashes = vec![]; for op in ops.iter() { - hashes.push(pool.add_operation(op.clone(), None).unwrap()); + hashes.push(pool.add_operation(op.clone()).unwrap()); } for (hash, op) in hashes.iter().zip(&ops) { @@ -654,7 +728,7 @@ mod tests { let mut hashes = vec![]; for op in ops.iter() { - hashes.push(pool.add_operation(op.clone(), None).unwrap()); + hashes.push(pool.add_operation(op.clone()).unwrap()); } // best should be sorted by gas, then by submission id @@ -675,7 +749,7 @@ mod tests { let mut hashes = vec![]; for op in ops.iter() { - hashes.push(pool.add_operation(op.clone(), None).unwrap()); + hashes.push(pool.add_operation(op.clone()).unwrap()); } assert!(pool.remove_operation_by_hash(hashes[0]).is_some()); @@ -706,7 +780,7 @@ mod tests { ]; for mut op in ops.into_iter() { op.aggregator = Some(account); - pool.add_operation(op.clone(), None).unwrap(); + pool.add_operation(op.clone()).unwrap(); } assert_eq!(pool.by_hash.len(), 3); @@ -724,9 +798,9 @@ mod tests { let op = create_op(sender, nonce, 1); - let hash = op.uo.op_hash(pool.config.entry_point, pool.config.chain_id); + let hash = op.uo.hash(pool.config.entry_point, pool.config.chain_id); - pool.add_operation(op, None).unwrap(); + pool.add_operation(op).unwrap(); let mined_op = MinedOp { paymaster: None, @@ -753,12 +827,10 @@ mod tests { let op = create_op(sender, nonce, 1); let op_2 = create_op(sender, nonce, 2); - let hash = op_2 - .uo - .op_hash(pool.config.entry_point, pool.config.chain_id); + let hash = 
op_2.uo.hash(pool.config.entry_point, pool.config.chain_id); - pool.add_operation(op, None).unwrap(); - pool.add_operation(op_2, None).unwrap(); + pool.add_operation(op).unwrap(); + pool.add_operation(op_2).unwrap(); let mined_op = MinedOp { paymaster: None, @@ -788,10 +860,10 @@ mod tests { for mut op in ops.into_iter() { op.aggregator = Some(agg); op.entity_infos.aggregator = Some(EntityInfo { - address: agg, + entity: Entity::aggregator(agg), is_staked: false, }); - pool.add_operation(op.clone(), None).unwrap(); + pool.add_operation(op.clone()).unwrap(); } assert_eq!(pool.by_hash.len(), 3); @@ -811,12 +883,14 @@ mod tests { create_op(Address::random(), 0, 1), ]; for mut op in ops.into_iter() { - op.uo.paymaster_and_data = paymaster.as_bytes().to_vec().into(); + let uo: &mut UserOperation = op.uo.as_mut(); + + uo.paymaster_and_data = paymaster.as_bytes().to_vec().into(); op.entity_infos.paymaster = Some(EntityInfo { - address: op.uo.paymaster().unwrap(), + entity: Entity::paymaster(paymaster), is_staked: false, }); - pool.add_operation(op.clone(), None).unwrap(); + pool.add_operation(op.clone()).unwrap(); } assert_eq!(pool.by_hash.len(), 3); @@ -835,19 +909,20 @@ mod tests { let aggregator = Address::random(); let mut op = create_op(sender, 0, 1); - op.uo.paymaster_and_data = paymaster.as_bytes().to_vec().into(); + let uo: &mut UserOperation = op.uo.as_mut(); + uo.paymaster_and_data = paymaster.as_bytes().to_vec().into(); op.entity_infos.paymaster = Some(EntityInfo { - address: op.uo.paymaster().unwrap(), + entity: Entity::paymaster(paymaster), is_staked: false, }); - op.uo.init_code = factory.as_bytes().to_vec().into(); + uo.init_code = factory.as_bytes().to_vec().into(); op.entity_infos.factory = Some(EntityInfo { - address: op.uo.factory().unwrap(), + entity: Entity::factory(factory), is_staked: false, }); op.aggregator = Some(aggregator); op.entity_infos.aggregator = Some(EntityInfo { - address: aggregator, + entity: Entity::aggregator(aggregator), is_staked: 
false, }); @@ -855,8 +930,9 @@ mod tests { let mut hashes = vec![]; for i in 0..count { let mut op = op.clone(); - op.uo.nonce = i.into(); - hashes.push(pool.add_operation(op, None).unwrap()); + let uo: &mut UserOperation = op.uo.as_mut(); + uo.nonce = i.into(); + hashes.push(pool.add_operation(op).unwrap()); } assert_eq!(pool.address_count(&sender), 5); @@ -880,12 +956,12 @@ mod tests { let mut pool = PoolInner::new(args.clone()); for i in 0..20 { let op = create_op(Address::random(), i, i + 1); - pool.add_operation(op, None).unwrap(); + pool.add_operation(op).unwrap(); } // on greater gas, new op should win let op = create_op(Address::random(), args.max_size_of_pool_bytes, 2); - let result = pool.add_operation(op, None); + let result = pool.add_operation(op); assert!(result.is_ok(), "{:?}", result.err()); } @@ -895,15 +971,15 @@ mod tests { let mut pool = PoolInner::new(args.clone()); for i in 0..20 { let op = create_op(Address::random(), i, i + 1); - pool.add_operation(op, None).unwrap(); + pool.add_operation(op).unwrap(); } let op = create_op(Address::random(), 4, 1); - assert!(pool.add_operation(op, None).is_err()); + assert!(pool.add_operation(op).is_err()); // on equal gas, worst should remain because it came first let op = create_op(Address::random(), 4, 2); - let result = pool.add_operation(op, None); + let result = pool.add_operation(op); assert!(result.is_ok(), "{:?}", result.err()); } @@ -912,12 +988,14 @@ mod tests { let mut pool = PoolInner::new(conf()); let sender = Address::random(); let mut po1 = create_op(sender, 0, 100); - po1.uo.max_priority_fee_per_gas = 100.into(); - let _ = pool.add_operation(po1.clone(), None).unwrap(); + let uo1: &mut UserOperation = po1.uo.as_mut(); + uo1.max_priority_fee_per_gas = 100.into(); + let _ = pool.add_operation(po1.clone()).unwrap(); let mut po2 = create_op(sender, 0, 101); - po2.uo.max_priority_fee_per_gas = 101.into(); - let res = pool.add_operation(po2, None); + let uo2: &mut UserOperation = po2.uo.as_mut(); 
+ uo2.max_priority_fee_per_gas = 101.into(); + let res = pool.add_operation(po2); assert!(res.is_err()); match res.err().unwrap() { MempoolError::ReplacementUnderpriced(a, b) => { @@ -932,7 +1010,7 @@ mod tests { pool.pool_size, OrderedPoolOperation { po: Arc::new(po1), - submission_id: 0 + submission_id: 0, } .mem_size() ); @@ -944,24 +1022,26 @@ mod tests { let sender = Address::random(); let paymaster1 = Address::random(); let mut po1 = create_op(sender, 0, 10); - po1.uo.max_priority_fee_per_gas = 10.into(); - po1.uo.paymaster_and_data = paymaster1.as_bytes().to_vec().into(); + let uo1: &mut UserOperation = po1.uo.as_mut(); + uo1.max_priority_fee_per_gas = 10.into(); + uo1.paymaster_and_data = paymaster1.as_bytes().to_vec().into(); po1.entity_infos.paymaster = Some(EntityInfo { - address: po1.uo.paymaster().unwrap(), + entity: Entity::paymaster(paymaster1), is_staked: false, }); - let _ = pool.add_operation(po1, None).unwrap(); + let _ = pool.add_operation(po1).unwrap(); assert_eq!(pool.address_count(&paymaster1), 1); let paymaster2 = Address::random(); let mut po2 = create_op(sender, 0, 11); - po2.uo.max_priority_fee_per_gas = 11.into(); - po2.uo.paymaster_and_data = paymaster2.as_bytes().to_vec().into(); + let uo2: &mut UserOperation = po2.uo.as_mut(); + uo2.max_priority_fee_per_gas = 11.into(); + uo2.paymaster_and_data = paymaster2.as_bytes().to_vec().into(); po2.entity_infos.paymaster = Some(EntityInfo { - address: po2.uo.paymaster().unwrap(), + entity: Entity::paymaster(paymaster2), is_staked: false, }); - let _ = pool.add_operation(po2.clone(), None).unwrap(); + let _ = pool.add_operation(po2.clone()).unwrap(); assert_eq!(pool.address_count(&sender), 1); assert_eq!(pool.address_count(&paymaster1), 0); @@ -970,7 +1050,7 @@ mod tests { pool.pool_size, OrderedPoolOperation { po: Arc::new(po2), - submission_id: 0 + submission_id: 0, } .mem_size() ); @@ -981,10 +1061,11 @@ mod tests { let mut pool = PoolInner::new(conf()); let sender = Address::random(); let 
mut po1 = create_op(sender, 0, 10); - po1.uo.max_priority_fee_per_gas = 10.into(); - let _ = pool.add_operation(po1.clone(), None).unwrap(); + let uo1: &mut UserOperation = po1.uo.as_mut(); + uo1.max_priority_fee_per_gas = 10.into(); + let _ = pool.add_operation(po1.clone()).unwrap(); - let res = pool.add_operation(po1, None); + let res = pool.add_operation(po1); assert!(res.is_err()); match res.err().unwrap() { MempoolError::OperationAlreadyKnown => (), @@ -999,11 +1080,11 @@ mod tests { let sender = Address::random(); let mut po1 = create_op(sender, 0, 10); po1.valid_time_range.valid_until = Timestamp::from(1); - let _ = pool.add_operation(po1.clone(), None).unwrap(); + let _ = pool.add_operation(po1.clone()).unwrap(); - let res = pool.remove_expired(Timestamp::from(2)); + let res = pool.do_maintenance(0, Timestamp::from(2), GasFees::default(), 0.into()); assert_eq!(res.len(), 1); - assert_eq!(res[0].0, po1.uo.op_hash(conf.entry_point, conf.chain_id)); + assert_eq!(res[0].0, po1.uo.hash(conf.entry_point, conf.chain_id)); assert_eq!(res[0].1, Timestamp::from(1)); } @@ -1014,20 +1095,20 @@ mod tests { let mut po1 = create_op(Address::random(), 0, 10); po1.valid_time_range.valid_until = 5.into(); - let _ = pool.add_operation(po1.clone(), None).unwrap(); + let _ = pool.add_operation(po1.clone()).unwrap(); let mut po2 = create_op(Address::random(), 0, 10); po2.valid_time_range.valid_until = 10.into(); - let _ = pool.add_operation(po2.clone(), None).unwrap(); - + let _ = pool.add_operation(po2.clone()).unwrap(); let mut po3 = create_op(Address::random(), 0, 10); po3.valid_time_range.valid_until = 9.into(); - let _ = pool.add_operation(po3.clone(), None).unwrap(); + let _ = pool.add_operation(po3.clone()).unwrap(); + + let res = pool.do_maintenance(0, Timestamp::from(10), GasFees::default(), 0.into()); - let res = pool.remove_expired(10.into()); assert_eq!(res.len(), 2); - assert!(res.contains(&(po1.uo.op_hash(conf.entry_point, conf.chain_id), 5.into()))); - 
assert!(res.contains(&(po3.uo.op_hash(conf.entry_point, conf.chain_id), 9.into()))); + assert!(res.contains(&(po1.uo.hash(conf.entry_point, conf.chain_id), 5.into()))); + assert!(res.contains(&(po3.uo.hash(conf.entry_point, conf.chain_id), 9.into()))); } fn conf() -> PoolInnerConfig { @@ -1055,19 +1136,25 @@ mod tests { sender, nonce: nonce.into(), max_fee_per_gas: max_fee_per_gas.into(), - ..UserOperation::default() - }, + } + .into(), entity_infos: EntityInfos { factory: None, sender: EntityInfo { - address: sender, + entity: Entity::account(sender), is_staked: false, }, paymaster: None, aggregator: None, }, - ..PoolOperation::default() + entry_point: Address::random(), + valid_time_range: ValidTimeRange::default(), + aggregator: None, + expected_code_hash: H256::random(), + sim_block_hash: H256::random(), + sim_block_number: 0, + account_is_staked: false, } } diff --git a/crates/pool/src/mempool/reputation.rs b/crates/pool/src/mempool/reputation.rs index 4fc5fcc0..aa81a0d2 100644 --- a/crates/pool/src/mempool/reputation.rs +++ b/crates/pool/src/mempool/reputation.rs @@ -17,226 +17,150 @@ use std::{ }; use ethers::types::Address; -#[cfg(test)] -use mockall::automock; use parking_lot::RwLock; -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +use rundler_types::pool::{Reputation, ReputationStatus}; use tokio::time::interval; -/// Reputation status for an entity -#[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum ReputationStatus { - /// Entity is not throttled or banned - Ok, - /// Entity is throttled - Throttled, - /// Entity is banned - Banned, +#[derive(Debug, Clone, Copy)] +pub(crate) struct ReputationParams { + bundle_invalidation_ops_seen_staked_penalty: u64, + bundle_invalidation_ops_seen_unstaked_penalty: u64, + same_unstaked_entity_mempool_count: u64, + min_inclusion_rate_denominator: u64, + inclusion_rate_factor: u64, + throttling_slack: u64, + ban_slack: u64, + tracking_enabled: bool, + decay_interval_secs: u64, + decay_factor: 
u64, } -impl Serialize for ReputationStatus { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - match self { - ReputationStatus::Ok => serializer.serialize_str("ok"), - ReputationStatus::Throttled => serializer.serialize_str("throttled"), - ReputationStatus::Banned => serializer.serialize_str("banned"), +impl Default for ReputationParams { + fn default() -> Self { + Self { + bundle_invalidation_ops_seen_staked_penalty: 10_000, + bundle_invalidation_ops_seen_unstaked_penalty: 1_000, + same_unstaked_entity_mempool_count: 10, + min_inclusion_rate_denominator: 10, + inclusion_rate_factor: 10, + throttling_slack: 10, + ban_slack: 50, + tracking_enabled: true, + decay_interval_secs: 3600, + decay_factor: 24, } } } -impl<'de> Deserialize<'de> for ReputationStatus { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let s = String::deserialize(deserializer)?; - match s.as_str() { - "ok" => Ok(ReputationStatus::Ok), - "throttled" => Ok(ReputationStatus::Throttled), - "banned" => Ok(ReputationStatus::Banned), - _ => Err(de::Error::custom(format!("Invalid reputation status {s}"))), +impl ReputationParams { + pub(crate) fn new(tracking_enabled: bool) -> Self { + Self { + tracking_enabled, + ..Default::default() } } -} - -/// The reputation of an entity -#[derive(Debug, Clone)] -pub struct Reputation { - /// The entity's address - pub address: Address, - /// Number of ops seen in the current interval - pub ops_seen: u64, - /// Number of ops included in the current interval - pub ops_included: u64, -} - -/// Reputation manager trait -/// -/// Interior mutability pattern used as ReputationManagers may -/// need to be thread-safe. 
-#[cfg_attr(test, automock)] -pub(crate) trait ReputationManager: Send + Sync + 'static { - /// Called by mempool before returning operations to bundler - fn status(&self, address: Address) -> ReputationStatus; - - /// Called by mempool when an operation that requires stake is added to the - /// pool - fn add_seen(&self, address: Address); - - /// Called by mempool when an unstaked entity causes the invalidation of a bundle - fn handle_urep_030_penalty(&self, address: Address); - - /// Called by mempool when a staked entity causes the invalidation of a bundle - fn handle_srep_050_penalty(&self, address: Address); - - /// Called by the mempool when an operation that requires stake is removed - /// from the pool - fn add_included(&self, address: Address); - - /// Called by the mempool when a previously mined operation that requires - /// stake is returned to the pool. - fn remove_included(&self, address: Address); - - /// Called by debug API - fn dump_reputation(&self) -> Vec; - /// Called by debug API - fn set_reputation(&self, address: Address, ops_seen: u64, ops_included: u64); + #[allow(dead_code)] + pub(crate) fn bundler_default() -> Self { + Self::default() + } - /// Get the ops allowed for an unstaked entity - fn get_ops_allowed(&self, address: Address) -> u64; + #[allow(dead_code)] + pub(crate) fn client_default() -> Self { + Self { + min_inclusion_rate_denominator: 100, + ..Self::default() + } + } - /// Clear all reputation values - fn clear(&self); + #[cfg(test)] + pub(crate) fn test_parameters(ban_slack: u64, throttling_slack: u64) -> Self { + Self { + ban_slack, + throttling_slack, + ..Self::default() + } + } } -#[derive(Debug)] -pub(crate) struct HourlyMovingAverageReputation { - reputation: RwLock, +pub(crate) struct AddressReputation { + state: RwLock, } -impl HourlyMovingAverageReputation { +impl AddressReputation { pub(crate) fn new( params: ReputationParams, - blocklist: Option>, - allowlist: Option>, - ) -> Self { - let rep = 
AddressReputation::new(params) - .with_blocklist(blocklist.unwrap_or_default()) - .with_allowlist(allowlist.unwrap_or_default()); - + blocklist: HashSet
, + allowlist: HashSet
, + ) -> AddressReputation { Self { - reputation: RwLock::new(rep), + state: RwLock::new( + AddressReputationInner::new(params) + .with_blocklist(blocklist) + .with_allowlist(allowlist), + ), } } - // run the reputation hourly update job pub(crate) async fn run(&self) { - let mut tick = interval(Duration::from_secs(60 * 60)); + let mut tick = interval(Duration::from_secs( + self.state.read().params.decay_interval_secs, + )); loop { tick.tick().await; - self.reputation.write().hourly_update(); + self.state.write().update(); } } -} -impl ReputationManager for HourlyMovingAverageReputation { - fn status(&self, address: Address) -> ReputationStatus { - self.reputation.read().status(address) + pub(crate) fn status(&self, address: Address) -> ReputationStatus { + self.state.read().status(address) } - fn add_seen(&self, address: Address) { - self.reputation.write().add_seen(address); + pub(crate) fn add_seen(&self, address: Address) { + self.state.write().add_seen(address); } - fn handle_urep_030_penalty(&self, address: Address) { - self.reputation.write().handle_urep_030_penalty(address); + pub(crate) fn handle_urep_030_penalty(&self, address: Address) { + self.state.write().handle_urep_030_penalty(address); } - fn handle_srep_050_penalty(&self, address: Address) { - self.reputation.write().handle_srep_050_penalty(address); + pub(crate) fn handle_srep_050_penalty(&self, address: Address) { + self.state.write().handle_srep_050_penalty(address); } - fn add_included(&self, address: Address) { - self.reputation.write().add_included(address); + pub(crate) fn dump_reputation(&self) -> Vec { + self.state.read().dump_reputation() } - fn remove_included(&self, address: Address) { - self.reputation.write().remove_included(address); + pub(crate) fn add_included(&self, address: Address) { + self.state.write().add_included(address); } - fn dump_reputation(&self) -> Vec { - let reputation = self.reputation.read(); - reputation - .counts - .iter() - .map(|(address, count)| Reputation { 
- address: *address, - ops_seen: count.ops_seen, - ops_included: count.ops_included, - }) - .collect() + pub(crate) fn remove_included(&self, address: Address) { + self.state.write().remove_included(address); } - fn set_reputation(&self, address: Address, ops_seen: u64, ops_included: u64) { - self.reputation + pub(crate) fn set_reputation(&self, address: Address, ops_seen: u64, ops_included: u64) { + self.state .write() - .set_reputation(address, ops_seen, ops_included) - } - - fn get_ops_allowed(&self, address: Address) -> u64 { - self.reputation.read().get_ops_allowed(address) + .set_reputation(address, ops_seen, ops_included); } - fn clear(&self) { - self.reputation.write().clear() + pub(crate) fn get_ops_allowed(&self, address: Address) -> u64 { + self.state.read().get_ops_allowed(address) } -} - -#[derive(Debug, Clone, Copy)] -pub(crate) struct ReputationParams { - bundle_invalidation_ops_seen_staked_penalty: u64, - bundle_invalidation_ops_seen_unstaked_penalty: u64, - same_unstaked_entity_mempool_count: u64, - min_inclusion_rate_denominator: u64, - inclusion_rate_factor: u64, - throttling_slack: u64, - ban_slack: u64, -} -impl Default for ReputationParams { - fn default() -> Self { - Self { - bundle_invalidation_ops_seen_staked_penalty: 10_000, - bundle_invalidation_ops_seen_unstaked_penalty: 1_000, - same_unstaked_entity_mempool_count: 10, - min_inclusion_rate_denominator: 10, - inclusion_rate_factor: 10, - throttling_slack: 10, - ban_slack: 50, - } + pub(crate) fn clear(&self) { + self.state.write().clear(); } -} -impl ReputationParams { - pub(crate) fn bundler_default() -> Self { - Self::default() - } - - #[allow(dead_code)] - pub(crate) fn client_default() -> Self { - Self { - min_inclusion_rate_denominator: 100, - ..Self::default() - } + pub(crate) fn set_tracking(&self, tracking_enabled: bool) { + self.state.write().set_tracking(tracking_enabled); } } #[derive(Debug)] -struct AddressReputation { +struct AddressReputationInner { // Addresses that are 
always banned blocklist: HashSet
, // Addresses that are always exempt from throttling and banning @@ -245,9 +169,9 @@ struct AddressReputation { params: ReputationParams, } -impl AddressReputation { - fn new(params: ReputationParams) -> Self { - Self { +impl AddressReputationInner { + fn new(params: ReputationParams) -> AddressReputationInner { + AddressReputationInner { blocklist: HashSet::new(), allowlist: HashSet::new(), counts: HashMap::new(), @@ -255,12 +179,12 @@ impl AddressReputation { } } - fn with_blocklist(self, blocklist: HashSet
) -> Self { - Self { blocklist, ..self } + fn with_blocklist(self, blocklist: HashSet
) -> AddressReputationInner { + AddressReputationInner { blocklist, ..self } } - fn with_allowlist(self, allowlist: HashSet
) -> Self { - Self { allowlist, ..self } + fn with_allowlist(self, allowlist: HashSet
) -> AddressReputationInner { + AddressReputationInner { allowlist, ..self } } fn status(&self, address: Address) -> ReputationStatus { @@ -270,6 +194,10 @@ impl AddressReputation { return ReputationStatus::Ok; } + if !self.params.tracking_enabled { + return ReputationStatus::Ok; + } + let count = match self.counts.get(&address) { Some(count) => count, None => return ReputationStatus::Ok, @@ -301,6 +229,17 @@ impl AddressReputation { count.ops_seen = self.params.bundle_invalidation_ops_seen_staked_penalty; } + fn dump_reputation(&self) -> Vec { + self.counts + .iter() + .map(|(address, count)| Reputation { + address: *address, + ops_seen: count.ops_seen, + ops_included: count.ops_included, + }) + .collect() + } + fn add_included(&mut self, address: Address) { let count = self.counts.entry(address).or_default(); count.ops_included += 1; @@ -334,10 +273,10 @@ impl AddressReputation { self.params.same_unstaked_entity_mempool_count + inclusion_based_count } - fn hourly_update(&mut self) { + fn update(&mut self) { for count in self.counts.values_mut() { - count.ops_seen -= count.ops_seen / 24; - count.ops_included -= count.ops_included / 24; + count.ops_seen -= count.ops_seen / self.params.decay_factor; + count.ops_included -= count.ops_included / self.params.decay_factor; } self.counts .retain(|_, count| count.ops_seen > 0 || count.ops_included > 0); @@ -346,6 +285,10 @@ impl AddressReputation { fn clear(&mut self) { self.counts.clear(); } + + fn set_tracking(&mut self, tracking_enabled: bool) { + self.params.tracking_enabled = tracking_enabled; + } } #[derive(Debug, Default, Clone)] @@ -363,7 +306,7 @@ mod tests { #[test] fn seen_included() { let addr = Address::random(); - let mut reputation = AddressReputation::new(ReputationParams::bundler_default()); + let mut reputation = AddressReputationInner::new(ReputationParams::bundler_default()); for _ in 0..1000 { reputation.add_seen(addr); @@ -377,7 +320,7 @@ mod tests { #[test] fn set_rep() { let addr = 
Address::random(); - let mut reputation = AddressReputation::new(ReputationParams::bundler_default()); + let mut reputation = AddressReputationInner::new(ReputationParams::bundler_default()); reputation.set_reputation(addr, 1000, 1000); let counts = reputation.counts.get(&addr).unwrap(); @@ -388,7 +331,7 @@ mod tests { #[test] fn reputation_ok() { let addr = Address::random(); - let mut reputation = AddressReputation::new(ReputationParams::bundler_default()); + let mut reputation = AddressReputationInner::new(ReputationParams::bundler_default()); reputation.add_seen(addr); assert_eq!(reputation.status(addr), ReputationStatus::Ok); } @@ -397,7 +340,7 @@ mod tests { fn reputation_throttled() { let addr = Address::random(); let params = ReputationParams::bundler_default(); - let mut reputation = AddressReputation::new(params); + let mut reputation = AddressReputationInner::new(params); let ops_seen = 1000; let ops_included = @@ -410,7 +353,7 @@ mod tests { fn reputation_throttled_edge() { let addr = Address::random(); let params = ReputationParams::bundler_default(); - let mut reputation = AddressReputation::new(params); + let mut reputation = AddressReputationInner::new(params); let ops_seen = 1000; let ops_included = @@ -423,7 +366,7 @@ mod tests { fn reputation_banned() { let addr = Address::random(); let params = ReputationParams::bundler_default(); - let mut reputation = AddressReputation::new(params); + let mut reputation = AddressReputationInner::new(params); let ops_seen = 1000; let ops_included = ops_seen / params.min_inclusion_rate_denominator - params.ban_slack - 1; @@ -431,26 +374,44 @@ mod tests { assert_eq!(reputation.status(addr), ReputationStatus::Banned); } + #[test] + fn reputation_banned_tracking_disabled() { + let addr = Address::random(); + let params = ReputationParams::new(false); + let mut reputation = AddressReputationInner::new(params); + + let ops_seen = 1000; + let ops_included = ops_seen / params.min_inclusion_rate_denominator - 
params.ban_slack - 1; + reputation.set_reputation(addr, ops_seen, ops_included); + assert_eq!(reputation.status(addr), ReputationStatus::Ok); + } + #[test] fn hourly_update() { let addr = Address::random(); - let mut reputation = AddressReputation::new(ReputationParams::bundler_default()); + let mut reputation = AddressReputationInner::new(ReputationParams::bundler_default()); for _ in 0..1000 { reputation.add_seen(addr); reputation.add_included(addr); } - reputation.hourly_update(); + reputation.update(); let counts = reputation.counts.get(&addr).unwrap(); - assert_eq!(counts.ops_seen, 1000 - 1000 / 24); - assert_eq!(counts.ops_included, 1000 - 1000 / 24); + assert_eq!( + counts.ops_seen, + 1000 - 1000 / reputation.params.decay_factor + ); + assert_eq!( + counts.ops_included, + 1000 - 1000 / reputation.params.decay_factor + ); } #[test] fn test_blocklist() { let addr = Address::random(); - let reputation = AddressReputation::new(ReputationParams::bundler_default()) + let reputation = AddressReputationInner::new(ReputationParams::bundler_default()) .with_blocklist(HashSet::from([addr])); assert_eq!(reputation.status(addr), ReputationStatus::Banned); @@ -460,7 +421,7 @@ mod tests { #[test] fn test_allowlist() { let addr = Address::random(); - let mut reputation = AddressReputation::new(ReputationParams::bundler_default()) + let mut reputation = AddressReputationInner::new(ReputationParams::bundler_default()) .with_allowlist(HashSet::from([addr])); reputation.set_reputation(addr, 1000000, 0); @@ -471,8 +432,7 @@ mod tests { #[test] fn manager_seen_included() { - let manager = - HourlyMovingAverageReputation::new(ReputationParams::bundler_default(), None, None); + let mut manager = AddressReputationInner::new(ReputationParams::bundler_default()); let addrs = [Address::random(), Address::random(), Address::random()]; for _ in 0..10 { @@ -485,8 +445,7 @@ mod tests { for addr in &addrs { assert_eq!(manager.status(*addr), ReputationStatus::Ok); - let rep = 
manager.reputation.read(); - let counts = rep.counts.get(addr).unwrap(); + let counts = manager.counts.get(addr).unwrap(); assert_eq!(counts.ops_seen, 10); assert_eq!(counts.ops_included, 10); } @@ -494,8 +453,7 @@ mod tests { #[test] fn manager_set_dump_reputation() { - let manager = - HourlyMovingAverageReputation::new(ReputationParams::bundler_default(), None, None); + let mut manager = AddressReputationInner::new(ReputationParams::bundler_default()); let addrs = [Address::random(), Address::random(), Address::random()]; for addr in &addrs { diff --git a/crates/pool/src/mempool/uo_pool.rs b/crates/pool/src/mempool/uo_pool.rs index 0eca7901..2805def3 100644 --- a/crates/pool/src/mempool/uo_pool.rs +++ b/crates/pool/src/mempool/uo_pool.rs @@ -11,7 +11,7 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. -use std::{collections::HashSet, sync::Arc}; +use std::{collections::HashSet, marker::PhantomData, sync::Arc}; use ethers::{ types::{Address, H256, U256}, @@ -19,22 +19,26 @@ use ethers::{ }; use itertools::Itertools; use parking_lot::RwLock; -use rundler_provider::{EntryPoint, PaymasterHelper, ProviderResult}; +use rundler_provider::EntryPoint; use rundler_sim::{Prechecker, Simulator}; -use rundler_types::{Entity, EntityUpdate, EntityUpdateType, UserOperation}; +use rundler_types::{ + pool::{ + MempoolError, PaymasterMetadata, PoolOperation, Reputation, ReputationStatus, StakeStatus, + }, + Entity, EntityUpdate, EntityUpdateType, EntryPointVersion, GasFees, UserOperation, + UserOperationId, UserOperationVariant, +}; use rundler_utils::emit::WithEntryPoint; use tokio::sync::broadcast; use tonic::async_trait; use tracing::info; use super::{ - error::{MempoolError, MempoolResult}, - pool::PoolInner, - reputation::{Reputation, ReputationManager, ReputationStatus}, - Mempool, OperationOrigin, PaymasterMetadata, PoolConfig, PoolOperation, StakeInfo, StakeStatus, + 
paymaster::PaymasterTracker, pool::PoolInner, reputation::AddressReputation, Mempool, + MempoolResult, OperationOrigin, PoolConfig, }; use crate::{ - chain::{ChainUpdate, DepositInfo}, + chain::ChainUpdate, emit::{EntityReputation, EntityStatus, EntitySummary, OpPoolEvent, OpRemovalReason}, }; @@ -43,59 +47,55 @@ use crate::{ /// Wrapper around a pool object that implements thread-safety /// via a RwLock. Safe to call from multiple threads. Methods /// block on write locks. -pub(crate) struct UoPool< - R: ReputationManager, - P: Prechecker, - S: Simulator, - E: EntryPoint, - PH: PaymasterHelper, -> { +pub(crate) struct UoPool { config: PoolConfig, - reputation: Arc, state: RwLock, + paymaster: PaymasterTracker, + reputation: Arc, event_sender: broadcast::Sender>, prechecker: P, simulator: S, - entry_point: E, - paymaster_helper: PH, + _uo_type: PhantomData, } struct UoPoolState { pool: PoolInner, throttled_ops: HashSet, block_number: u64, + gas_fees: GasFees, + base_fee: U256, } -impl UoPool +impl UoPool where - R: ReputationManager, - P: Prechecker, - S: Simulator, + UO: UserOperation, + P: Prechecker, + S: Simulator, E: EntryPoint, - PH: PaymasterHelper, { pub(crate) fn new( config: PoolConfig, - reputation: Arc, event_sender: broadcast::Sender>, prechecker: P, simulator: S, - entry_point: E, - paymaster_helper: PH, + paymaster: PaymasterTracker, + reputation: Arc, ) -> Self { Self { - config: config.clone(), - reputation, state: RwLock::new(UoPoolState { - pool: PoolInner::new(config.into()), + pool: PoolInner::new(config.clone().into()), throttled_ops: HashSet::new(), block_number: 0, + gas_fees: GasFees::default(), + base_fee: U256::zero(), }), + reputation, + paymaster, event_sender, prechecker, simulator, - entry_point, - paymaster_helper, + config, + _uo_type: PhantomData, } } @@ -112,12 +112,12 @@ where let removed_op_hashes = state.pool.throttle_entity(entity, block_number); let count = removed_op_hashes.len(); - self.emit(OpPoolEvent::RemovedEntity { 
entity }); + self.emit(OpPoolEvent::ThrottledEntity { entity }); for op_hash in removed_op_hashes { self.emit(OpPoolEvent::RemovedOp { op_hash, - reason: OpRemovalReason::EntityRemoved { entity }, + reason: OpRemovalReason::EntityThrottled { entity }, }) } UoPoolMetrics::increment_removed_operations(count, self.config.entry_point); @@ -140,13 +140,12 @@ where } #[async_trait] -impl Mempool for UoPool +impl Mempool for UoPool where - R: ReputationManager, - P: Prechecker, - S: Simulator, + UO: UserOperation + From + Into, + P: Prechecker, + S: Simulator, E: EntryPoint, - PH: PaymasterHelper, { async fn on_chain_update(&self, update: &ChainUpdate) { { @@ -156,19 +155,24 @@ where .iter() .filter(|op| op.entry_point == self.config.entry_point); - let deposits: Vec = update - .entity_deposits - .iter() - .filter(|d| d.entrypoint == self.config.entry_point) - .cloned() - .collect(); + let entity_balance_updates = update.entity_balance_updates.iter().filter_map(|u| { + if u.entrypoint == self.config.entry_point { + Some(u.address) + } else { + None + } + }); - let unmined_entity_deposits: Vec = update - .unmined_entity_deposits + let unmined_entity_balance_updates = update + .unmined_entity_balance_updates .iter() - .filter(|d| d.entrypoint == self.config.entry_point) - .cloned() - .collect(); + .filter_map(|u| { + if u.entrypoint == self.config.entry_point { + Some(u.address) + } else { + None + } + }); let unmined_ops = deduped_ops .unmined_ops @@ -177,59 +181,87 @@ where let mut mined_op_count = 0; let mut unmined_op_count = 0; - if update.reorg_larger_than_history { - let _ = self.reset_confirmed_paymaster_balances().await; - } - - let mut state = self.state.write(); - state - .pool - .update_paymaster_balances_after_update(&deposits, &unmined_entity_deposits); - for op in mined_ops { if op.entry_point != self.config.entry_point { continue; } + self.paymaster.update_paymaster_balance_from_mined_op(op); // Remove throttled ops that were included in the block - 
state.throttled_ops.remove(&op.hash); + self.state.write().throttled_ops.remove(&op.hash); - if let Some(op) = state.pool.mine_operation(op, update.latest_block_number) { + if let Some(pool_op) = self + .state + .write() + .pool + .mine_operation(op, update.latest_block_number) + { // Only account for an entity once - for entity_addr in op.entities().map(|e| e.address).unique() { + for entity_addr in pool_op.entities().map(|e| e.address).unique() { self.reputation.add_included(entity_addr); } mined_op_count += 1; } } + for op in unmined_ops { if op.entry_point != self.config.entry_point { continue; } - if let Some(op) = state.pool.unmine_operation(op) { - // Only account for an entity once - for entity_addr in op.entities().map(|e| e.address).unique() { + if let Some(paymaster) = op.paymaster { + self.paymaster + .unmine_actual_cost(&paymaster, op.actual_gas_cost); + } + + let pool_op = self.state.write().pool.unmine_operation(op); + + if let Some(po) = pool_op { + for entity_addr in po.entities().map(|e| e.address).unique() { self.reputation.remove_included(entity_addr); } + unmined_op_count += 1; + let _ = self.paymaster.add_or_update_balance(&po).await; + } + } + + // Update paymaster balances AFTER updating the pool to reset confirmed balances if needed. 
+ if update.reorg_larger_than_history { + if let Err(e) = self.reset_confirmed_paymaster_balances().await { + tracing::error!("Failed to reset confirmed paymaster balances: {:?}", e); + } + } else { + let addresses = entity_balance_updates + .chain(unmined_entity_balance_updates) + .unique() + .collect::>(); + if !addresses.is_empty() { + if let Err(e) = self + .paymaster + .reset_confirmed_balances_for(&addresses) + .await + { + tracing::error!("Failed to reset confirmed paymaster balances: {:?}", e); + } } } + if mined_op_count > 0 { info!( - "{mined_op_count} op(s) mined on entry point {:?} when advancing to block with number {}, hash {:?}.", - self.config.entry_point, - update.latest_block_number, - update.latest_block_hash, - ); + "{mined_op_count} op(s) mined on entry point {:?} when advancing to block with number {}, hash {:?}.", + self.config.entry_point, + update.latest_block_number, + update.latest_block_hash, + ); } if unmined_op_count > 0 { info!( - "{unmined_op_count} op(s) unmined in reorg on entry point {:?} when advancing to block with number {}, hash {:?}.", - self.config.entry_point, - update.latest_block_number, - update.latest_block_hash, - ); + "{unmined_op_count} op(s) unmined in reorg on entry point {:?} when advancing to block with number {}, hash {:?}.", + self.config.entry_point, + update.latest_block_number, + update.latest_block_hash, + ); } UoPoolMetrics::update_ops_seen( mined_op_count as isize - unmined_op_count as isize, @@ -237,9 +269,11 @@ where ); UoPoolMetrics::increment_unmined_operations(unmined_op_count, self.config.entry_point); + let mut state = self.state.write(); state .pool .forget_mined_operations_before_block(update.earliest_remembered_block_number); + // Remove throttled ops that are too old let mut to_remove = HashSet::new(); for hash in state.throttled_ops.iter() { @@ -254,6 +288,7 @@ where } } } + for (hash, added_at_block) in to_remove { state.pool.remove_operation_by_hash(hash); state.throttled_ops.remove(&hash); 
@@ -266,38 +301,61 @@ where }) } - // expire old UOs - let expired = state.pool.remove_expired(update.latest_block_timestamp); + // pool maintenance + let gas_fees = state.gas_fees; + let base_fee = state.base_fee; + let expired = state.pool.do_maintenance( + update.latest_block_number, + update.latest_block_timestamp, + gas_fees, + base_fee, + ); + for (hash, until) in expired { self.emit(OpPoolEvent::RemovedOp { op_hash: hash, reason: OpRemovalReason::Expired { valid_until: until }, }) } - - state.block_number = update.latest_block_number; } // update required bundle fees and update metrics - if let Ok((bundle_fees, base_fee)) = self.prechecker.update_fees().await { - let max_fee = match format_units(bundle_fees.max_fee_per_gas, "gwei") { - Ok(s) => s.parse::().unwrap_or_default(), - Err(_) => 0.0, - }; - UoPoolMetrics::current_max_fee_gwei(max_fee); - - let max_priority_fee = match format_units(bundle_fees.max_priority_fee_per_gas, "gwei") - { - Ok(s) => s.parse::().unwrap_or_default(), - Err(_) => 0.0, - }; - UoPoolMetrics::current_max_priority_fee_gwei(max_priority_fee); - - let base_fee = match format_units(base_fee, "gwei") { - Ok(s) => s.parse::().unwrap_or_default(), - Err(_) => 0.0, - }; - UoPoolMetrics::current_base_fee(base_fee); + match self.prechecker.update_fees().await { + Ok((bundle_fees, base_fee)) => { + let max_fee = match format_units(bundle_fees.max_fee_per_gas, "gwei") { + Ok(s) => s.parse::().unwrap_or_default(), + Err(_) => 0.0, + }; + UoPoolMetrics::current_max_fee_gwei(max_fee); + + let max_priority_fee = + match format_units(bundle_fees.max_priority_fee_per_gas, "gwei") { + Ok(s) => s.parse::().unwrap_or_default(), + Err(_) => 0.0, + }; + UoPoolMetrics::current_max_priority_fee_gwei(max_priority_fee); + + let base_fee_f64 = match format_units(base_fee, "gwei") { + Ok(s) => s.parse::().unwrap_or_default(), + Err(_) => 0.0, + }; + UoPoolMetrics::current_base_fee(base_fee_f64); + + // cache for the next update + { + let mut state = 
self.state.write(); + state.block_number = update.latest_block_number; + state.gas_fees = bundle_fees; + state.base_fee = base_fee; + } + } + Err(e) => { + tracing::error!("Failed to update fees: {:?}", e); + { + let mut state = self.state.write(); + state.block_number = update.latest_block_number; + } + } } } @@ -305,26 +363,27 @@ where self.config.entry_point } - async fn reset_confirmed_paymaster_balances(&self) -> MempoolResult<()> { - let paymaster_addresses = self.state.read().pool.paymaster_addresses(); + fn entry_point_version(&self) -> EntryPointVersion { + self.config.entry_point_version + } - let balances = self - .paymaster_helper - .get_balances(paymaster_addresses.clone()) - .await?; + fn set_tracking(&self, paymaster: bool, reputation: bool) { + self.paymaster.set_tracking(paymaster); + self.reputation.set_tracking(reputation); + } - self.state - .write() - .pool - .set_confirmed_paymaster_balances(&paymaster_addresses, &balances); + async fn reset_confirmed_paymaster_balances(&self) -> MempoolResult<()> { + self.paymaster.reset_confirmed_balances().await + } - Ok(()) + async fn get_stake_status(&self, address: Address) -> MempoolResult { + self.paymaster.get_stake_status(address).await } async fn add_operation( &self, origin: OperationOrigin, - op: UserOperation, + op: UserOperationVariant, ) -> MempoolResult { // TODO(danc) aggregator reputation is not implemented // TODO(danc) catch ops with aggregators prior to simulation and reject @@ -334,6 +393,7 @@ where // If banned, reject let mut entity_summary = EntitySummary::default(); let mut throttled = false; + for entity in op.entities() { let address = entity.address; let reputation = match self.reputation.status(address) { @@ -369,25 +429,19 @@ where self.state.read().pool.check_multiple_roles_violation(&op)?; // check if paymaster is present and exists in pool - // Note: this is super gross but due the fact that we do not want to make - // http calls when we hold the readwrite lock its a work 
around - let mut paymaster_metadata = None; - if let Some(address) = op.paymaster() { - let meta = self - .paymaster_balance(address) - .await - .map_err(|e| MempoolError::Other(e.into()))?; - - paymaster_metadata = Some(meta); - } + // this is optimistic and could potentially lead to + // multiple user operations call this before they are + // added to the pool and can lead to an overdraft + self.paymaster.check_operation_cost(&op).await?; // Prechecks - self.prechecker.check(&op).await?; + let versioned_op = op.clone().into(); + self.prechecker.check(&versioned_op).await?; // Only let ops with successful simulations through let sim_result = self .simulator - .simulate_validation(op.clone(), None, None) + .simulate_validation(versioned_op, None, None) .await?; // No aggregators supported for now @@ -410,7 +464,6 @@ where expected_code_hash: sim_result.code_hash, sim_block_hash: sim_result.block_hash, sim_block_number: sim_result.block_number.unwrap(), // simulation always returns a block number when called without a specified block_hash - entities_needing_stake: sim_result.entities_needing_stake, account_is_staked: sim_result.account_is_staked, entity_infos: sim_result.entity_infos, }; @@ -419,25 +472,25 @@ where { let state = self.state.read(); if !pool_op.account_is_staked - && state.pool.address_count(&pool_op.uo.sender) + && state.pool.address_count(&pool_op.uo.sender()) >= self.config.same_sender_mempool_count { return Err(MempoolError::MaxOperationsReached( self.config.same_sender_mempool_count, - pool_op.uo.sender, + Entity::account(pool_op.uo.sender()), )); } // Check unstaked non-sender entity counts in the mempool for entity in pool_op .unstaked_entities() - .filter(|e| e.address != pool_op.entity_infos.sender.address) + .filter(|e| e.address != pool_op.entity_infos.sender.address()) { let ops_allowed = self.reputation.get_ops_allowed(entity.address); if state.pool.address_count(&entity.address) >= ops_allowed as usize { return 
Err(MempoolError::MaxOperationsReached( ops_allowed as usize, - entity.address, + entity, )); } } @@ -446,15 +499,18 @@ where // Add op to pool let hash = { let mut state = self.state.write(); - let hash = state - .pool - .add_operation(pool_op.clone(), paymaster_metadata)?; + let hash = state.pool.add_operation(pool_op.clone())?; + if throttled { state.throttled_ops.insert(hash); } hash }; + // Add op cost to pending paymaster balance + // once the operation has been added to the pool + self.paymaster.add_or_update_balance(&pool_op).await?; + // Update reputation if replacement.is_none() { pool_op.entities().unique().for_each(|e| { @@ -468,7 +524,7 @@ where } let op_hash = pool_op .uo - .op_hash(self.config.entry_point, self.config.chain_id); + .hash(self.config.entry_point, self.config.chain_id); let valid_after = pool_op.valid_time_range.valid_after; let valid_until = pool_op.valid_time_range.valid_until; self.emit(OpPoolEvent::ReceivedOp { @@ -490,7 +546,8 @@ where { let mut state = self.state.write(); for hash in hashes { - if state.pool.remove_operation_by_hash(*hash).is_some() { + if let Some(op) = state.pool.remove_operation_by_hash(*hash) { + self.paymaster.remove_operation(&op.uo.id()); count += 1; removed_hashes.push(*hash); } @@ -506,6 +563,45 @@ where UoPoolMetrics::increment_removed_operations(count, self.config.entry_point); } + fn remove_op_by_id(&self, id: &UserOperationId) -> MempoolResult> { + // Check for the operation in the pool and its age + let po = { + let state = self.state.read(); + match state.pool.get_operation_by_id(id) { + Some(po) => { + if po.sim_block_number + self.config.drop_min_num_blocks > state.block_number { + return Err(MempoolError::OperationDropTooSoon( + po.sim_block_number, + state.block_number, + self.config.drop_min_num_blocks, + )); + } + po + } + None => return Ok(None), + } + }; + + let hash = po.uo.hash(self.config.entry_point, self.config.chain_id); + + // This can return none if the operation was removed by 
another thread + if self + .state + .write() + .pool + .remove_operation_by_hash(hash) + .is_none() + { + return Ok(None); + } + + self.emit(OpPoolEvent::RemovedOp { + op_hash: hash, + reason: OpRemovalReason::Requested, + }); + Ok(Some(hash)) + } + fn update_entity(&self, update: EntityUpdate) { let entity = update.entity; match update.update_type { @@ -522,28 +618,6 @@ where } } - async fn paymaster_balance(&self, paymaster: Address) -> ProviderResult { - if self.state.read().pool.paymaster_exists(paymaster) { - let meta = self - .state - .read() - .pool - .paymaster_metadata(paymaster) - .expect("Paymaster balance should not be empty if address exists in pool"); - return Ok(meta); - } - - let balance = self.entry_point.balance_of(paymaster, None).await?; - - let paymaster_meta = PaymasterMetadata { - address: paymaster, - pending_balance: balance, - confirmed_balance: balance, - }; - - Ok(paymaster_meta) - } - fn best_operations( &self, max: usize, @@ -563,14 +637,19 @@ where .filter(|op| { // short-circuit the mod if there is only 1 shard ((self.config.num_shards == 1) || - (U256::from_little_endian(op.uo.sender.as_bytes()) + (U256::from_little_endian(op.uo.sender().as_bytes()) .div_mod(self.config.num_shards.into()) .1 == shard_index.into())) && - // filter out ops from senders we've already seen - senders.insert(op.uo.sender) + // filter out ops from unstaked senders we've already seen + if !op.account_is_staked { + senders.insert(op.uo.sender()) + } else { + true + } }) .take(max) + .map(Into::into) .collect()) } @@ -582,10 +661,15 @@ where self.state.read().pool.get_operation_by_hash(hash) } - fn clear_state(&self, clear_mempool: bool, clear_reputation: bool) { + fn clear_state(&self, clear_mempool: bool, clear_paymaster: bool, clear_reputation: bool) { if clear_mempool { - self.state.write().pool.clear() + self.state.write().pool.clear(); + } + + if clear_paymaster { + self.paymaster.clear(); } + if clear_reputation { self.reputation.clear() } @@ -595,29 
+679,12 @@ where self.reputation.dump_reputation() } - fn get_reputation_status(&self, address: Address) -> ReputationStatus { - self.reputation.status(address) + fn dump_paymaster_balances(&self) -> Vec { + self.paymaster.dump_paymaster_metadata() } - async fn get_stake_status(&self, address: Address) -> MempoolResult { - let deposit_info = self.paymaster_helper.get_deposit_info(address).await?; - - let is_staked = deposit_info - .stake - .ge(&self.config.sim_settings.min_stake_value) - && deposit_info - .unstake_delay_sec - .ge(&self.config.sim_settings.min_unstake_delay); - - let stake_status = StakeStatus { - stake_info: StakeInfo { - stake: deposit_info.stake, - unstake_delay_sec: deposit_info.unstake_delay_sec, - }, - is_staked, - }; - - Ok(stake_status) + fn get_reputation_status(&self, address: Address) -> ReputationStatus { + self.reputation.status(address) } fn set_reputation(&self, address: Address, ops_seen: u64, ops_included: u64) { @@ -630,31 +697,35 @@ struct UoPoolMetrics {} impl UoPoolMetrics { fn update_ops_seen(num_ops: isize, entry_point: Address) { - metrics::increment_gauge!("op_pool_ops_seen", num_ops as f64, "entrypoint" => entry_point.to_string()); + metrics::gauge!("op_pool_ops_seen", "entry_point" => entry_point.to_string()) + .increment(num_ops as f64); } fn increment_unmined_operations(num_ops: usize, entry_point: Address) { - metrics::counter!("op_pool_unmined_operations", num_ops as u64, "entrypoint" => entry_point.to_string()); + metrics::counter!("op_pool_unmined_operations", "entry_point" => entry_point.to_string()) + .increment(num_ops as u64); } fn increment_removed_operations(num_ops: usize, entry_point: Address) { - metrics::counter!("op_pool_removed_operations", num_ops as u64, "entrypoint" => entry_point.to_string()); + metrics::counter!("op_pool_removed_operations", "entry_point" => entry_point.to_string()) + .increment(num_ops as u64); } fn increment_removed_entities(entry_point: Address) { - 
metrics::increment_counter!("op_pool_removed_entities", "entrypoint" => entry_point.to_string()); + metrics::counter!("op_pool_removed_entities", "entry_point" => entry_point.to_string()) + .increment(1); } fn current_max_fee_gwei(fee: f64) { - metrics::gauge!("op_pool_current_max_fee_gwei", fee); + metrics::gauge!("op_pool_current_max_fee_gwei").set(fee); } fn current_max_priority_fee_gwei(fee: f64) { - metrics::gauge!("op_pool_current_max_priority_fee_gwei", fee); + metrics::gauge!("op_pool_current_max_priority_fee_gwei").set(fee); } fn current_base_fee(fee: f64) { - metrics::gauge!("op_pool_current_base_fee", fee); + metrics::gauge!("op_pool_current_base_fee").set(fee); } } @@ -663,16 +734,24 @@ mod tests { use std::collections::HashMap; use ethers::types::{Bytes, H160}; - use rundler_provider::{MockEntryPoint, MockPaymasterHelper}; + use mockall::Sequence; + use rundler_provider::{DepositInfo, MockEntryPointV0_6}; use rundler_sim::{ - EntityInfo, EntityInfos, MockPrechecker, MockSimulator, PrecheckError, PrecheckSettings, - PrecheckViolation, SimulationError, SimulationResult, SimulationSettings, - SimulationViolation, ViolationError, + MockPrechecker, MockSimulator, PrecheckError, PrecheckSettings, SimulationError, + SimulationResult, SimulationSettings, ViolationError, + }; + use rundler_types::{ + pool::{PrecheckViolation, SimulationViolation}, + v0_6::UserOperation, + EntityInfo, EntityInfos, EntityType, EntryPointVersion, GasFees, + UserOperation as UserOperationTrait, ValidTimeRange, }; - use rundler_types::{DepositInfo, EntityType, GasFees, ValidTimeRange}; use super::*; - use crate::chain::MinedOp; + use crate::{ + chain::{BalanceUpdate, MinedOp}, + mempool::{PaymasterConfig, ReputationParams}, + }; const THROTTLE_SLACK: u64 = 5; const BAN_SLACK: u64 = 10; @@ -733,17 +812,32 @@ mod tests { .unwrap(); } check_ops(pool.best_operations(3, 0).unwrap(), uos); - pool.clear_state(true, true); + pool.clear_state(true, true, true); 
assert_eq!(pool.best_operations(3, 0).unwrap(), vec![]); } #[tokio::test] async fn chain_update_mine() { - let (pool, uos) = create_pool_insert_ops(vec![ - create_op(Address::random(), 0, 3, None), - create_op(Address::random(), 0, 2, None), - create_op(Address::random(), 0, 1, None), - ]) + let paymaster = Address::random(); + + let mut entrypoint = MockEntryPointV0_6::new(); + // initial balance + entrypoint + .expect_balance_of() + .returning(|_, _| Ok(U256::from(1000))); + // after updates + entrypoint + .expect_get_balances() + .returning(|_| Ok(vec![1110.into()])); + + let (pool, uos) = create_pool_with_entrypoint_insert_ops( + vec![ + create_op(Address::random(), 0, 3, None), + create_op(Address::random(), 0, 2, None), + create_op(Address::random(), 0, 1, Some(paymaster)), + ], + entrypoint, + ) .await; check_ops(pool.best_operations(3, 0).unwrap(), uos.clone()); @@ -755,20 +849,33 @@ mod tests { reorg_depth: 0, mined_ops: vec![MinedOp { entry_point: pool.config.entry_point, - hash: uos[0].op_hash(pool.config.entry_point, 1), - sender: uos[0].sender, - nonce: uos[0].nonce, + hash: uos[0].hash(pool.config.entry_point, 1), + sender: uos[0].sender(), + nonce: uos[0].nonce(), actual_gas_cost: U256::zero(), paymaster: None, }], unmined_ops: vec![], - entity_deposits: vec![], - unmined_entity_deposits: vec![], + entity_balance_updates: vec![BalanceUpdate { + address: paymaster, + amount: 100.into(), + entrypoint: pool.config.entry_point, + is_addition: true, + }], + unmined_entity_balance_updates: vec![BalanceUpdate { + address: paymaster, + amount: 10.into(), + entrypoint: pool.config.entry_point, + is_addition: false, + }], reorg_larger_than_history: false, }) .await; check_ops(pool.best_operations(3, 0).unwrap(), uos[1..].to_vec()); + + let paymaster_balance = pool.paymaster.paymaster_balance(paymaster).await.unwrap(); + assert_eq!(paymaster_balance.confirmed_balance, 1110.into()); } #[tokio::test] @@ -781,21 +888,39 @@ mod tests { create_op(Address::random(), 
0, 1, Some(paymaster)), ]; - // add pending max cost of 30 for each uo + // add pending max cost of 50 for each uo for op in &mut ops { - op.op.call_gas_limit = 10.into(); - op.op.verification_gas_limit = 10.into(); - op.op.pre_verification_gas = 10.into(); - op.op.max_fee_per_gas = 1.into(); + let uo: &mut UserOperation = op.op.as_mut(); + uo.call_gas_limit = 10.into(); + uo.verification_gas_limit = 10.into(); + uo.pre_verification_gas = 10.into(); + uo.max_fee_per_gas = 1.into(); } - let (pool, uos) = create_pool_insert_ops(ops).await; - let metadata = pool - .state - .read() - .pool - .paymaster_metadata(paymaster) - .unwrap(); + let mut entrypoint = MockEntryPointV0_6::new(); + // initial balance, pending = 850 + entrypoint + .expect_balance_of() + .returning(|_, _| Ok(U256::from(1000))); + // after updates + let mut seq = Sequence::new(); + // one UO mined with actual cost of 10, unmine deposit of 10, mine deposit 100 + // confirmed = 1000 - 10 - 10 + 100 = 1080. Pending = 1080 - 50*2 = 980 + entrypoint + .expect_get_balances() + .once() + .in_sequence(&mut seq) + .returning(|_| Ok(vec![1080.into()])); + // Unmine UO of 10, unmine deposit of 100 + // confirmed = 1080 + 10 - 100 = 990. 
Pending = 990 - 50*3 = 840 + entrypoint + .expect_get_balances() + .once() + .in_sequence(&mut seq) + .returning(|_| Ok(vec![990.into()])); + + let (pool, uos) = create_pool_with_entrypoint_insert_ops(ops, entrypoint).await; + let metadata = pool.paymaster.paymaster_balance(paymaster).await.unwrap(); assert_eq!(metadata.pending_balance, 850.into()); check_ops(pool.best_operations(3, 0).unwrap(), uos.clone()); @@ -809,15 +934,25 @@ mod tests { reorg_depth: 0, mined_ops: vec![MinedOp { entry_point: pool.config.entry_point, - hash: uos[0].op_hash(pool.config.entry_point, 1), - sender: uos[0].sender, - nonce: uos[0].nonce, + hash: uos[0].hash(pool.config.entry_point, 1), + sender: uos[0].sender(), + nonce: uos[0].nonce(), actual_gas_cost: 10.into(), paymaster: Some(paymaster), }], unmined_ops: vec![], - entity_deposits: vec![], - unmined_entity_deposits: vec![], + entity_balance_updates: vec![BalanceUpdate { + address: paymaster, + amount: 100.into(), + entrypoint: pool.config.entry_point, + is_addition: true, + }], + unmined_entity_balance_updates: vec![BalanceUpdate { + address: paymaster, + amount: 10.into(), + entrypoint: pool.config.entry_point, + is_addition: false, + }], reorg_larger_than_history: false, }) .await; @@ -827,13 +962,8 @@ mod tests { uos.clone()[1..].to_vec(), ); - let metadata = pool - .state - .read() - .pool - .paymaster_metadata(paymaster) - .unwrap(); - assert_eq!(metadata.pending_balance, 890.into()); + let metadata = pool.paymaster.paymaster_balance(paymaster).await.unwrap(); + assert_eq!(metadata.pending_balance, 980.into()); pool.on_chain_update(&ChainUpdate { latest_block_number: 1, @@ -844,25 +974,25 @@ mod tests { mined_ops: vec![], unmined_ops: vec![MinedOp { entry_point: pool.config.entry_point, - hash: uos[0].op_hash(pool.config.entry_point, 1), - sender: uos[0].sender, - nonce: uos[0].nonce, + hash: uos[0].hash(pool.config.entry_point, 1), + sender: uos[0].sender(), + nonce: uos[0].nonce(), actual_gas_cost: 10.into(), paymaster: 
None, }], - entity_deposits: vec![], - unmined_entity_deposits: vec![], + entity_balance_updates: vec![], + unmined_entity_balance_updates: vec![BalanceUpdate { + address: paymaster, + amount: 100.into(), + entrypoint: pool.config.entry_point, + is_addition: true, + }], reorg_larger_than_history: false, }) .await; - let metadata = pool - .state - .read() - .pool - .paymaster_metadata(paymaster) - .unwrap(); - assert_eq!(metadata.pending_balance, 850.into()); + let metadata = pool.paymaster.paymaster_balance(paymaster).await.unwrap(); + assert_eq!(metadata.pending_balance, 840.into()); check_ops(pool.best_operations(3, 0).unwrap(), uos); } @@ -885,15 +1015,15 @@ mod tests { reorg_depth: 0, mined_ops: vec![MinedOp { entry_point: Address::random(), - hash: uos[0].op_hash(pool.config.entry_point, 1), - sender: uos[0].sender, - nonce: uos[0].nonce, + hash: uos[0].hash(pool.config.entry_point, 1), + sender: uos[0].sender(), + nonce: uos[0].nonce(), actual_gas_cost: U256::zero(), paymaster: None, }], unmined_ops: vec![], - entity_deposits: vec![], - unmined_entity_deposits: vec![], + entity_balance_updates: vec![], + unmined_entity_balance_updates: vec![], reorg_larger_than_history: false, }) .await; @@ -905,13 +1035,13 @@ mod tests { async fn test_account_reputation() { let address = Address::random(); let (pool, uos) = create_pool_insert_ops(vec![ - create_op_with_errors(address, 0, 2, None, None, true), - create_op_with_errors(address, 1, 2, None, None, true), - create_op_with_errors(address, 1, 2, None, None, true), + create_op_with_errors(address, 0, 2, None, None, true), // accept + create_op_with_errors(address, 1, 2, None, None, true), // accept + create_op_with_errors(address, 1, 2, None, None, true), // reject ]) .await; - // Only return 1 op per sender - check_ops(pool.best_operations(3, 0).unwrap(), vec![uos[0].clone()]); + // staked, so include all ops + check_ops(pool.best_operations(3, 0).unwrap(), uos[0..2].to_vec()); let rep = pool.dump_reputation(); 
assert_eq!(rep.len(), 1); @@ -927,15 +1057,15 @@ mod tests { reorg_depth: 0, mined_ops: vec![MinedOp { entry_point: pool.config.entry_point, - hash: uos[0].op_hash(pool.config.entry_point, 1), - sender: uos[0].sender, - nonce: uos[0].nonce, + hash: uos[0].hash(pool.config.entry_point, 1), + sender: uos[0].sender(), + nonce: uos[0].nonce(), actual_gas_cost: U256::zero(), paymaster: None, }], unmined_ops: vec![], - entity_deposits: vec![], - unmined_entity_deposits: vec![], + entity_balance_updates: vec![], + unmined_entity_balance_updates: vec![], reorg_larger_than_history: false, }) .await; @@ -956,8 +1086,12 @@ mod tests { } let uos = ops.iter().map(|op| op.op.clone()).collect::>(); let pool = create_pool(ops); + + let ops_seen = 100; + let ops_included = ops_seen / 10 - THROTTLE_SLACK - 1; + // Past throttle slack - pool.set_reputation(address, 1 + THROTTLE_SLACK, 0); + pool.set_reputation(address, ops_seen, ops_included); // Ops 0 through 3 should be included for uo in uos.iter().take(4) { @@ -999,15 +1133,15 @@ mod tests { reorg_depth: 0, mined_ops: vec![MinedOp { entry_point: pool.config.entry_point, - hash: uos[0].op_hash(pool.config.entry_point, 1), - sender: uos[0].sender, - nonce: uos[0].nonce, + hash: uos[0].hash(pool.config.entry_point, 1), + sender: uos[0].sender(), + nonce: uos[0].nonce(), actual_gas_cost: U256::zero(), paymaster: None, }], - entity_deposits: vec![], + entity_balance_updates: vec![], + unmined_entity_balance_updates: vec![], unmined_ops: vec![], - unmined_entity_deposits: vec![], reorg_larger_than_history: false, }) .await; @@ -1035,7 +1169,10 @@ mod tests { let uo = op.op.clone(); let pool = create_pool(vec![op]); // Past ban slack - pool.set_reputation(address, 1 + BAN_SLACK, 0); + + let ops_seen = 1000; + let ops_included = ops_seen / 10 - BAN_SLACK - 1; + pool.set_reputation(address, ops_seen, ops_included); // First op should be banned let ret = pool.add_operation(OperationOrigin::Local, uo.clone()).await; @@ -1053,13 +1190,19 @@ 
mod tests { async fn test_paymaster_balance_insufficient() { let paymaster = Address::random(); let mut op = create_op(Address::random(), 0, 0, Some(paymaster)); - op.op.call_gas_limit = 1000.into(); - op.op.verification_gas_limit = 1000.into(); - op.op.pre_verification_gas = 1000.into(); - op.op.max_fee_per_gas = 1.into(); + let uo: &mut UserOperation = op.op.as_mut(); + uo.call_gas_limit = 1000.into(); + uo.verification_gas_limit = 1000.into(); + uo.pre_verification_gas = 1000.into(); + uo.max_fee_per_gas = 1.into(); + + let mut entrypoint = MockEntryPointV0_6::new(); + entrypoint + .expect_balance_of() + .returning(|_, _| Ok(U256::from(1000))); let uo = op.op.clone(); - let pool = create_pool(vec![op]); + let pool = create_pool_with_entry_point(vec![op], entrypoint); let ret = pool .add_operation(OperationOrigin::Local, uo.clone()) @@ -1071,11 +1214,12 @@ mod tests { #[tokio::test] async fn precheck_error() { + let sender = Address::random(); let op = create_op_with_errors( - Address::random(), + sender, 0, 0, - Some(PrecheckViolation::InitCodeTooShort(0)), + Some(PrecheckViolation::SenderIsNotContractAndNoInitCode(sender)), None, false, ); @@ -1083,7 +1227,9 @@ mod tests { let pool = create_pool(ops); match pool.add_operation(OperationOrigin::Local, op.op).await { - Err(MempoolError::PrecheckViolation(PrecheckViolation::InitCodeTooShort(_))) => {} + Err(MempoolError::PrecheckViolation( + PrecheckViolation::SenderIsNotContractAndNoInitCode(_), + )) => {} _ => panic!("Expected InitCodeTooShort error"), } assert_eq!(pool.best_operations(1, 0).unwrap(), vec![]); @@ -1139,7 +1285,8 @@ mod tests { .unwrap(); let mut replacement = op.op.clone(); - replacement.max_fee_per_gas = replacement.max_fee_per_gas + 1; + let r: &mut UserOperation = replacement.as_mut(); + r.max_fee_per_gas = r.max_fee_per_gas + 1; let err = pool .add_operation(OperationOrigin::Local, replacement) @@ -1151,21 +1298,19 @@ mod tests { check_ops(pool.best_operations(1, 0).unwrap(), vec![op.op]); } 
- #[tokio::test] - async fn test_stake_status_staked() { - let mut pool = create_pool(vec![]); - - pool.config.sim_settings.min_stake_value = 9999; - pool.config.sim_settings.min_unstake_delay = 99; - - let status = pool.get_stake_status(Address::random()).await.unwrap(); - - assert!(status.is_staked); - } - #[tokio::test] async fn test_stake_status_not_staked() { - let mut pool = create_pool(vec![]); + let mut entrypoint = MockEntryPointV0_6::new(); + entrypoint.expect_get_deposit_info().returning(|_| { + Ok(DepositInfo { + deposit: 1000.into(), + staked: true, + stake: 10000, + unstake_delay_sec: 100, + withdraw_time: 10, + }) + }); + let mut pool = create_pool_with_entry_point(vec![], entrypoint); pool.config.sim_settings.min_stake_value = 10001; pool.config.sim_settings.min_unstake_delay = 101; @@ -1180,12 +1325,17 @@ mod tests { let paymaster = Address::random(); let mut op = create_op(Address::random(), 0, 5, Some(paymaster)); - op.op.call_gas_limit = 10.into(); - op.op.verification_gas_limit = 10.into(); - op.op.pre_verification_gas = 10.into(); - op.op.max_fee_per_gas = 1.into(); + let uo: &mut UserOperation = op.op.as_mut(); + uo.call_gas_limit = 10.into(); + uo.verification_gas_limit = 10.into(); + uo.pre_verification_gas = 10.into(); + uo.max_fee_per_gas = 1.into(); - let pool = create_pool(vec![op.clone()]); + let mut entrypoint = MockEntryPointV0_6::new(); + entrypoint + .expect_balance_of() + .returning(|_, _| Ok(U256::from(1000))); + let pool = create_pool_with_entry_point(vec![op.clone()], entrypoint); let _ = pool .add_operation(OperationOrigin::Local, op.op.clone()) @@ -1193,7 +1343,8 @@ mod tests { .unwrap(); let mut replacement = op.op.clone(); - replacement.max_fee_per_gas = replacement.max_fee_per_gas + 1; + let r: &mut UserOperation = replacement.as_mut(); + r.max_fee_per_gas = r.max_fee_per_gas + 1; let _ = pool .add_operation(OperationOrigin::Local, replacement.clone()) @@ -1202,16 +1353,11 @@ mod tests { check_ops(pool.best_operations(1, 
0).unwrap(), vec![replacement]); - let paymaster_balance = pool - .state - .read() - .pool - .paymaster_metadata(paymaster) - .unwrap(); + let paymaster_balance = pool.paymaster.paymaster_balance(paymaster).await.unwrap(); assert_eq!(paymaster_balance.pending_balance, U256::from(900)); let rep = pool.dump_reputation(); assert_eq!(rep.len(), 1); - assert_eq!(rep[0].address, op.op.sender); + assert_eq!(rep[0].address, op.op.sender()); assert_eq!(rep[0].ops_seen, 1); assert_eq!(rep[0].ops_included, 0); } @@ -1255,6 +1401,64 @@ mod tests { assert_eq!(pool_op.uo, op.op); } + #[tokio::test] + async fn test_remove_by_id_too_soon() { + let op = create_op(Address::random(), 0, 0, None); + let pool = create_pool(vec![op.clone()]); + + let _ = pool + .add_operation(OperationOrigin::Local, op.op.clone()) + .await + .unwrap(); + + assert!(matches!( + pool.remove_op_by_id(&op.op.id()), + Err(MempoolError::OperationDropTooSoon(_, _, _)) + )); + check_ops(pool.best_operations(1, 0).unwrap(), vec![op.op]); + } + + #[tokio::test] + async fn test_remove_by_id_not_found() { + let op = create_op(Address::random(), 0, 0, None); + let pool = create_pool(vec![op.clone()]); + + let _ = pool + .add_operation(OperationOrigin::Local, op.op.clone()) + .await + .unwrap(); + + assert!(matches!( + pool.remove_op_by_id(&UserOperationId { + sender: Address::random(), + nonce: 0.into() + }), + Ok(None) + )); + check_ops(pool.best_operations(1, 0).unwrap(), vec![op.op]); + } + + #[tokio::test] + async fn test_remove_by_id() { + let op = create_op(Address::random(), 0, 0, None); + let pool = create_pool(vec![op.clone()]); + + let _ = pool + .add_operation(OperationOrigin::Local, op.op.clone()) + .await + .unwrap(); + let hash = op.op.hash(pool.config.entry_point, 1); + + pool.on_chain_update(&ChainUpdate { + latest_block_number: 11, + ..Default::default() + }) + .await; + + assert_eq!(pool.remove_op_by_id(&op.op.id()).unwrap().unwrap(), hash); + check_ops(pool.best_operations(1, 0).unwrap(), vec![]); 
+ } + #[tokio::test] async fn test_get_user_op_by_hash_not_found() { let op = create_op(Address::random(), 0, 0, None); @@ -1289,9 +1493,22 @@ mod tests { .is_err()); } + #[tokio::test] + async fn test_best_staked() { + let address = Address::random(); + let (pool, uos) = create_pool_insert_ops(vec![ + create_op_with_errors(address, 0, 2, None, None, true), + create_op_with_errors(address, 1, 2, None, None, true), + create_op_with_errors(address, 2, 2, None, None, true), + ]) + .await; + // staked, so include all ops + check_ops(pool.best_operations(3, 0).unwrap(), uos); + } + #[derive(Clone, Debug)] struct OpWithErrors { - op: UserOperation, + op: UserOperationVariant, valid_time_range: ValidTimeRange, precheck_error: Option, simulation_error: Option, @@ -1301,17 +1518,64 @@ mod tests { fn create_pool( ops: Vec, ) -> UoPool< - impl ReputationManager, - impl Prechecker, - impl Simulator, + UserOperation, + impl Prechecker, + impl Simulator, impl EntryPoint, - impl PaymasterHelper, > { - let reputation = Arc::new(MockReputationManager::new(THROTTLE_SLACK, BAN_SLACK)); + let entrypoint = MockEntryPointV0_6::new(); + create_pool_with_entry_point(ops, entrypoint) + } + + fn create_pool_with_entry_point( + ops: Vec, + entrypoint: MockEntryPointV0_6, + ) -> UoPool< + UserOperation, + impl Prechecker, + impl Simulator, + impl EntryPoint, + > { + let args = PoolConfig { + entry_point: Address::random(), + entry_point_version: EntryPointVersion::V0_6, + chain_id: 1, + min_replacement_fee_increase_percentage: 10, + max_size_of_pool_bytes: 10000, + blocklist: None, + allowlist: None, + precheck_settings: PrecheckSettings::default(), + sim_settings: SimulationSettings::default(), + mempool_channel_configs: HashMap::new(), + num_shards: 1, + same_sender_mempool_count: 4, + throttled_entity_mempool_count: 4, + throttled_entity_live_blocks: 10, + paymaster_tracking_enabled: true, + paymaster_cache_length: 100, + reputation_tracking_enabled: true, + drop_min_num_blocks: 10, + }; + 
let mut simulator = MockSimulator::new(); let mut prechecker = MockPrechecker::new(); - let mut entrypoint = MockEntryPoint::new(); - let mut paymaster_helper = MockPaymasterHelper::new(); + + let paymaster = PaymasterTracker::new( + entrypoint, + PaymasterConfig::new( + args.sim_settings.min_stake_value, + args.sim_settings.min_unstake_delay, + args.paymaster_tracking_enabled, + args.paymaster_cache_length, + ), + ); + + let reputation = Arc::new(AddressReputation::new( + ReputationParams::test_parameters(BAN_SLACK, THROTTLE_SLACK), + args.blocklist.clone().unwrap_or_default(), + args.allowlist.clone().unwrap_or_default(), + )); + prechecker.expect_update_fees().returning(|| { Ok(( GasFees { @@ -1322,20 +1586,6 @@ mod tests { )) }); - paymaster_helper.expect_get_deposit_info().returning(|_| { - Ok(DepositInfo { - deposit: 1000, - staked: true, - stake: 10000, - unstake_delay_sec: 100, - withdraw_time: 10, - }) - }); - - entrypoint - .expect_balance_of() - .returning(|_, _| Ok(U256::from(1000))); - for op in ops { prechecker.expect_check().returning(move |_| { if let Some(error) = &op.precheck_error { @@ -1359,7 +1609,7 @@ mod tests { valid_time_range: op.valid_time_range, entity_infos: EntityInfos { sender: EntityInfo { - address: op.op.sender, + entity: Entity::account(op.op.sender()), is_staked: false, }, ..EntityInfos::default() @@ -1370,45 +1620,48 @@ mod tests { }); } - let args = PoolConfig { - entry_point: Address::random(), - chain_id: 1, - min_replacement_fee_increase_percentage: 10, - max_size_of_pool_bytes: 10000, - blocklist: None, - allowlist: None, - precheck_settings: PrecheckSettings::default(), - sim_settings: SimulationSettings::default(), - mempool_channel_configs: HashMap::new(), - num_shards: 1, - same_sender_mempool_count: 4, - throttled_entity_mempool_count: 4, - throttled_entity_live_blocks: 10, - }; let (event_sender, _) = broadcast::channel(4); UoPool::new( args, - reputation, event_sender, prechecker, simulator, - entrypoint, - 
paymaster_helper, + paymaster, + reputation, ) } + async fn create_pool_with_entrypoint_insert_ops( + ops: Vec, + entrypoint: MockEntryPointV0_6, + ) -> ( + UoPool< + UserOperation, + impl Prechecker, + impl Simulator, + impl EntryPoint, + >, + Vec, + ) { + let uos = ops.iter().map(|op| op.op.clone()).collect::>(); + let pool = create_pool_with_entry_point(ops, entrypoint); + for op in &uos { + let _ = pool.add_operation(OperationOrigin::Local, op.clone()).await; + } + (pool, uos) + } + async fn create_pool_insert_ops( ops: Vec, ) -> ( UoPool< - impl ReputationManager, - impl Prechecker, - impl Simulator, + UserOperation, + impl Prechecker, + impl Simulator, impl EntryPoint, - impl PaymasterHelper, >, - Vec, + Vec, ) { let uos = ops.iter().map(|op| op.op.clone()).collect::>(); let pool = create_pool(ops); @@ -1437,7 +1690,8 @@ mod tests { max_fee_per_gas: max_fee_per_gas.into(), paymaster_and_data, ..UserOperation::default() - }, + } + .into(), valid_time_range: ValidTimeRange::default(), precheck_error: None, simulation_error: None, @@ -1459,7 +1713,8 @@ mod tests { nonce: nonce.into(), max_fee_per_gas: max_fee_per_gas.into(), ..UserOperation::default() - }, + } + .into(), valid_time_range: ValidTimeRange::default(), precheck_error, simulation_error, @@ -1467,117 +1722,10 @@ mod tests { } } - fn check_ops(ops: Vec>, expected: Vec) { + fn check_ops(ops: Vec>, expected: Vec) { assert_eq!(ops.len(), expected.len()); for (actual, expected) in ops.into_iter().zip(expected) { assert_eq!(actual.uo, expected); } } - - #[derive(Default, Clone)] - struct MockReputationManager { - bundle_invalidation_ops_seen_staked_penalty: u64, - bundle_invalidation_ops_seen_unstaked_penalty: u64, - same_unstaked_entity_mempool_count: u64, - inclusion_rate_factor: u64, - throttling_slack: u64, - ban_slack: u64, - counts: Arc>, - } - - #[derive(Default)] - struct Counts { - seen: HashMap, - included: HashMap, - } - - impl MockReputationManager { - fn new(throttling_slack: u64, ban_slack: 
u64) -> Self { - Self { - throttling_slack, - ban_slack, - ..Self::default() - } - } - } - - impl ReputationManager for MockReputationManager { - fn status(&self, address: Address) -> ReputationStatus { - let counts = self.counts.read(); - - let seen = *counts.seen.get(&address).unwrap_or(&0); - let included = *counts.included.get(&address).unwrap_or(&0); - let diff = seen.saturating_sub(included); - if diff > self.ban_slack { - ReputationStatus::Banned - } else if diff > self.throttling_slack { - ReputationStatus::Throttled - } else { - ReputationStatus::Ok - } - } - - fn add_seen(&self, address: Address) { - *self.counts.write().seen.entry(address).or_default() += 1; - } - - fn handle_srep_050_penalty(&self, address: Address) { - *self.counts.write().seen.entry(address).or_default() = - self.bundle_invalidation_ops_seen_staked_penalty; - } - - fn handle_urep_030_penalty(&self, address: Address) { - *self.counts.write().seen.entry(address).or_default() += - self.bundle_invalidation_ops_seen_unstaked_penalty; - } - - fn add_included(&self, address: Address) { - *self.counts.write().included.entry(address).or_default() += 1; - } - - fn remove_included(&self, address: Address) { - let mut counts = self.counts.write(); - let included = counts.included.entry(address).or_default(); - *included = included.saturating_sub(1); - } - - fn dump_reputation(&self) -> Vec { - self.counts - .read() - .seen - .iter() - .map(|(address, ops_seen)| Reputation { - address: *address, - ops_seen: *ops_seen, - ops_included: *self.counts.read().included.get(address).unwrap_or(&0), - }) - .collect() - } - - fn set_reputation(&self, address: Address, ops_seen: u64, ops_included: u64) { - let mut counts = self.counts.write(); - counts.seen.insert(address, ops_seen); - counts.included.insert(address, ops_included); - } - - fn get_ops_allowed(&self, address: Address) -> u64 { - let counts = self.counts.read(); - let seen = *counts.seen.get(&address).unwrap_or(&0); - let included = 
*counts.included.get(&address).unwrap_or(&0); - let inclusion_based_count = if seen == 0 { - // make sure we aren't dividing by 0 - 0 - } else { - included * self.inclusion_rate_factor / seen + std::cmp::min(included, 10_000) - }; - - // return ops allowed, as defined by UREP-020 - self.same_unstaked_entity_mempool_count + inclusion_based_count - } - - fn clear(&self) { - self.counts.write().seen.clear(); - self.counts.write().included.clear(); - } - } } diff --git a/crates/pool/src/server/local.rs b/crates/pool/src/server/local.rs index b3c0f571..0733f9a7 100644 --- a/crates/pool/src/server/local.rs +++ b/crates/pool/src/server/local.rs @@ -11,14 +11,21 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. -use std::{collections::HashMap, pin::Pin, sync::Arc}; +use std::{collections::HashMap, future::Future, pin::Pin, sync::Arc}; use async_stream::stream; use async_trait::async_trait; use ethers::types::{Address, H256}; +use futures::future; use futures_util::Stream; use rundler_task::server::{HealthCheck, ServerStatus}; -use rundler_types::{EntityUpdate, UserOperation}; +use rundler_types::{ + pool::{ + MempoolError, NewHead, PaymasterMetadata, Pool, PoolError, PoolOperation, PoolResult, + Reputation, ReputationStatus, StakeStatus, + }, + EntityUpdate, EntryPointVersion, UserOperationId, UserOperationVariant, +}; use tokio::{ sync::{broadcast, mpsc, oneshot}, task::JoinHandle, @@ -26,12 +33,9 @@ use tokio::{ use tokio_util::sync::CancellationToken; use tracing::error; -use super::{PoolResult, PoolServerError}; use crate::{ chain::ChainUpdate, - mempool::{Mempool, MempoolError, OperationOrigin, PoolOperation, StakeStatus}, - server::{NewHead, PoolServer, Reputation}, - ReputationStatus, + mempool::{Mempool, OperationOrigin}, }; /// Local pool server builder @@ -62,9 +66,9 @@ impl LocalPoolBuilder { } /// Run the local pool server, consumes the builder - pub fn run( + pub fn run( 
self, - mempools: HashMap>, + mempools: HashMap>, chain_updates: broadcast::Receiver>, shutdown_token: CancellationToken, ) -> JoinHandle> { @@ -86,10 +90,10 @@ pub struct LocalPoolHandle { req_sender: mpsc::Sender, } -struct LocalPoolServerRunner { +struct LocalPoolServerRunner { req_receiver: mpsc::Receiver, block_sender: broadcast::Sender, - mempools: HashMap>, + mempools: HashMap>, chain_updates: broadcast::Receiver>, } @@ -109,17 +113,17 @@ impl LocalPoolHandle { } #[async_trait] -impl PoolServer for LocalPoolHandle { +impl Pool for LocalPoolHandle { async fn get_supported_entry_points(&self) -> PoolResult> { let req = ServerRequestKind::GetSupportedEntryPoints; let resp = self.send(req).await?; match resp { ServerResponse::GetSupportedEntryPoints { entry_points } => Ok(entry_points), - _ => Err(PoolServerError::UnexpectedResponse), + _ => Err(PoolError::UnexpectedResponse), } } - async fn add_op(&self, entry_point: Address, op: UserOperation) -> PoolResult { + async fn add_op(&self, entry_point: Address, op: UserOperationVariant) -> PoolResult { let req = ServerRequestKind::AddOp { entry_point, op, @@ -128,7 +132,7 @@ impl PoolServer for LocalPoolHandle { let resp = self.send(req).await?; match resp { ServerResponse::AddOp { hash } => Ok(hash), - _ => Err(PoolServerError::UnexpectedResponse), + _ => Err(PoolError::UnexpectedResponse), } } @@ -146,7 +150,7 @@ impl PoolServer for LocalPoolHandle { let resp = self.send(req).await?; match resp { ServerResponse::GetOps { ops } => Ok(ops), - _ => Err(PoolServerError::UnexpectedResponse), + _ => Err(PoolError::UnexpectedResponse), } } @@ -155,7 +159,7 @@ impl PoolServer for LocalPoolHandle { let resp = self.send(req).await?; match resp { ServerResponse::GetOpByHash { op } => Ok(op), - _ => Err(PoolServerError::UnexpectedResponse), + _ => Err(PoolError::UnexpectedResponse), } } @@ -164,7 +168,20 @@ impl PoolServer for LocalPoolHandle { let resp = self.send(req).await?; match resp { ServerResponse::RemoveOps => 
Ok(()), - _ => Err(PoolServerError::UnexpectedResponse), + _ => Err(PoolError::UnexpectedResponse), + } + } + + async fn remove_op_by_id( + &self, + entry_point: Address, + id: UserOperationId, + ) -> PoolResult> { + let req = ServerRequestKind::RemoveOpById { entry_point, id }; + let resp = self.send(req).await?; + match resp { + ServerResponse::RemoveOpById { hash } => Ok(hash), + _ => Err(PoolError::UnexpectedResponse), } } @@ -180,23 +197,43 @@ impl PoolServer for LocalPoolHandle { let resp = self.send(req).await?; match resp { ServerResponse::UpdateEntities => Ok(()), - _ => Err(PoolServerError::UnexpectedResponse), + _ => Err(PoolError::UnexpectedResponse), } } async fn debug_clear_state( &self, clear_mempool: bool, + clear_paymaster: bool, clear_reputation: bool, - ) -> Result<(), PoolServerError> { + ) -> Result<(), PoolError> { let req = ServerRequestKind::DebugClearState { clear_mempool, clear_reputation, + clear_paymaster, }; let resp = self.send(req).await?; match resp { ServerResponse::DebugClearState => Ok(()), - _ => Err(PoolServerError::UnexpectedResponse), + _ => Err(PoolError::UnexpectedResponse), + } + } + + async fn admin_set_tracking( + &self, + entry_point: Address, + paymaster: bool, + reputation: bool, + ) -> Result<(), PoolError> { + let req = ServerRequestKind::AdminSetTracking { + entry_point, + paymaster, + reputation, + }; + let resp = self.send(req).await?; + match resp { + ServerResponse::AdminSetTracking => Ok(()), + _ => Err(PoolError::UnexpectedResponse), } } @@ -205,7 +242,7 @@ impl PoolServer for LocalPoolHandle { let resp = self.send(req).await?; match resp { ServerResponse::DebugDumpMempool { ops } => Ok(ops), - _ => Err(PoolServerError::UnexpectedResponse), + _ => Err(PoolError::UnexpectedResponse), } } @@ -221,7 +258,7 @@ impl PoolServer for LocalPoolHandle { let resp = self.send(req).await?; match resp { ServerResponse::DebugSetReputations => Ok(()), - _ => Err(PoolServerError::UnexpectedResponse), + _ => 
Err(PoolError::UnexpectedResponse), } } @@ -230,7 +267,19 @@ impl PoolServer for LocalPoolHandle { let resp = self.send(req).await?; match resp { ServerResponse::DebugDumpReputation { reputations } => Ok(reputations), - _ => Err(PoolServerError::UnexpectedResponse), + _ => Err(PoolError::UnexpectedResponse), + } + } + + async fn debug_dump_paymaster_balances( + &self, + entry_point: Address, + ) -> PoolResult> { + let req = ServerRequestKind::DebugDumpPaymasterBalances { entry_point }; + let resp = self.send(req).await?; + match resp { + ServerResponse::DebugDumpPaymasterBalances { balances } => Ok(balances), + _ => Err(PoolError::UnexpectedResponse), } } @@ -246,7 +295,7 @@ impl PoolServer for LocalPoolHandle { let resp = self.send(req).await?; match resp { ServerResponse::GetStakeStatus { status } => Ok(status), - _ => Err(PoolServerError::UnexpectedResponse), + _ => Err(PoolError::UnexpectedResponse), } } @@ -262,7 +311,7 @@ impl PoolServer for LocalPoolHandle { let resp = self.send(req).await?; match resp { ServerResponse::GetReputationStatus { status } => Ok(status), - _ => Err(PoolServerError::UnexpectedResponse), + _ => Err(PoolError::UnexpectedResponse), } } @@ -284,7 +333,7 @@ impl PoolServer for LocalPoolHandle { } } })), - _ => Err(PoolServerError::UnexpectedResponse), + _ => Err(PoolError::UnexpectedResponse), } } } @@ -304,14 +353,11 @@ impl HealthCheck for LocalPoolHandle { } } -impl LocalPoolServerRunner -where - M: Mempool, -{ +impl LocalPoolServerRunner { fn new( req_receiver: mpsc::Receiver, block_sender: broadcast::Sender, - mempools: HashMap>, + mempools: HashMap>, chain_updates: broadcast::Receiver>, ) -> Self { Self { @@ -322,10 +368,10 @@ where } } - fn get_pool(&self, entry_point: Address) -> PoolResult<&Arc> { - self.mempools.get(&entry_point).ok_or_else(|| { - PoolServerError::MempoolError(MempoolError::UnknownEntryPoint(entry_point)) - }) + fn get_pool(&self, entry_point: Address) -> PoolResult<&Arc> { + self.mempools + .get(&entry_point) 
+ .ok_or_else(|| PoolError::MempoolError(MempoolError::UnknownEntryPoint(entry_point))) } fn get_ops( @@ -357,6 +403,15 @@ where Ok(()) } + fn remove_op_by_id( + &self, + entry_point: Address, + id: &UserOperationId, + ) -> PoolResult> { + let mempool = self.get_pool(entry_point)?; + mempool.remove_op_by_id(id).map_err(|e| e.into()) + } + fn update_entities<'a>( &self, entry_point: Address, @@ -369,13 +424,29 @@ where Ok(()) } - fn debug_clear_state(&self, clear_mempool: bool, clear_reputation: bool) -> PoolResult<()> { + fn debug_clear_state( + &self, + clear_mempool: bool, + clear_paymaster: bool, + clear_reputation: bool, + ) -> PoolResult<()> { for mempool in self.mempools.values() { - mempool.clear_state(clear_mempool, clear_reputation); + mempool.clear_state(clear_mempool, clear_paymaster, clear_reputation); } Ok(()) } + fn admin_set_tracking( + &self, + entry_point: Address, + paymaster: bool, + reputation: bool, + ) -> PoolResult<()> { + let mempool = self.get_pool(entry_point)?; + mempool.set_tracking(paymaster, reputation); + Ok(()) + } + fn debug_dump_mempool(&self, entry_point: Address) -> PoolResult> { let mempool = self.get_pool(entry_point)?; Ok(mempool @@ -402,6 +473,14 @@ where Ok(mempool.dump_reputation()) } + fn debug_dump_paymaster_balances( + &self, + entry_point: Address, + ) -> PoolResult> { + let mempool = self.get_pool(entry_point)?; + Ok(mempool.dump_paymaster_balances()) + } + fn get_reputation_status( &self, entry_point: Address, @@ -411,6 +490,28 @@ where Ok(mempool.get_reputation_status(address)) } + fn get_pool_and_spawn( + &self, + entry_point: Address, + response: oneshot::Sender>, + f: F, + ) where + F: FnOnce(Arc, oneshot::Sender>) -> Fut, + Fut: Future + Send + 'static, + { + match self.get_pool(entry_point) { + Ok(mempool) => { + let mempool = Arc::clone(mempool); + tokio::spawn(f(mempool, response)); + } + Err(e) => { + if let Err(e) = response.send(Err(e)) { + tracing::error!("Failed to send response: {:?}", e); + } + } + } + 
} + async fn run(&mut self, shutdown_token: CancellationToken) -> anyhow::Result<()> { loop { tokio::select! { @@ -425,41 +526,79 @@ where // For example, a bundle builder listening for a new block to kick off // its bundle building process will want to be able to query the mempool // and only receive operations that have not yet been mined. - for mempool in self.mempools.values() { - mempool.on_chain_update(&chain_update).await; - } - - let _ = self.block_sender.send(NewHead { - block_hash: chain_update.latest_block_hash, - block_number: chain_update.latest_block_number, + let block_sender = self.block_sender.clone(); + let update_futures : Vec<_> = self.mempools.values().map(|m| { + let m = Arc::clone(m); + let cu = Arc::clone(&chain_update); + async move { m.on_chain_update(&cu).await } + }).collect(); + tokio::spawn(async move { + future::join_all(update_futures).await; + let _ = block_sender.send(NewHead { + block_hash: chain_update.latest_block_hash, + block_number: chain_update.latest_block_number, + }); }); } } Some(req) = self.req_receiver.recv() => { let resp = match req.request { + // Async methods + // Responses are sent in the spawned task + ServerRequestKind::AddOp { entry_point, op, origin } => { + let fut = |mempool: Arc, response: oneshot::Sender>| async move { + let resp = 'resp: { + match mempool.entry_point_version() { + EntryPointVersion::V0_6 => { + if !matches!(&op, UserOperationVariant::V0_6(_)){ + break 'resp Err(anyhow::anyhow!("Invalid user operation version for mempool v0.6 {:?}", op.uo_type()).into()); + } + } + EntryPointVersion::V0_7 => { + if !matches!(&op, UserOperationVariant::V0_7(_)){ + break 'resp Err(anyhow::anyhow!("Invalid user operation version for mempool v0.7 {:?}", op.uo_type()).into()); + } + } + EntryPointVersion::Unspecified => { + panic!("Found mempool with unspecified entry point version") + } + } + + match mempool.add_operation(origin, op).await { + Ok(hash) => Ok(ServerResponse::AddOp { hash }), + Err(e) => 
Err(e.into()), + } + }; + + if let Err(e) = response.send(resp) { + tracing::error!("Failed to send response: {:?}", e); + } + }; + + self.get_pool_and_spawn(entry_point, req.response, fut); + continue; + }, + ServerRequestKind::GetStakeStatus { entry_point, address }=> { + let fut = |mempool: Arc, response: oneshot::Sender>| async move { + let resp = match mempool.get_stake_status(address).await { + Ok(status) => Ok(ServerResponse::GetStakeStatus { status }), + Err(e) => Err(e.into()), + }; + if let Err(e) = response.send(resp) { + tracing::error!("Failed to send response: {:?}", e); + } + }; + self.get_pool_and_spawn(entry_point, req.response, fut); + continue; + }, + + // Sync methods + // Responses are sent in the main loop below ServerRequestKind::GetSupportedEntryPoints => { Ok(ServerResponse::GetSupportedEntryPoints { entry_points: self.mempools.keys().copied().collect() }) }, - ServerRequestKind::AddOp { entry_point, op, origin } => { - match self.get_pool(entry_point) { - Ok(mempool) => { - let mempool = Arc::clone(mempool); - tokio::spawn(async move { - let resp = match mempool.add_operation(origin, op).await { - Ok(hash) => Ok(ServerResponse::AddOp { hash }), - Err(e) => Err(e.into()), - }; - if let Err(e) = req.response.send(resp) { - tracing::error!("Failed to send response: {:?}", e); - } - }); - continue; - }, - Err(e) => Err(e), - } - }, ServerRequestKind::GetOps { entry_point, max_ops, shard_index } => { match self.get_ops(entry_point, max_ops, shard_index) { Ok(ops) => Ok(ServerResponse::GetOps { ops }), @@ -478,14 +617,26 @@ where Err(e) => Err(e), } }, + ServerRequestKind::RemoveOpById { entry_point, id } => { + match self.remove_op_by_id(entry_point, &id) { + Ok(hash) => Ok(ServerResponse::RemoveOpById{ hash }), + Err(e) => Err(e), + } + }, + ServerRequestKind::AdminSetTracking{ entry_point, paymaster, reputation } => { + match self.admin_set_tracking(entry_point, paymaster, reputation) { + Ok(_) => Ok(ServerResponse::AdminSetTracking), + 
Err(e) => Err(e), + } + }, ServerRequestKind::UpdateEntities { entry_point, entity_updates } => { match self.update_entities(entry_point, &entity_updates) { Ok(_) => Ok(ServerResponse::UpdateEntities), Err(e) => Err(e), } }, - ServerRequestKind::DebugClearState { clear_mempool, clear_reputation } => { - match self.debug_clear_state(clear_mempool, clear_reputation) { + ServerRequestKind::DebugClearState { clear_mempool, clear_paymaster, clear_reputation } => { + match self.debug_clear_state(clear_mempool, clear_paymaster, clear_reputation) { Ok(_) => Ok(ServerResponse::DebugClearState), Err(e) => Err(e), } @@ -508,27 +659,15 @@ where Err(e) => Err(e), } }, - ServerRequestKind::GetReputationStatus{ entry_point, address } => { - match self.get_reputation_status(entry_point, address) { - Ok(status) => Ok(ServerResponse::GetReputationStatus { status }), + ServerRequestKind::DebugDumpPaymasterBalances { entry_point } => { + match self.debug_dump_paymaster_balances(entry_point) { + Ok(balances) => Ok(ServerResponse::DebugDumpPaymasterBalances { balances }), Err(e) => Err(e), } }, - ServerRequestKind::GetStakeStatus { entry_point, address }=> { - match self.get_pool(entry_point) { - Ok(mempool) => { - let mempool = Arc::clone(mempool); - tokio::spawn(async move { - let resp = match mempool.get_stake_status(address).await { - Ok(status) => Ok(ServerResponse::GetStakeStatus { status }), - Err(e) => Err(e.into()), - }; - if let Err(e) = req.response.send(resp) { - tracing::error!("Failed to send response: {:?}", e); - } - }); - continue; - }, + ServerRequestKind::GetReputationStatus{ entry_point, address } => { + match self.get_reputation_status(entry_point, address) { + Ok(status) => Ok(ServerResponse::GetReputationStatus { status }), Err(e) => Err(e), } }, @@ -558,7 +697,7 @@ enum ServerRequestKind { GetSupportedEntryPoints, AddOp { entry_point: Address, - op: UserOperation, + op: UserOperationVariant, origin: OperationOrigin, }, GetOps { @@ -573,6 +712,10 @@ enum 
ServerRequestKind { entry_point: Address, ops: Vec, }, + RemoveOpById { + entry_point: Address, + id: UserOperationId, + }, UpdateEntities { entry_point: Address, entity_updates: Vec, @@ -580,6 +723,12 @@ enum ServerRequestKind { DebugClearState { clear_mempool: bool, clear_reputation: bool, + clear_paymaster: bool, + }, + AdminSetTracking { + entry_point: Address, + paymaster: bool, + reputation: bool, }, DebugDumpMempool { entry_point: Address, @@ -591,6 +740,9 @@ enum ServerRequestKind { DebugDumpReputation { entry_point: Address, }, + DebugDumpPaymasterBalances { + entry_point: Address, + }, GetReputationStatus { entry_point: Address, address: Address, @@ -617,8 +769,12 @@ enum ServerResponse { op: Option, }, RemoveOps, + RemoveOpById { + hash: Option, + }, UpdateEntities, DebugClearState, + AdminSetTracking, DebugDumpMempool { ops: Vec, }, @@ -626,6 +782,9 @@ enum ServerResponse { DebugDumpReputation { reputations: Vec, }, + DebugDumpPaymasterBalances { + balances: Vec, + }, GetReputationStatus { status: ReputationStatus, }, @@ -642,6 +801,7 @@ mod tests { use std::{iter::zip, sync::Arc}; use futures_util::StreamExt; + use rundler_types::v0_6::UserOperation; use super::*; use crate::{chain::ChainUpdate, mempool::MockMempool}; @@ -650,18 +810,18 @@ mod tests { async fn test_add_op() { let mut mock_pool = MockMempool::new(); let hash0 = H256::random(); + mock_pool + .expect_entry_point_version() + .returning(|| EntryPointVersion::V0_6); mock_pool .expect_add_operation() .returning(move |_, _| Ok(hash0)); let ep = Address::random(); - let state = setup(HashMap::from([(ep, Arc::new(mock_pool))])); + let pool: Arc = Arc::new(mock_pool); + let state = setup(HashMap::from([(ep, pool)])); - let hash1 = state - .handle - .add_op(ep, UserOperation::default()) - .await - .unwrap(); + let hash1 = state.handle.add_op(ep, mock_op()).await.unwrap(); assert_eq!(hash0, hash1); } @@ -671,7 +831,8 @@ mod tests { mock_pool.expect_on_chain_update().returning(|_| ()); let ep = 
Address::random(); - let state = setup(HashMap::from([(ep, Arc::new(mock_pool))])); + let pool: Arc = Arc::new(mock_pool); + let state = setup(HashMap::from([(ep, pool)])); let mut sub = state.handle.subscribe_new_heads().await.unwrap(); @@ -697,7 +858,10 @@ mod tests { let state = setup( eps0.iter() - .map(|ep| (*ep, Arc::new(MockMempool::new()))) + .map(|ep| { + let pool: Arc = Arc::new(MockMempool::new()); + (*ep, pool) + }) .collect(), ); @@ -716,31 +880,36 @@ mod tests { let h1 = H256::random(); let h2 = H256::random(); let hashes = [h0, h1, h2]; + pools[0] + .expect_entry_point_version() + .returning(|| EntryPointVersion::V0_6); pools[0] .expect_add_operation() .returning(move |_, _| Ok(h0)); + pools[1] + .expect_entry_point_version() + .returning(|| EntryPointVersion::V0_6); pools[1] .expect_add_operation() .returning(move |_, _| Ok(h1)); + pools[2] + .expect_entry_point_version() + .returning(|| EntryPointVersion::V0_6); pools[2] .expect_add_operation() .returning(move |_, _| Ok(h2)); let state = setup( zip(eps.iter(), pools.into_iter()) - .map(|(ep, pool)| (*ep, Arc::new(pool))) + .map(|(ep, pool)| { + let pool: Arc = Arc::new(pool); + (*ep, pool) + }) .collect(), ); for (ep, hash) in zip(eps.iter(), hashes.iter()) { - assert_eq!( - *hash, - state - .handle - .add_op(*ep, UserOperation::default()) - .await - .unwrap() - ); + assert_eq!(*hash, state.handle.add_op(*ep, mock_op()).await.unwrap()); } } @@ -750,7 +919,7 @@ mod tests { _run_handle: JoinHandle>, } - fn setup(pools: HashMap>) -> State { + fn setup(pools: HashMap>) -> State { let builder = LocalPoolBuilder::new(10, 10); let handle = builder.get_handle(); let (tx, rx) = broadcast::channel(10); @@ -761,4 +930,8 @@ mod tests { _run_handle: run_handle, } } + + fn mock_op() -> UserOperationVariant { + UserOperationVariant::V0_6(UserOperation::default()) + } } diff --git a/crates/pool/src/server/mod.rs b/crates/pool/src/server/mod.rs index 62215861..2cf32bef 100644 --- a/crates/pool/src/server/mod.rs +++ 
b/crates/pool/src/server/mod.rs @@ -11,116 +11,9 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. -mod error; mod local; -mod remote; - -use std::pin::Pin; - -use async_trait::async_trait; -pub use error::PoolServerError; -use ethers::types::{Address, H256}; -use futures_util::Stream; pub use local::{LocalPoolBuilder, LocalPoolHandle}; -#[cfg(feature = "test-utils")] -use mockall::automock; + +mod remote; pub(crate) use remote::spawn_remote_mempool_server; pub use remote::RemotePoolClient; -use rundler_types::{EntityUpdate, UserOperation}; - -use crate::{ - mempool::{PoolOperation, Reputation, StakeStatus}, - ReputationStatus, -}; - -/// Result type for pool server operations. -pub type PoolResult = std::result::Result; - -#[derive(Clone, Debug)] -pub struct NewHead { - pub block_hash: H256, - pub block_number: u64, -} - -impl Default for NewHead { - fn default() -> NewHead { - NewHead { - block_hash: H256::zero(), - block_number: 0, - } - } -} - -/// Pool server trait -#[cfg_attr(feature = "test-utils", automock)] -#[async_trait] -pub trait PoolServer: Send + Sync + 'static { - /// Get the supported entry points of the pool - async fn get_supported_entry_points(&self) -> PoolResult>; - - /// Add an operation to the pool - async fn add_op(&self, entry_point: Address, op: UserOperation) -> PoolResult; - - /// Get operations from the pool - async fn get_ops( - &self, - entry_point: Address, - max_ops: u64, - shard_index: u64, - ) -> PoolResult>; - - /// Get an operation from the pool by hash - /// Checks each entry point in order until the operation is found - /// Returns None if the operation is not found - async fn get_op_by_hash(&self, hash: H256) -> PoolResult>; - - /// Remove operations from the pool by hash - async fn remove_ops(&self, entry_point: Address, ops: Vec) -> PoolResult<()>; - - /// Update operations associated with entities from the pool - async fn 
update_entities( - &self, - entry_point: Address, - entities: Vec, - ) -> PoolResult<()>; - - /// Subscribe to new chain heads from the pool. - /// - /// The pool will notify the subscriber when a new chain head is received, and the pool - /// has processed all operations up to that head. - async fn subscribe_new_heads(&self) -> PoolResult + Send>>>; - - /// Clear the pool state, used for debug methods - async fn debug_clear_state( - &self, - clear_mempool: bool, - clear_reputation: bool, - ) -> PoolResult<()>; - - /// Dump all operations in the pool, used for debug methods - async fn debug_dump_mempool(&self, entry_point: Address) -> PoolResult>; - - /// Set reputations for entities, used for debug methods - async fn debug_set_reputations( - &self, - entry_point: Address, - reputations: Vec, - ) -> PoolResult<()>; - - /// Get reputation status given entrypoint and address - async fn get_reputation_status( - &self, - entry_point: Address, - address: Address, - ) -> PoolResult; - - /// Get stake status given entrypoint and address - async fn get_stake_status( - &self, - entry_point: Address, - address: Address, - ) -> PoolResult; - - /// Dump reputations for entities, used for debug methods - async fn debug_dump_reputation(&self, entry_point: Address) -> PoolResult>; -} diff --git a/crates/pool/src/server/remote/client.rs b/crates/pool/src/server/remote/client.rs index 79a275be..faf8e13f 100644 --- a/crates/pool/src/server/remote/client.rs +++ b/crates/pool/src/server/remote/client.rs @@ -13,13 +13,21 @@ use std::{pin::Pin, str::FromStr}; +use anyhow::Context; use ethers::types::{Address, H256}; use futures_util::Stream; use rundler_task::{ - grpc::protos::{from_bytes, ConversionError}, + grpc::protos::{from_bytes, ConversionError, ToProtoBytes}, server::{HealthCheck, ServerStatus}, }; -use rundler_types::{EntityUpdate, UserOperation}; +use rundler_types::{ + chain::ChainSpec, + pool::{ + NewHead, PaymasterMetadata, Pool, PoolError, PoolOperation, PoolResult, 
Reputation, + ReputationStatus, StakeStatus, + }, + EntityUpdate, UserOperationId, UserOperationVariant, +}; use rundler_utils::retry::{self, UnlimitedRetryOpts}; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; @@ -33,18 +41,16 @@ use tonic_health::{ }; use super::protos::{ - self, add_op_response, debug_clear_state_response, debug_dump_mempool_response, + self, add_op_response, admin_set_tracking_response, debug_clear_state_response, + debug_dump_mempool_response, debug_dump_paymaster_balances_response, debug_dump_reputation_response, debug_set_reputation_response, get_op_by_hash_response, get_ops_response, get_reputation_status_response, get_stake_status_response, - op_pool_client::OpPoolClient, remove_ops_response, update_entities_response, AddOpRequest, - DebugClearStateRequest, DebugDumpMempoolRequest, DebugDumpReputationRequest, + op_pool_client::OpPoolClient, remove_op_by_id_response, remove_ops_response, + update_entities_response, AddOpRequest, AdminSetTrackingRequest, DebugClearStateRequest, + DebugDumpMempoolRequest, DebugDumpPaymasterBalancesRequest, DebugDumpReputationRequest, DebugSetReputationRequest, GetOpsRequest, GetReputationStatusRequest, GetStakeStatusRequest, - RemoveOpsRequest, SubscribeNewHeadsRequest, SubscribeNewHeadsResponse, UpdateEntitiesRequest, -}; -use crate::{ - mempool::{PoolOperation, Reputation, StakeStatus}, - server::{error::PoolServerError, NewHead, PoolResult, PoolServer}, - ReputationStatus, + RemoveOpsRequest, ReputationStatus as ProtoReputationStatus, SubscribeNewHeadsRequest, + SubscribeNewHeadsResponse, TryUoFromProto, UpdateEntitiesRequest, }; /// Remote pool client @@ -52,17 +58,19 @@ use crate::{ /// Used to submit requests to a remote pool server. #[derive(Debug, Clone)] pub struct RemotePoolClient { + chain_spec: ChainSpec, op_pool_client: OpPoolClient, op_pool_health: HealthClient, } impl RemotePoolClient { /// Connect to a remote pool server, returning a client for submitting requests. 
- pub async fn connect(url: String) -> anyhow::Result { + pub async fn connect(url: String, chain_spec: ChainSpec) -> anyhow::Result { let op_pool_client = OpPoolClient::connect(url.clone()).await?; let op_pool_health = HealthClient::new(Channel::builder(Uri::from_str(&url)?).connect().await?); Ok(Self { + chain_spec, op_pool_client, op_pool_health, }) @@ -121,21 +129,23 @@ impl RemotePoolClient { } #[async_trait] -impl PoolServer for RemotePoolClient { +impl Pool for RemotePoolClient { async fn get_supported_entry_points(&self) -> PoolResult> { Ok(self .op_pool_client .clone() .get_supported_entry_points(protos::GetSupportedEntryPointsRequest {}) - .await? + .await + .map_err(anyhow::Error::from)? .into_inner() .entry_points .into_iter() .map(|ep| from_bytes(ep.as_slice())) - .collect::>()?) + .collect::>() + .map_err(anyhow::Error::from)?) } - async fn add_op(&self, entry_point: Address, op: UserOperation) -> PoolResult { + async fn add_op(&self, entry_point: Address, op: UserOperationVariant) -> PoolResult { let res = self .op_pool_client .clone() @@ -143,14 +153,15 @@ impl PoolServer for RemotePoolClient { entry_point: entry_point.as_bytes().to_vec(), op: Some(protos::UserOperation::from(&op)), }) - .await? + .await + .map_err(anyhow::Error::from)? .into_inner() .result; match res { Some(add_op_response::Result::Success(s)) => Ok(H256::from_slice(&s.hash)), Some(add_op_response::Result::Failure(f)) => Err(f.try_into()?), - None => Err(PoolServerError::Other(anyhow::anyhow!( + None => Err(PoolError::Other(anyhow::anyhow!( "should have received result from op pool" )))?, } @@ -170,7 +181,8 @@ impl PoolServer for RemotePoolClient { max_ops, shard_index, }) - .await? + .await + .map_err(anyhow::Error::from)? 
.into_inner() .result; @@ -178,11 +190,14 @@ impl PoolServer for RemotePoolClient { Some(get_ops_response::Result::Success(s)) => s .ops .into_iter() - .map(PoolOperation::try_from) - .map(|res| res.map_err(PoolServerError::from)) + .map(|proto_uo| { + PoolOperation::try_uo_from_proto(proto_uo, &self.chain_spec) + .context("should convert proto uo to pool operation") + }) + .map(|res| res.map_err(PoolError::from)) .collect(), Some(get_ops_response::Result::Failure(f)) => Err(f.try_into()?), - None => Err(PoolServerError::Other(anyhow::anyhow!( + None => Err(PoolError::Other(anyhow::anyhow!( "should have received result from op pool" )))?, } @@ -195,21 +210,26 @@ impl PoolServer for RemotePoolClient { .get_op_by_hash(protos::GetOpByHashRequest { hash: hash.as_bytes().to_vec(), }) - .await? + .await + .map_err(anyhow::Error::from)? .into_inner() .result; match res { - Some(get_op_by_hash_response::Result::Success(s)) => { - Ok(s.op.map(PoolOperation::try_from).transpose()?) - } + Some(get_op_by_hash_response::Result::Success(s)) => Ok(s + .op + .map(|proto_uo| { + PoolOperation::try_uo_from_proto(proto_uo, &self.chain_spec) + .context("should convert proto uo to pool operation") + }) + .transpose()?), Some(get_op_by_hash_response::Result::Failure(e)) => match e.error { Some(_) => Err(e.try_into()?), - None => Err(PoolServerError::Other(anyhow::anyhow!( + None => Err(PoolError::Other(anyhow::anyhow!( "should have received error from op pool" )))?, }, - None => Err(PoolServerError::Other(anyhow::anyhow!( + None => Err(PoolError::Other(anyhow::anyhow!( "should have received result from op pool" )))?, } @@ -223,14 +243,48 @@ impl PoolServer for RemotePoolClient { entry_point: entry_point.as_bytes().to_vec(), hashes: ops.into_iter().map(|h| h.as_bytes().to_vec()).collect(), }) - .await? + .await + .map_err(anyhow::Error::from)? 
.into_inner() .result; match res { Some(remove_ops_response::Result::Success(_)) => Ok(()), Some(remove_ops_response::Result::Failure(f)) => Err(f.try_into()?), - None => Err(PoolServerError::Other(anyhow::anyhow!( + None => Err(PoolError::Other(anyhow::anyhow!( + "should have received result from op pool" + )))?, + } + } + + async fn remove_op_by_id( + &self, + entry_point: Address, + id: UserOperationId, + ) -> PoolResult> { + let res = self + .op_pool_client + .clone() + .remove_op_by_id(protos::RemoveOpByIdRequest { + entry_point: entry_point.to_proto_bytes(), + sender: id.sender.to_proto_bytes(), + nonce: id.nonce.to_proto_bytes(), + }) + .await + .map_err(anyhow::Error::from)? + .into_inner() + .result; + + match res { + Some(remove_op_by_id_response::Result::Success(s)) => { + if s.hash.is_empty() { + Ok(None) + } else { + Ok(Some(H256::from_slice(&s.hash))) + } + } + Some(remove_op_by_id_response::Result::Failure(f)) => Err(f.try_into()?), + None => Err(PoolError::Other(anyhow::anyhow!( "should have received result from op pool" )))?, } @@ -251,14 +305,15 @@ impl PoolServer for RemotePoolClient { .map(protos::EntityUpdate::from) .collect(), }) - .await? + .await + .map_err(anyhow::Error::from)? .into_inner() .result; match res { Some(update_entities_response::Result::Success(_)) => Ok(()), Some(update_entities_response::Result::Failure(f)) => Err(f.try_into()?), - None => Err(PoolServerError::Other(anyhow::anyhow!( + None => Err(PoolError::Other(anyhow::anyhow!( "should have received result from op pool" )))?, } @@ -267,6 +322,7 @@ impl PoolServer for RemotePoolClient { async fn debug_clear_state( &self, clear_mempool: bool, + clear_paymaster: bool, clear_reputation: bool, ) -> PoolResult<()> { let res = self @@ -274,16 +330,46 @@ impl PoolServer for RemotePoolClient { .clone() .debug_clear_state(DebugClearStateRequest { clear_mempool, + clear_paymaster, clear_reputation, }) - .await? + .await + .map_err(anyhow::Error::from)? 
.into_inner() .result; match res { Some(debug_clear_state_response::Result::Success(_)) => Ok(()), Some(debug_clear_state_response::Result::Failure(f)) => Err(f.try_into()?), - None => Err(PoolServerError::Other(anyhow::anyhow!( + None => Err(PoolError::Other(anyhow::anyhow!( + "should have received result from op pool" + )))?, + } + } + + async fn admin_set_tracking( + &self, + entry_point: Address, + paymaster: bool, + reputation: bool, + ) -> PoolResult<()> { + let res = self + .op_pool_client + .clone() + .admin_set_tracking(AdminSetTrackingRequest { + entry_point: entry_point.as_bytes().to_vec(), + reputation, + paymaster, + }) + .await + .map_err(anyhow::Error::from)? + .into_inner() + .result; + + match res { + Some(admin_set_tracking_response::Result::Success(_)) => Ok(()), + Some(admin_set_tracking_response::Result::Failure(f)) => Err(f.try_into()?), + None => Err(PoolError::Other(anyhow::anyhow!( "should have received result from op pool" )))?, } @@ -296,7 +382,8 @@ impl PoolServer for RemotePoolClient { .debug_dump_mempool(DebugDumpMempoolRequest { entry_point: entry_point.as_bytes().to_vec(), }) - .await? + .await + .map_err(anyhow::Error::from)? .into_inner() .result; @@ -304,11 +391,14 @@ impl PoolServer for RemotePoolClient { Some(debug_dump_mempool_response::Result::Success(s)) => s .ops .into_iter() - .map(PoolOperation::try_from) - .map(|res| res.map_err(PoolServerError::from)) + .map(|proto_uo| { + PoolOperation::try_uo_from_proto(proto_uo, &self.chain_spec) + .context("should convert proto uo to pool operation") + }) + .map(|res| res.map_err(PoolError::from)) .collect(), Some(debug_dump_mempool_response::Result::Failure(f)) => Err(f.try_into()?), - None => Err(PoolServerError::Other(anyhow::anyhow!( + None => Err(PoolError::Other(anyhow::anyhow!( "should have received result from op pool" )))?, } @@ -329,14 +419,15 @@ impl PoolServer for RemotePoolClient { .map(protos::Reputation::from) .collect(), }) - .await? 
+ .await + .map_err(anyhow::Error::from)? .into_inner() .result; match res { Some(debug_set_reputation_response::Result::Success(_)) => Ok(()), Some(debug_set_reputation_response::Result::Failure(f)) => Err(f.try_into()?), - None => Err(PoolServerError::Other(anyhow::anyhow!( + None => Err(PoolError::Other(anyhow::anyhow!( "should have received result from op pool" )))?, } @@ -349,7 +440,8 @@ impl PoolServer for RemotePoolClient { .debug_dump_reputation(DebugDumpReputationRequest { entry_point: entry_point.as_bytes().to_vec(), }) - .await? + .await + .map_err(anyhow::Error::from)? .into_inner() .result; @@ -358,10 +450,39 @@ impl PoolServer for RemotePoolClient { .reputations .into_iter() .map(Reputation::try_from) - .map(|res| res.map_err(PoolServerError::from)) + .map(|res| res.map_err(anyhow::Error::from).map_err(PoolError::from)) .collect(), Some(debug_dump_reputation_response::Result::Failure(f)) => Err(f.try_into()?), - None => Err(PoolServerError::Other(anyhow::anyhow!( + None => Err(PoolError::Other(anyhow::anyhow!( + "should have received result from op pool" + )))?, + } + } + + async fn debug_dump_paymaster_balances( + &self, + entry_point: Address, + ) -> PoolResult> { + let res = self + .op_pool_client + .clone() + .debug_dump_paymaster_balances(DebugDumpPaymasterBalancesRequest { + entry_point: entry_point.as_bytes().to_vec(), + }) + .await + .map_err(anyhow::Error::from)? 
+ .into_inner() + .result; + + match res { + Some(debug_dump_paymaster_balances_response::Result::Success(s)) => s + .balances + .into_iter() + .map(PaymasterMetadata::try_from) + .map(|res| res.map_err(anyhow::Error::from).map_err(PoolError::from)) + .collect(), + Some(debug_dump_paymaster_balances_response::Result::Failure(f)) => Err(f.try_into()?), + None => Err(PoolError::Other(anyhow::anyhow!( "should have received result from op pool" )))?, } @@ -379,16 +500,20 @@ impl PoolServer for RemotePoolClient { entry_point: entry_point.as_bytes().to_vec(), address: address.as_bytes().to_vec(), }) - .await? + .await + .map_err(anyhow::Error::from)? .into_inner() .result; match res { Some(get_reputation_status_response::Result::Success(s)) => { - Ok(ReputationStatus::try_from(s.status)?) + Ok(ProtoReputationStatus::try_from(s.status) + .map_err(anyhow::Error::from)? + .try_into() + .map_err(anyhow::Error::from)?) } Some(get_reputation_status_response::Result::Failure(f)) => Err(f.try_into()?), - None => Err(PoolServerError::Other(anyhow::anyhow!( + None => Err(PoolError::Other(anyhow::anyhow!( "should have received result from op pool" )))?, } @@ -406,7 +531,8 @@ impl PoolServer for RemotePoolClient { entry_point: entry_point.as_bytes().to_vec(), address: address.as_bytes().to_vec(), }) - .await? + .await + .map_err(anyhow::Error::from)? .into_inner() .result; @@ -415,7 +541,7 @@ impl PoolServer for RemotePoolClient { Ok(s.status.unwrap_or_default().try_into()?) } Some(get_stake_status_response::Result::Failure(f)) => Err(f.try_into()?), - None => Err(PoolServerError::Other(anyhow::anyhow!( + None => Err(PoolError::Other(anyhow::anyhow!( "should have received result from op pool" )))?, } diff --git a/crates/pool/src/server/remote/error.rs b/crates/pool/src/server/remote/error.rs index a64e2b48..4a616ddf 100644 --- a/crates/pool/src/server/remote/error.rs +++ b/crates/pool/src/server/remote/error.rs @@ -12,60 +12,53 @@ // If not, see https://www.gnu.org/licenses/. 
use anyhow::{bail, Context}; -use ethers::types::Opcode; -use rundler_sim::{NeedsStakeInformation, PrecheckViolation, SimulationViolation, ViolationOpCode}; -use rundler_task::grpc::protos::{from_bytes, to_le_bytes, ConversionError}; -use rundler_types::StorageSlot; +use rundler_task::grpc::protos::{from_bytes, ToProtoBytes}; +use rundler_types::{ + pool::{ + MempoolError, NeedsStakeInformation, PoolError, PrecheckViolation, SimulationViolation, + }, + Opcode, StorageSlot, Timestamp, ValidationRevert, ViolationOpCode, +}; use super::protos::{ - mempool_error, precheck_violation_error, simulation_violation_error, - AccessedUndeployedContract, AggregatorValidationFailed, AssociatedStorageIsAlternateSender, - CallGasLimitTooLow, CallHadValue, CalledBannedEntryPointMethod, CodeHashChanged, DidNotRevert, - DiscardedOnInsertError, Entity, EntityThrottledError, EntityType, ExistingSenderWithInitCode, - FactoryCalledCreate2Twice, FactoryIsNotContract, InitCodeTooShort, InvalidSignature, - InvalidStorageAccess, MaxFeePerGasTooLow, MaxOperationsReachedError, - MaxPriorityFeePerGasTooLow, MempoolError as ProtoMempoolError, MultipleRolesViolation, - NotStaked, OperationAlreadyKnownError, OutOfGas, PaymasterBalanceTooLow, - PaymasterDepositTooLow, PaymasterIsNotContract, PaymasterTooShort, PreVerificationGasTooLow, - PrecheckViolationError as ProtoPrecheckViolationError, ReplacementUnderpricedError, - SenderAddressUsedAsAlternateEntity, SenderFundsTooLow, SenderIsNotContractAndNoInitCode, - SimulationViolationError as ProtoSimulationViolationError, TotalGasLimitTooHigh, - UnintendedRevert, UnintendedRevertWithMessage, UnknownEntryPointError, UnstakedAggregator, - UnstakedPaymasterContext, UnsupportedAggregatorError, UsedForbiddenOpcode, - UsedForbiddenPrecompile, VerificationGasLimitTooHigh, WrongNumberOfPhases, + mempool_error, precheck_violation_error, simulation_violation_error, validation_revert, + AccessedUndeployedContract, AccessedUnsupportedContractType, 
AggregatorValidationFailed, + AssociatedStorageDuringDeploy, AssociatedStorageIsAlternateSender, CallGasLimitTooLow, + CallHadValue, CalledBannedEntryPointMethod, CodeHashChanged, DidNotRevert, + DiscardedOnInsertError, Entity, EntityThrottledError, EntityType, EntryPointRevert, + ExistingSenderWithInitCode, FactoryCalledCreate2Twice, FactoryIsNotContract, + InvalidAccountSignature, InvalidPaymasterSignature, InvalidSignature, InvalidStorageAccess, + InvalidTimeRange, MaxFeePerGasTooLow, MaxOperationsReachedError, MaxPriorityFeePerGasTooLow, + MempoolError as ProtoMempoolError, MultipleRolesViolation, NotStaked, + OperationAlreadyKnownError, OperationDropTooSoon, OperationRevert, OutOfGas, + PaymasterBalanceTooLow, PaymasterDepositTooLow, PaymasterIsNotContract, + PreVerificationGasTooLow, PrecheckViolationError as ProtoPrecheckViolationError, + ReplacementUnderpricedError, SenderAddressUsedAsAlternateEntity, SenderFundsTooLow, + SenderIsNotContractAndNoInitCode, SimulationViolationError as ProtoSimulationViolationError, + TotalGasLimitTooHigh, UnintendedRevert, UnintendedRevertWithMessage, UnknownEntryPointError, + UnknownRevert, UnstakedAggregator, UnstakedPaymasterContext, UnsupportedAggregatorError, + UsedForbiddenOpcode, UsedForbiddenPrecompile, ValidationRevert as ProtoValidationRevert, + VerificationGasLimitBufferTooLow, VerificationGasLimitTooHigh, WrongNumberOfPhases, }; -use crate::{mempool::MempoolError, server::error::PoolServerError}; - -impl From for PoolServerError { - fn from(value: tonic::Status) -> Self { - PoolServerError::Other(anyhow::anyhow!(value.to_string())) - } -} - -impl From for PoolServerError { - fn from(value: ConversionError) -> Self { - PoolServerError::Other(anyhow::anyhow!(value.to_string())) - } -} -impl TryFrom for PoolServerError { +impl TryFrom for PoolError { type Error = anyhow::Error; fn try_from(value: ProtoMempoolError) -> Result { - Ok(PoolServerError::MempoolError(value.try_into()?)) + 
Ok(PoolError::MempoolError(value.try_into()?)) } } -impl From for ProtoMempoolError { - fn from(value: PoolServerError) -> Self { +impl From for ProtoMempoolError { + fn from(value: PoolError) -> Self { match value { - PoolServerError::MempoolError(e) => e.into(), - PoolServerError::UnexpectedResponse => ProtoMempoolError { + PoolError::MempoolError(e) => e.into(), + PoolError::UnexpectedResponse => ProtoMempoolError { error: Some(mempool_error::Error::Internal( "unexpected response from pool server".to_string(), )), }, - PoolServerError::Other(e) => ProtoMempoolError { + PoolError::Other(e) => ProtoMempoolError { error: Some(mempool_error::Error::Internal(e.to_string())), }, } @@ -90,7 +83,7 @@ impl TryFrom for MempoolError { Some(mempool_error::Error::MaxOperationsReached(e)) => { MempoolError::MaxOperationsReached( e.num_ops as usize, - from_bytes(&e.entity_address)?, + (&e.entity.context("should have entity in error")?).try_into()?, ) } Some(mempool_error::Error::EntityThrottled(e)) => MempoolError::EntityThrottled( @@ -109,7 +102,30 @@ impl TryFrom for MempoolError { Some(mempool_error::Error::UnknownEntryPoint(e)) => { MempoolError::UnknownEntryPoint(from_bytes(&e.entry_point)?) } - _ => bail!("unknown proto mempool error"), + Some(mempool_error::Error::InvalidSignature(_)) => { + MempoolError::SimulationViolation(SimulationViolation::InvalidSignature) + } + Some(mempool_error::Error::PaymasterBalanceTooLow(e)) => { + MempoolError::PaymasterBalanceTooLow( + from_bytes(&e.current_balance)?, + from_bytes(&e.required_balance)?, + ) + } + Some(mempool_error::Error::AssociatedStorageIsAlternateSender(_)) => { + MempoolError::AssociatedStorageIsAlternateSender + } + Some(mempool_error::Error::SenderAddressUsedAsAlternateEntity(e)) => { + MempoolError::SenderAddressUsedAsAlternateEntity(from_bytes(&e.sender_address)?) 
+ } + Some(mempool_error::Error::MultipleRolesViolation(e)) => { + MempoolError::MultipleRolesViolation( + (&e.entity.context("should have entity in error")?).try_into()?, + ) + } + Some(mempool_error::Error::OperationDropTooSoon(e)) => { + MempoolError::OperationDropTooSoon(e.added_at, e.attempted_at, e.must_wait) + } + None => bail!("unknown proto mempool error"), }) } } @@ -140,23 +156,23 @@ impl From for ProtoMempoolError { MempoolError::SenderAddressUsedAsAlternateEntity(addr) => ProtoMempoolError { error: Some(mempool_error::Error::SenderAddressUsedAsAlternateEntity( SenderAddressUsedAsAlternateEntity { - sender_address: addr.as_bytes().to_vec(), + sender_address: addr.to_proto_bytes(), }, )), }, MempoolError::ReplacementUnderpriced(fee, priority_fee) => ProtoMempoolError { error: Some(mempool_error::Error::ReplacementUnderpriced( ReplacementUnderpricedError { - current_fee: to_le_bytes(fee), - current_priority_fee: to_le_bytes(priority_fee), + current_fee: fee.to_proto_bytes(), + current_priority_fee: priority_fee.to_proto_bytes(), }, )), }, - MempoolError::MaxOperationsReached(ops, addr) => ProtoMempoolError { + MempoolError::MaxOperationsReached(ops, entity) => ProtoMempoolError { error: Some(mempool_error::Error::MaxOperationsReached( MaxOperationsReachedError { num_ops: ops as u64, - entity_address: addr.as_bytes().to_vec(), + entity: Some((&entity).into()), }, )), }, @@ -176,8 +192,8 @@ impl From for ProtoMempoolError { ProtoMempoolError { error: Some(mempool_error::Error::PaymasterBalanceTooLow( PaymasterBalanceTooLow { - current_balance: to_le_bytes(current_balance), - required_balance: to_le_bytes(required_balance), + current_balance: current_balance.to_proto_bytes(), + required_balance: required_balance.to_proto_bytes(), }, )), } @@ -191,17 +207,28 @@ impl From for ProtoMempoolError { MempoolError::UnsupportedAggregator(agg) => ProtoMempoolError { error: Some(mempool_error::Error::UnsupportedAggregator( UnsupportedAggregatorError { - 
aggregator_address: agg.as_bytes().to_vec(), + aggregator_address: agg.to_proto_bytes(), }, )), }, MempoolError::UnknownEntryPoint(entry_point) => ProtoMempoolError { error: Some(mempool_error::Error::UnknownEntryPoint( UnknownEntryPointError { - entry_point: entry_point.as_bytes().to_vec(), + entry_point: entry_point.to_proto_bytes(), }, )), }, + MempoolError::OperationDropTooSoon(added_at, attempted_at, must_wait) => { + ProtoMempoolError { + error: Some(mempool_error::Error::OperationDropTooSoon( + OperationDropTooSoon { + added_at, + attempted_at, + must_wait, + }, + )), + } + } } } } @@ -209,19 +236,12 @@ impl From for ProtoMempoolError { impl From for ProtoPrecheckViolationError { fn from(value: PrecheckViolation) -> Self { match value { - PrecheckViolation::InitCodeTooShort(length) => ProtoPrecheckViolationError { - violation: Some(precheck_violation_error::Violation::InitCodeTooShort( - InitCodeTooShort { - length: length as u64, - }, - )), - }, PrecheckViolation::SenderIsNotContractAndNoInitCode(addr) => { ProtoPrecheckViolationError { violation: Some( precheck_violation_error::Violation::SenderIsNotContractAndNoInitCode( SenderIsNotContractAndNoInitCode { - sender_address: addr.as_bytes().to_vec(), + sender_address: addr.to_proto_bytes(), }, ), ), @@ -231,7 +251,7 @@ impl From for ProtoPrecheckViolationError { violation: Some( precheck_violation_error::Violation::ExistingSenderWithInitCode( ExistingSenderWithInitCode { - sender_address: addr.as_bytes().to_vec(), + sender_address: addr.to_proto_bytes(), }, ), ), @@ -239,15 +259,15 @@ impl From for ProtoPrecheckViolationError { PrecheckViolation::FactoryIsNotContract(addr) => ProtoPrecheckViolationError { violation: Some(precheck_violation_error::Violation::FactoryIsNotContract( FactoryIsNotContract { - factory_address: addr.as_bytes().to_vec(), + factory_address: addr.to_proto_bytes(), }, )), }, PrecheckViolation::TotalGasLimitTooHigh(actual, max) => ProtoPrecheckViolationError { violation: 
Some(precheck_violation_error::Violation::TotalGasLimitTooHigh( TotalGasLimitTooHigh { - actual_gas: to_le_bytes(actual), - max_gas: to_le_bytes(max), + actual_gas: actual.to_proto_bytes(), + max_gas: max.to_proto_bytes(), }, )), }, @@ -256,8 +276,8 @@ impl From for ProtoPrecheckViolationError { violation: Some( precheck_violation_error::Violation::VerificationGasLimitTooHigh( VerificationGasLimitTooHigh { - actual_gas: to_le_bytes(actual), - max_gas: to_le_bytes(max), + actual_gas: actual.to_proto_bytes(), + max_gas: max.to_proto_bytes(), }, ), ), @@ -268,48 +288,41 @@ impl From for ProtoPrecheckViolationError { violation: Some( precheck_violation_error::Violation::PreVerificationGasTooLow( PreVerificationGasTooLow { - actual_gas: to_le_bytes(actual), - min_gas: to_le_bytes(min), + actual_gas: actual.to_proto_bytes(), + min_gas: min.to_proto_bytes(), }, ), ), } } - PrecheckViolation::PaymasterTooShort(length) => ProtoPrecheckViolationError { - violation: Some(precheck_violation_error::Violation::PaymasterTooShort( - PaymasterTooShort { - length: length as u64, - }, - )), - }, PrecheckViolation::PaymasterIsNotContract(addr) => ProtoPrecheckViolationError { violation: Some(precheck_violation_error::Violation::PaymasterIsNotContract( PaymasterIsNotContract { - paymaster_address: addr.as_bytes().to_vec(), + paymaster_address: addr.to_proto_bytes(), }, )), }, PrecheckViolation::PaymasterDepositTooLow(actual, min) => ProtoPrecheckViolationError { violation: Some(precheck_violation_error::Violation::PaymasterDepositTooLow( PaymasterDepositTooLow { - actual_deposit: to_le_bytes(actual), - min_deposit: to_le_bytes(min), + actual_deposit: actual.to_proto_bytes(), + min_deposit: min.to_proto_bytes(), }, )), }, PrecheckViolation::SenderFundsTooLow(actual, min) => ProtoPrecheckViolationError { violation: Some(precheck_violation_error::Violation::SenderFundsTooLow( SenderFundsTooLow { - actual_funds: to_le_bytes(actual), - min_funds: to_le_bytes(min), + actual_funds: 
actual.to_proto_bytes(), + min_funds: min.to_proto_bytes(), }, )), }, PrecheckViolation::MaxFeePerGasTooLow(actual, min) => ProtoPrecheckViolationError { violation: Some(precheck_violation_error::Violation::MaxFeePerGasTooLow( MaxFeePerGasTooLow { - actual_fee: to_le_bytes(actual), - min_fee: to_le_bytes(min), + actual_fee: actual.to_proto_bytes(), + min_fee: min.to_proto_bytes(), }, )), }, @@ -318,8 +331,8 @@ impl From for ProtoPrecheckViolationError { violation: Some( precheck_violation_error::Violation::MaxPriorityFeePerGasTooLow( MaxPriorityFeePerGasTooLow { - actual_fee: to_le_bytes(actual), - min_fee: to_le_bytes(min), + actual_fee: actual.to_proto_bytes(), + min_fee: min.to_proto_bytes(), }, ), ), @@ -328,8 +341,8 @@ impl From for ProtoPrecheckViolationError { PrecheckViolation::CallGasLimitTooLow(actual, min) => ProtoPrecheckViolationError { violation: Some(precheck_violation_error::Violation::CallGasLimitTooLow( CallGasLimitTooLow { - actual_gas_limit: to_le_bytes(actual), - min_gas_limit: to_le_bytes(min), + actual_gas_limit: actual.to_proto_bytes(), + min_gas_limit: min.to_proto_bytes(), }, )), }, @@ -342,9 +355,6 @@ impl TryFrom for PrecheckViolation { fn try_from(value: ProtoPrecheckViolationError) -> Result { Ok(match value.violation { - Some(precheck_violation_error::Violation::InitCodeTooShort(e)) => { - PrecheckViolation::InitCodeTooShort(e.length as usize) - } Some(precheck_violation_error::Violation::SenderIsNotContractAndNoInitCode(e)) => { PrecheckViolation::SenderIsNotContractAndNoInitCode(from_bytes(&e.sender_address)?) } @@ -372,9 +382,6 @@ impl TryFrom for PrecheckViolation { from_bytes(&e.min_gas)?, ) } - Some(precheck_violation_error::Violation::PaymasterTooShort(e)) => { - PrecheckViolation::PaymasterTooShort(e.length as usize) - } Some(precheck_violation_error::Violation::PaymasterIsNotContract(e)) => { PrecheckViolation::PaymasterIsNotContract(from_bytes(&e.paymaster_address)?) 
} @@ -423,6 +430,20 @@ impl From for ProtoSimulationViolationError { InvalidSignature {}, )), }, + SimulationViolation::InvalidAccountSignature => ProtoSimulationViolationError { + violation: Some( + simulation_violation_error::Violation::InvalidAccountSignature( + InvalidAccountSignature {}, + ), + ), + }, + SimulationViolation::InvalidPaymasterSignature => ProtoSimulationViolationError { + violation: Some( + simulation_violation_error::Violation::InvalidPaymasterSignature( + InvalidPaymasterSignature {}, + ), + ), + }, SimulationViolation::UnstakedPaymasterContext => ProtoSimulationViolationError { violation: Some( simulation_violation_error::Violation::UnstakedPaymasterContext( @@ -438,7 +459,7 @@ impl From for ProtoSimulationViolationError { entity: Some(Entity { kind: EntityType::from(et) as i32, address: maybe_address - .map_or(vec![], |addr| addr.as_bytes().to_vec()), + .map_or(vec![], |addr| addr.to_proto_bytes()), }), reason, }, @@ -451,7 +472,7 @@ impl From for ProtoSimulationViolationError { violation: Some(simulation_violation_error::Violation::UsedForbiddenOpcode( UsedForbiddenOpcode { entity: Some((&entity).into()), - contract_address: addr.as_bytes().to_vec(), + contract_address: addr.to_proto_bytes(), opcode: opcode.0 as u32, }, )), @@ -466,8 +487,8 @@ impl From for ProtoSimulationViolationError { simulation_violation_error::Violation::UsedForbiddenPrecompile( UsedForbiddenPrecompile { entity: Some((&entity).into()), - contract_address: contract_addr.as_bytes().to_vec(), - precompile_address: precompile_addr.as_bytes().to_vec(), + contract_address: contract_addr.to_proto_bytes(), + precompile_address: precompile_addr.to_proto_bytes(), }, ), ), @@ -476,18 +497,31 @@ impl From for ProtoSimulationViolationError { violation: Some( simulation_violation_error::Violation::FactoryCalledCreate2Twice( FactoryCalledCreate2Twice { - factory_address: addr.as_bytes().to_vec(), + factory_address: addr.to_proto_bytes(), }, ), ), }, + 
SimulationViolation::AssociatedStorageDuringDeploy(entity, slot) => { + ProtoSimulationViolationError { + violation: Some( + simulation_violation_error::Violation::AssociatedStorageDuringDeploy( + AssociatedStorageDuringDeploy { + entity: entity.as_ref().map(|e| e.into()), + contract_address: slot.address.to_proto_bytes(), + slot: slot.slot.to_proto_bytes(), + }, + ), + ), + } + } SimulationViolation::InvalidStorageAccess(entity, slot) => { ProtoSimulationViolationError { violation: Some(simulation_violation_error::Violation::InvalidStorageAccess( InvalidStorageAccess { entity: Some((&entity).into()), - contract_address: slot.address.as_bytes().to_vec(), - slot: to_le_bytes(slot.slot), + contract_address: slot.address.to_proto_bytes(), + slot: slot.slot.to_proto_bytes(), }, )), } @@ -495,12 +529,13 @@ impl From for ProtoSimulationViolationError { SimulationViolation::NotStaked(stake_data) => ProtoSimulationViolationError { violation: Some(simulation_violation_error::Violation::NotStaked( NotStaked { - entity: Some((&stake_data.entity).into()), - accessed_address: stake_data.accessed_address.as_bytes().to_vec(), + needs_stake: Some((&stake_data.needs_stake).into()), + accessing_entity: EntityType::from(stake_data.accessing_entity) as i32, + accessed_address: stake_data.accessed_address.to_proto_bytes(), accessed_entity: EntityType::from(stake_data.accessed_entity) as i32, - slot: to_le_bytes(stake_data.slot), - min_stake: to_le_bytes(stake_data.min_stake), - min_unstake_delay: to_le_bytes(stake_data.min_unstake_delay), + slot: stake_data.slot.to_proto_bytes(), + min_stake: stake_data.min_stake.to_proto_bytes(), + min_unstake_delay: stake_data.min_unstake_delay.to_proto_bytes(), }, )), }, @@ -510,13 +545,17 @@ impl From for ProtoSimulationViolationError { UnintendedRevert { entity: Some(Entity { kind: EntityType::from(et) as i32, - address: maybe_address - .map_or(vec![], |addr| addr.as_bytes().to_vec()), + address: maybe_address.map_or(vec![], |addr| 
addr.to_proto_bytes()), }), }, )), } } + SimulationViolation::ValidationRevert(revert) => ProtoSimulationViolationError { + violation: Some(simulation_violation_error::Violation::ValidationRevert( + revert.into(), + )), + }, SimulationViolation::DidNotRevert => ProtoSimulationViolationError { violation: Some(simulation_violation_error::Violation::DidNotRevert( DidNotRevert {}, @@ -550,7 +589,7 @@ impl From for ProtoSimulationViolationError { simulation_violation_error::Violation::AccessedUndeployedContract( AccessedUndeployedContract { entity: Some((&entity).into()), - contract_address: contract_addr.as_bytes().to_vec(), + contract_address: contract_addr.to_proto_bytes(), }, ), ), @@ -572,6 +611,16 @@ impl From for ProtoSimulationViolationError { CodeHashChanged {}, )), }, + SimulationViolation::InvalidTimeRange(valid_until, valid_after) => { + ProtoSimulationViolationError { + violation: Some(simulation_violation_error::Violation::InvalidTimeRange( + InvalidTimeRange { + valid_until: valid_until.seconds_since_epoch(), + valud_after: valid_after.seconds_since_epoch(), + }, + )), + } + } SimulationViolation::AggregatorValidationFailed => ProtoSimulationViolationError { violation: Some( simulation_violation_error::Violation::AggregatorValidationFailed( @@ -579,6 +628,30 @@ impl From for ProtoSimulationViolationError { ), ), }, + SimulationViolation::VerificationGasLimitBufferTooLow(limit, needed) => { + ProtoSimulationViolationError { + violation: Some( + simulation_violation_error::Violation::VerificationGasLimitBufferTooLow( + VerificationGasLimitBufferTooLow { + limit: limit.to_proto_bytes(), + needed: needed.to_proto_bytes(), + }, + ), + ), + } + } + SimulationViolation::AccessedUnsupportedContractType(contract_type, address) => { + ProtoSimulationViolationError { + violation: Some( + simulation_violation_error::Violation::AccessedUnsupportedContractType( + AccessedUnsupportedContractType { + contract_type, + contract_address: address.to_proto_bytes(), + }, + ), 
+ ), + } + } } } } @@ -591,6 +664,18 @@ impl TryFrom for SimulationViolation { Some(simulation_violation_error::Violation::InvalidSignature(_)) => { SimulationViolation::InvalidSignature } + Some(simulation_violation_error::Violation::InvalidTimeRange(e)) => { + SimulationViolation::InvalidTimeRange( + Timestamp::new(e.valid_until), + Timestamp::new(e.valud_after), + ) + } + Some(simulation_violation_error::Violation::InvalidAccountSignature(_)) => { + SimulationViolation::InvalidAccountSignature + } + Some(simulation_violation_error::Violation::InvalidPaymasterSignature(_)) => { + SimulationViolation::InvalidPaymasterSignature + } Some(simulation_violation_error::Violation::UnstakedPaymasterContext(_)) => { SimulationViolation::UnstakedPaymasterContext } @@ -627,6 +712,15 @@ impl TryFrom for SimulationViolation { Some(simulation_violation_error::Violation::FactoryCalledCreate2Twice(e)) => { SimulationViolation::FactoryCalledCreate2Twice(from_bytes(&e.factory_address)?) } + Some(simulation_violation_error::Violation::AssociatedStorageDuringDeploy(e)) => { + SimulationViolation::AssociatedStorageDuringDeploy( + e.entity.as_ref().map(|e| e.try_into()).transpose()?, + StorageSlot { + address: from_bytes(&e.contract_address)?, + slot: from_bytes(&e.slot)?, + }, + ) + } Some(simulation_violation_error::Violation::InvalidStorageAccess(e)) => { SimulationViolation::InvalidStorageAccess( (&e.entity.context("should have entity in error")?).try_into()?, @@ -637,6 +731,10 @@ impl TryFrom for SimulationViolation { ) } Some(simulation_violation_error::Violation::NotStaked(e)) => { + let accessing_entity = rundler_types::EntityType::try_from( + EntityType::try_from(e.accessing_entity).context("unknown entity type")?, + ) + .context("invalid entity type")?; let accessed_entity = match rundler_types::EntityType::try_from( EntityType::try_from(e.accessed_entity).context("unknown entity type")?, ) { @@ -645,7 +743,9 @@ impl TryFrom for SimulationViolation { }; 
SimulationViolation::NotStaked(Box::new(NeedsStakeInformation { - entity: (&e.entity.context("should have entity in error")?).try_into()?, + needs_stake: (&e.needs_stake.context("should have entity in error")?) + .try_into()?, + accessing_entity, accessed_address: from_bytes(&e.accessed_address)?, accessed_entity, slot: from_bytes(&e.slot)?, @@ -667,6 +767,9 @@ impl TryFrom for SimulationViolation { }, ) } + Some(simulation_violation_error::Violation::ValidationRevert(e)) => { + SimulationViolation::ValidationRevert(e.try_into()?) + } Some(simulation_violation_error::Violation::DidNotRevert(_)) => { SimulationViolation::DidNotRevert } @@ -703,6 +806,18 @@ impl TryFrom for SimulationViolation { Some(simulation_violation_error::Violation::AggregatorValidationFailed(_)) => { SimulationViolation::AggregatorValidationFailed } + Some(simulation_violation_error::Violation::VerificationGasLimitBufferTooLow(e)) => { + SimulationViolation::VerificationGasLimitBufferTooLow( + from_bytes(&e.limit)?, + from_bytes(&e.needed)?, + ) + } + Some(simulation_violation_error::Violation::AccessedUnsupportedContractType(e)) => { + SimulationViolation::AccessedUnsupportedContractType( + e.contract_type, + from_bytes(&e.contract_address)?, + ) + } None => { bail!("unknown proto mempool simulation violation") } @@ -710,6 +825,56 @@ impl TryFrom for SimulationViolation { } } +impl From for ProtoValidationRevert { + fn from(revert: ValidationRevert) -> Self { + let inner = match revert { + ValidationRevert::EntryPoint(reason) => { + validation_revert::Revert::EntryPoint(EntryPointRevert { reason }) + } + ValidationRevert::Operation { + entry_point_reason, + inner_revert_data, + inner_revert_reason, + } => validation_revert::Revert::Operation(OperationRevert { + entry_point_reason, + inner_revert_data: inner_revert_data.to_vec(), + inner_revert_reason: inner_revert_reason.unwrap_or_default(), + }), + ValidationRevert::Unknown(revert_bytes) => { + 
validation_revert::Revert::Unknown(UnknownRevert { + revert_bytes: revert_bytes.to_vec(), + }) + } + }; + ProtoValidationRevert { + revert: Some(inner), + } + } +} + +impl TryFrom for ValidationRevert { + type Error = anyhow::Error; + + fn try_from(value: ProtoValidationRevert) -> Result { + Ok(match value.revert { + Some(validation_revert::Revert::EntryPoint(e)) => { + ValidationRevert::EntryPoint(e.reason) + } + Some(validation_revert::Revert::Operation(e)) => ValidationRevert::Operation { + entry_point_reason: e.entry_point_reason, + inner_revert_data: e.inner_revert_data.into(), + inner_revert_reason: Some(e.inner_revert_reason).filter(|s| !s.is_empty()), + }, + Some(validation_revert::Revert::Unknown(e)) => { + ValidationRevert::Unknown(e.revert_bytes.into()) + } + None => { + bail!("unknown proto validation revert") + } + }) + } +} + #[cfg(test)] mod tests { use super::*; @@ -727,12 +892,16 @@ mod tests { #[test] fn test_precheck_error() { - let error = MempoolError::PrecheckViolation(PrecheckViolation::InitCodeTooShort(0)); + let error = MempoolError::PrecheckViolation(PrecheckViolation::SenderFundsTooLow( + 0.into(), + 0.into(), + )); let proto_error: ProtoMempoolError = error.into(); let error2 = proto_error.try_into().unwrap(); match error2 { - MempoolError::PrecheckViolation(PrecheckViolation::InitCodeTooShort(v)) => { - assert_eq!(v, 0) + MempoolError::PrecheckViolation(PrecheckViolation::SenderFundsTooLow(x, y)) => { + assert_eq!(x, 0.into()); + assert_eq!(y, 0.into()); } _ => panic!("wrong error type"), } diff --git a/crates/pool/src/server/remote/protos.rs b/crates/pool/src/server/remote/protos.rs index 0a2bc848..f4ba8413 100644 --- a/crates/pool/src/server/remote/protos.rs +++ b/crates/pool/src/server/remote/protos.rs @@ -13,19 +13,17 @@ use anyhow::{anyhow, Context}; use ethers::types::{Address, H256}; -use rundler_task::grpc::protos::{from_bytes, to_le_bytes, ConversionError}; +use rundler_task::grpc::protos::{from_bytes, ConversionError, 
ToProtoBytes}; use rundler_types::{ - Entity as RundlerEntity, EntityType as RundlerEntityType, EntityUpdate as RundlerEntityUpdate, - EntityUpdateType as RundlerEntityUpdateType, UserOperation as RundlerUserOperation, - ValidTimeRange, -}; - -use crate::{ - mempool::{ - PoolOperation, Reputation as PoolReputation, ReputationStatus as PoolReputationStatus, - StakeInfo as RundlerStakeInfo, StakeStatus as RundlerStakeStatus, + chain::ChainSpec, + pool::{ + NewHead as PoolNewHead, PaymasterMetadata as PoolPaymasterMetadata, PoolOperation, + Reputation as PoolReputation, ReputationStatus as PoolReputationStatus, + StakeStatus as RundlerStakeStatus, }, - server::NewHead as PoolNewHead, + v0_6, v0_7, Entity as RundlerEntity, EntityInfos, EntityType as RundlerEntityType, + EntityUpdate as RundlerEntityUpdate, EntityUpdateType as RundlerEntityUpdateType, + StakeInfo as RundlerStakeInfo, UserOperationVariant, ValidTimeRange, }; tonic::include_proto!("op_pool"); @@ -33,29 +31,46 @@ tonic::include_proto!("op_pool"); pub const OP_POOL_FILE_DESCRIPTOR_SET: &[u8] = tonic::include_file_descriptor_set!("op_pool_descriptor"); -impl From<&RundlerUserOperation> for UserOperation { - fn from(op: &RundlerUserOperation) -> Self { +impl From<&UserOperationVariant> for UserOperation { + fn from(op: &UserOperationVariant) -> Self { + match op { + UserOperationVariant::V0_6(op) => op.into(), + UserOperationVariant::V0_7(op) => op.into(), + } + } +} + +impl From<&v0_6::UserOperation> for UserOperation { + fn from(op: &v0_6::UserOperation) -> Self { + let op = UserOperationV06 { + sender: op.sender.to_proto_bytes(), + nonce: op.nonce.to_proto_bytes(), + init_code: op.init_code.to_proto_bytes(), + call_data: op.call_data.to_proto_bytes(), + call_gas_limit: op.call_gas_limit.to_proto_bytes(), + verification_gas_limit: op.verification_gas_limit.to_proto_bytes(), + pre_verification_gas: op.pre_verification_gas.to_proto_bytes(), + max_fee_per_gas: op.max_fee_per_gas.to_proto_bytes(), + 
max_priority_fee_per_gas: op.max_priority_fee_per_gas.to_proto_bytes(), + paymaster_and_data: op.paymaster_and_data.to_proto_bytes(), + signature: op.signature.to_proto_bytes(), + }; UserOperation { - sender: op.sender.0.to_vec(), - nonce: to_le_bytes(op.nonce), - init_code: op.init_code.to_vec(), - call_data: op.call_data.to_vec(), - call_gas_limit: to_le_bytes(op.call_gas_limit), - verification_gas_limit: to_le_bytes(op.verification_gas_limit), - pre_verification_gas: to_le_bytes(op.pre_verification_gas), - max_fee_per_gas: to_le_bytes(op.max_fee_per_gas), - max_priority_fee_per_gas: to_le_bytes(op.max_priority_fee_per_gas), - paymaster_and_data: op.paymaster_and_data.to_vec(), - signature: op.signature.to_vec(), + uo: Some(user_operation::Uo::V06(op)), } } } -impl TryFrom for RundlerUserOperation { - type Error = ConversionError; +pub trait TryUoFromProto: Sized { + fn try_uo_from_proto(value: T, chain_spec: &ChainSpec) -> Result; +} - fn try_from(op: UserOperation) -> Result { - Ok(RundlerUserOperation { +impl TryUoFromProto for v0_6::UserOperation { + fn try_uo_from_proto( + op: UserOperationV06, + _chain_spec: &ChainSpec, + ) -> Result { + Ok(v0_6::UserOperation { sender: from_bytes(&op.sender)?, nonce: from_bytes(&op.nonce)?, init_code: op.init_code.into(), @@ -71,6 +86,90 @@ impl TryFrom for RundlerUserOperation { } } +impl From<&v0_7::UserOperation> for UserOperation { + fn from(op: &v0_7::UserOperation) -> Self { + let op = UserOperationV07 { + sender: op.sender.to_proto_bytes(), + nonce: op.nonce.to_proto_bytes(), + call_data: op.call_data.to_proto_bytes(), + call_gas_limit: op.call_gas_limit.to_proto_bytes(), + verification_gas_limit: op.verification_gas_limit.to_proto_bytes(), + pre_verification_gas: op.pre_verification_gas.to_proto_bytes(), + max_fee_per_gas: op.max_fee_per_gas.to_proto_bytes(), + max_priority_fee_per_gas: op.max_priority_fee_per_gas.to_proto_bytes(), + signature: op.signature.to_proto_bytes(), + paymaster: op.paymaster.map(|p| 
p.to_proto_bytes()).unwrap_or_default(), + paymaster_data: op.paymaster_data.to_proto_bytes(), + paymaster_verification_gas_limit: op.paymaster_verification_gas_limit.to_proto_bytes(), + paymaster_post_op_gas_limit: op.paymaster_post_op_gas_limit.to_proto_bytes(), + factory: op.factory.map(|f| f.to_proto_bytes()).unwrap_or_default(), + factory_data: op.factory_data.to_proto_bytes(), + entry_point: op.entry_point.to_proto_bytes(), + chain_id: op.chain_id, + }; + UserOperation { + uo: Some(user_operation::Uo::V07(op)), + } + } +} + +impl TryUoFromProto for v0_7::UserOperation { + fn try_uo_from_proto( + op: UserOperationV07, + chain_spec: &ChainSpec, + ) -> Result { + let mut builder = v0_7::UserOperationBuilder::new( + chain_spec, + v0_7::UserOperationRequiredFields { + sender: from_bytes(&op.sender)?, + nonce: from_bytes(&op.nonce)?, + call_data: op.call_data.into(), + call_gas_limit: from_bytes(&op.call_gas_limit)?, + verification_gas_limit: from_bytes(&op.verification_gas_limit)?, + pre_verification_gas: from_bytes(&op.pre_verification_gas)?, + max_priority_fee_per_gas: from_bytes(&op.max_priority_fee_per_gas)?, + max_fee_per_gas: from_bytes(&op.max_fee_per_gas)?, + signature: op.signature.into(), + }, + ); + + if !op.paymaster.is_empty() { + builder = builder.paymaster( + from_bytes(&op.paymaster)?, + from_bytes(&op.paymaster_verification_gas_limit)?, + from_bytes(&op.paymaster_post_op_gas_limit)?, + op.paymaster_data.into(), + ); + } + + if !op.factory.is_empty() { + builder = builder.factory(from_bytes(&op.factory)?, op.factory_data.into()); + } + + Ok(builder.build()) + } +} + +impl TryUoFromProto for UserOperationVariant { + fn try_uo_from_proto( + op: UserOperation, + chain_spec: &ChainSpec, + ) -> Result { + let op = op + .uo + .expect("User operation should contain user operation oneof"); + + match op { + user_operation::Uo::V06(op) => Ok(UserOperationVariant::V0_6( + v0_6::UserOperation::try_uo_from_proto(op, chain_spec)?, + )), + 
user_operation::Uo::V07(op) => Ok(UserOperationVariant::V0_7( + v0_7::UserOperation::try_uo_from_proto(op, chain_spec)?, + )), + } + } +} + impl TryFrom for RundlerEntityType { type Error = ConversionError; @@ -147,7 +246,7 @@ impl From<&RundlerEntity> for Entity { fn from(entity: &RundlerEntity) -> Self { Entity { kind: EntityType::from(entity.kind).into(), - address: entity.address.as_bytes().to_vec(), + address: entity.address.to_proto_bytes(), } } } @@ -180,29 +279,29 @@ impl From for ReputationStatus { } } +impl TryFrom for PoolReputationStatus { + type Error = ConversionError; + + fn try_from(status: ReputationStatus) -> Result { + match status { + ReputationStatus::Ok => Ok(PoolReputationStatus::Ok), + ReputationStatus::Throttled => Ok(PoolReputationStatus::Throttled), + ReputationStatus::Banned => Ok(PoolReputationStatus::Banned), + ReputationStatus::Unspecified => Err(ConversionError::InvalidEnumValue(status as i32)), + } + } +} + impl From for Reputation { fn from(rep: PoolReputation) -> Self { Reputation { - address: rep.address.as_bytes().to_vec(), + address: rep.address.to_proto_bytes(), ops_seen: rep.ops_seen, ops_included: rep.ops_included, } } } -impl TryFrom for PoolReputationStatus { - type Error = ConversionError; - - fn try_from(status: i32) -> Result { - match status { - x if x == ReputationStatus::Ok as i32 => Ok(Self::Ok), - x if x == ReputationStatus::Throttled as i32 => Ok(Self::Throttled), - x if x == ReputationStatus::Banned as i32 => Ok(Self::Banned), - _ => Err(ConversionError::InvalidEnumValue(status)), - } - } -} - impl TryFrom for PoolReputation { type Error = ConversionError; @@ -224,7 +323,7 @@ impl TryFrom for RundlerStakeStatus { is_staked: stake_status.is_staked, stake_info: RundlerStakeInfo { stake: stake_info.stake.into(), - unstake_delay_sec: stake_info.unstake_delay_sec, + unstake_delay_sec: stake_info.unstake_delay_sec.into(), }, }); } @@ -238,8 +337,8 @@ impl From for StakeStatus { StakeStatus { is_staked: 
stake_status.is_staked, stake_info: Some(StakeInfo { - stake: stake_status.stake_info.stake as u64, - unstake_delay_sec: stake_status.stake_info.unstake_delay_sec, + stake: stake_status.stake_info.stake.as_u64(), + unstake_delay_sec: stake_status.stake_info.unstake_delay_sec.as_u32(), }), } } @@ -249,28 +348,24 @@ impl From<&PoolOperation> for MempoolOp { fn from(op: &PoolOperation) -> Self { MempoolOp { uo: Some(UserOperation::from(&op.uo)), - entry_point: op.entry_point.as_bytes().to_vec(), - aggregator: op.aggregator.map_or(vec![], |a| a.as_bytes().to_vec()), + entry_point: op.entry_point.to_proto_bytes(), + aggregator: op.aggregator.map_or(vec![], |a| a.to_proto_bytes()), valid_after: op.valid_time_range.valid_after.seconds_since_epoch(), valid_until: op.valid_time_range.valid_until.seconds_since_epoch(), - expected_code_hash: op.expected_code_hash.as_bytes().to_vec(), - sim_block_hash: op.sim_block_hash.as_bytes().to_vec(), - entities_needing_stake: op - .entities_needing_stake - .iter() - .map(|e| EntityType::from(*e).into()) - .collect(), + expected_code_hash: op.expected_code_hash.to_proto_bytes(), + sim_block_hash: op.sim_block_hash.to_proto_bytes(), account_is_staked: op.account_is_staked, } } } pub const MISSING_USER_OP_ERR_STR: &str = "Mempool op should contain user operation"; -impl TryFrom for PoolOperation { - type Error = anyhow::Error; - - fn try_from(op: MempoolOp) -> Result { - let uo = op.uo.context(MISSING_USER_OP_ERR_STR)?.try_into()?; +impl TryUoFromProto for PoolOperation { + fn try_uo_from_proto(op: MempoolOp, chain_spec: &ChainSpec) -> Result { + let uo = UserOperationVariant::try_uo_from_proto( + op.uo.context(MISSING_USER_OP_ERR_STR)?, + chain_spec, + )?; let entry_point = from_bytes(&op.entry_point)?; @@ -284,15 +379,6 @@ impl TryFrom for PoolOperation { let expected_code_hash = H256::from_slice(&op.expected_code_hash); let sim_block_hash = H256::from_slice(&op.sim_block_hash); - let entities_needing_stake = op - .entities_needing_stake 
- .into_iter() - .map(|e| { - let pe = - EntityType::try_from(e).map_err(|_| ConversionError::InvalidEnumValue(e))?; - pe.try_into() - }) - .collect::, ConversionError>>()?; Ok(PoolOperation { uo, @@ -300,11 +386,10 @@ impl TryFrom for PoolOperation { aggregator, valid_time_range, expected_code_hash, - entities_needing_stake, sim_block_hash, sim_block_number: 0, account_is_staked: op.account_is_staked, - entity_infos: rundler_sim::EntityInfos::default(), + entity_infos: EntityInfos::default(), }) } } @@ -323,8 +408,30 @@ impl TryFrom for PoolNewHead { impl From for NewHead { fn from(head: PoolNewHead) -> Self { Self { - block_hash: head.block_hash.as_bytes().to_vec(), + block_hash: head.block_hash.to_proto_bytes(), block_number: head.block_number, } } } + +impl TryFrom for PoolPaymasterMetadata { + type Error = ConversionError; + + fn try_from(paymaster_balance: PaymasterBalance) -> Result { + Ok(Self { + address: from_bytes(&paymaster_balance.address)?, + confirmed_balance: from_bytes(&paymaster_balance.confirmed_balance)?, + pending_balance: from_bytes(&paymaster_balance.pending_balance)?, + }) + } +} + +impl From for PaymasterBalance { + fn from(paymaster_metadata: PoolPaymasterMetadata) -> Self { + Self { + address: paymaster_metadata.address.as_bytes().to_vec(), + confirmed_balance: paymaster_metadata.confirmed_balance.to_proto_bytes(), + pending_balance: paymaster_metadata.pending_balance.to_proto_bytes(), + } + } +} diff --git a/crates/pool/src/server/remote/server.rs b/crates/pool/src/server/remote/server.rs index 7438923d..bd2071bb 100644 --- a/crates/pool/src/server/remote/server.rs +++ b/crates/pool/src/server/remote/server.rs @@ -23,45 +23,50 @@ use async_trait::async_trait; use ethers::types::{Address, H256}; use futures_util::StreamExt; use rundler_task::grpc::{metrics::GrpcMetricsLayer, protos::from_bytes}; -use rundler_types::EntityUpdate; +use rundler_types::{ + chain::ChainSpec, + pool::{Pool, Reputation}, + EntityUpdate, UserOperationId, 
UserOperationVariant, +}; use tokio::{sync::mpsc, task::JoinHandle}; use tokio_stream::wrappers::UnboundedReceiverStream; use tokio_util::sync::CancellationToken; use tonic::{transport::Server, Request, Response, Result, Status}; use super::protos::{ - add_op_response, debug_clear_state_response, debug_dump_mempool_response, + add_op_response, admin_set_tracking_response, debug_clear_state_response, + debug_dump_mempool_response, debug_dump_paymaster_balances_response, debug_dump_reputation_response, debug_set_reputation_response, get_op_by_hash_response, get_ops_response, get_reputation_status_response, get_stake_status_response, op_pool_server::{OpPool, OpPoolServer}, - remove_ops_response, update_entities_response, AddOpRequest, AddOpResponse, AddOpSuccess, - DebugClearStateRequest, DebugClearStateResponse, DebugClearStateSuccess, - DebugDumpMempoolRequest, DebugDumpMempoolResponse, DebugDumpMempoolSuccess, - DebugDumpReputationRequest, DebugDumpReputationResponse, DebugDumpReputationSuccess, - DebugSetReputationRequest, DebugSetReputationResponse, DebugSetReputationSuccess, - GetOpByHashRequest, GetOpByHashResponse, GetOpByHashSuccess, GetOpsRequest, GetOpsResponse, - GetOpsSuccess, GetReputationStatusRequest, GetReputationStatusResponse, - GetReputationStatusSuccess, GetStakeStatusRequest, GetStakeStatusResponse, - GetStakeStatusSuccess, GetSupportedEntryPointsRequest, GetSupportedEntryPointsResponse, - MempoolOp, RemoveOpsRequest, RemoveOpsResponse, RemoveOpsSuccess, SubscribeNewHeadsRequest, - SubscribeNewHeadsResponse, UpdateEntitiesRequest, UpdateEntitiesResponse, - UpdateEntitiesSuccess, OP_POOL_FILE_DESCRIPTOR_SET, -}; -use crate::{ - mempool::Reputation, - server::{local::LocalPoolHandle, PoolServer}, + remove_op_by_id_response, remove_ops_response, update_entities_response, AddOpRequest, + AddOpResponse, AddOpSuccess, AdminSetTrackingRequest, AdminSetTrackingResponse, + AdminSetTrackingSuccess, DebugClearStateRequest, DebugClearStateResponse, + 
DebugClearStateSuccess, DebugDumpMempoolRequest, DebugDumpMempoolResponse, + DebugDumpMempoolSuccess, DebugDumpPaymasterBalancesRequest, DebugDumpPaymasterBalancesResponse, + DebugDumpPaymasterBalancesSuccess, DebugDumpReputationRequest, DebugDumpReputationResponse, + DebugDumpReputationSuccess, DebugSetReputationRequest, DebugSetReputationResponse, + DebugSetReputationSuccess, GetOpByHashRequest, GetOpByHashResponse, GetOpByHashSuccess, + GetOpsRequest, GetOpsResponse, GetOpsSuccess, GetReputationStatusRequest, + GetReputationStatusResponse, GetReputationStatusSuccess, GetStakeStatusRequest, + GetStakeStatusResponse, GetStakeStatusSuccess, GetSupportedEntryPointsRequest, + GetSupportedEntryPointsResponse, MempoolOp, RemoveOpByIdRequest, RemoveOpByIdResponse, + RemoveOpByIdSuccess, RemoveOpsRequest, RemoveOpsResponse, RemoveOpsSuccess, ReputationStatus, + SubscribeNewHeadsRequest, SubscribeNewHeadsResponse, TryUoFromProto, UpdateEntitiesRequest, + UpdateEntitiesResponse, UpdateEntitiesSuccess, OP_POOL_FILE_DESCRIPTOR_SET, }; +use crate::server::local::LocalPoolHandle; const MAX_REMOTE_BLOCK_SUBSCRIPTIONS: usize = 32; pub(crate) async fn spawn_remote_mempool_server( - chain_id: u64, + chain_spec: ChainSpec, local_pool: LocalPoolHandle, addr: SocketAddr, shutdown_token: CancellationToken, ) -> anyhow::Result>> { // gRPC server - let pool_impl = OpPoolImpl::new(chain_id, local_pool); + let pool_impl = OpPoolImpl::new(chain_spec, local_pool); let op_pool_server = OpPoolServer::new(pool_impl); let reflection_service = tonic_reflection::server::Builder::configure() .register_encoded_file_descriptor_set(OP_POOL_FILE_DESCRIPTOR_SET) @@ -89,15 +94,15 @@ pub(crate) async fn spawn_remote_mempool_server( } struct OpPoolImpl { - chain_id: u64, + chain_spec: ChainSpec, local_pool: LocalPoolHandle, num_block_subscriptions: Arc, } impl OpPoolImpl { - pub(crate) fn new(chain_id: u64, local_pool: LocalPoolHandle) -> Self { + pub(crate) fn new(chain_spec: ChainSpec, local_pool: 
LocalPoolHandle) -> Self { Self { - chain_id, + chain_spec, local_pool, num_block_subscriptions: Arc::new(AtomicUsize::new(0)), } @@ -121,7 +126,7 @@ impl OpPool for OpPoolImpl { ) -> Result> { let resp = match self.local_pool.get_supported_entry_points().await { Ok(entry_points) => GetSupportedEntryPointsResponse { - chain_id: self.chain_id, + chain_id: self.chain_spec.id, entry_points: entry_points .into_iter() .map(|ep| ep.as_bytes().to_vec()) @@ -142,9 +147,10 @@ impl OpPool for OpPoolImpl { let proto_op = req .op .ok_or_else(|| Status::invalid_argument("Operation is required in AddOpRequest"))?; - let uo = proto_op.try_into().map_err(|e| { - Status::invalid_argument(format!("Failed to convert to UserOperation: {e}")) - })?; + let uo = + UserOperationVariant::try_uo_from_proto(proto_op, &self.chain_spec).map_err(|e| { + Status::invalid_argument(format!("Failed to convert to UserOperation: {e}")) + })?; let resp = match self.local_pool.add_op(ep, uo).await { Ok(hash) => AddOpResponse { @@ -238,6 +244,41 @@ impl OpPool for OpPoolImpl { Ok(Response::new(resp)) } + async fn remove_op_by_id( + &self, + request: Request, + ) -> Result> { + let req = request.into_inner(); + let ep = self.get_entry_point(&req.entry_point)?; + + let resp = match self + .local_pool + .remove_op_by_id( + ep, + UserOperationId { + sender: from_bytes(&req.sender) + .map_err(|e| Status::invalid_argument(format!("Invalid sender: {e}")))?, + nonce: from_bytes(&req.nonce) + .map_err(|e| Status::invalid_argument(format!("Invalid nonce: {e}")))?, + }, + ) + .await + { + Ok(hash) => RemoveOpByIdResponse { + result: Some(remove_op_by_id_response::Result::Success( + RemoveOpByIdSuccess { + hash: hash.map_or(vec![], |h| h.as_bytes().to_vec()), + }, + )), + }, + Err(error) => RemoveOpByIdResponse { + result: Some(remove_op_by_id_response::Result::Failure(error.into())), + }, + }; + + Ok(Response::new(resp)) + } + async fn update_entities( &self, request: Request, @@ -274,7 +315,7 @@ impl OpPool for 
OpPoolImpl { let req = request.into_inner(); let resp = match self .local_pool - .debug_clear_state(req.clear_mempool, req.clear_reputation) + .debug_clear_state(req.clear_mempool, req.clear_paymaster, req.clear_reputation) .await { Ok(_) => DebugClearStateResponse { @@ -290,6 +331,30 @@ impl OpPool for OpPoolImpl { Ok(Response::new(resp)) } + async fn admin_set_tracking( + &self, + request: Request, + ) -> Result> { + let req = request.into_inner(); + let ep = self.get_entry_point(&req.entry_point)?; + let resp = match self + .local_pool + .admin_set_tracking(ep, req.paymaster, req.reputation) + .await + { + Ok(_) => AdminSetTrackingResponse { + result: Some(admin_set_tracking_response::Result::Success( + AdminSetTrackingSuccess {}, + )), + }, + Err(error) => AdminSetTrackingResponse { + result: Some(admin_set_tracking_response::Result::Failure(error.into())), + }, + }; + + Ok(Response::new(resp)) + } + async fn debug_dump_mempool( &self, request: Request, @@ -367,7 +432,7 @@ impl OpPool for OpPoolImpl { Ok(status) => GetReputationStatusResponse { result: Some(get_reputation_status_response::Result::Success( GetReputationStatusSuccess { - status: status as i32, + status: ReputationStatus::from(status).into(), }, )), }, @@ -431,6 +496,31 @@ impl OpPool for OpPoolImpl { Ok(Response::new(resp)) } + async fn debug_dump_paymaster_balances( + &self, + request: Request, + ) -> Result> { + let req = request.into_inner(); + let ep = self.get_entry_point(&req.entry_point)?; + + let resp = match self.local_pool.debug_dump_paymaster_balances(ep).await { + Ok(balances) => DebugDumpPaymasterBalancesResponse { + result: Some(debug_dump_paymaster_balances_response::Result::Success( + DebugDumpPaymasterBalancesSuccess { + balances: balances.into_iter().map(Into::into).collect(), + }, + )), + }, + Err(error) => DebugDumpPaymasterBalancesResponse { + result: Some(debug_dump_paymaster_balances_response::Result::Failure( + error.into(), + )), + }, + }; + + Ok(Response::new(resp)) + } 
+ type SubscribeNewHeadsStream = UnboundedReceiverStream>; async fn subscribe_new_heads( diff --git a/crates/pool/src/task.rs b/crates/pool/src/task.rs index e2ac650d..83187926 100644 --- a/crates/pool/src/task.rs +++ b/crates/pool/src/task.rs @@ -16,37 +16,40 @@ use std::{collections::HashMap, net::SocketAddr, sync::Arc, time::Duration}; use anyhow::{bail, Context}; use async_trait::async_trait; use ethers::providers::Middleware; -use rundler_provider::{EntryPoint, PaymasterHelper, Provider}; +use rundler_provider::{EntryPointProvider, EthersEntryPointV0_6, EthersEntryPointV0_7, Provider}; use rundler_sim::{ - Prechecker, PrecheckerImpl, SimulateValidationTracerImpl, Simulator, SimulatorImpl, + simulation::{self, UnsafeSimulator}, + PrecheckerImpl, Simulator, }; use rundler_task::Task; -use rundler_types::contracts::{ - i_entry_point::IEntryPoint, paymaster_helper::PaymasterHelper as PaymasterHelperContract, -}; -use rundler_utils::{emit::WithEntryPoint, eth, handle}; +use rundler_types::{chain::ChainSpec, EntryPointVersion, UserOperation, UserOperationVariant}; +use rundler_utils::{emit::WithEntryPoint, handle}; use tokio::{sync::broadcast, try_join}; use tokio_util::sync::CancellationToken; -use super::mempool::{HourlyMovingAverageReputation, PoolConfig, ReputationParams}; +use super::mempool::PoolConfig; use crate::{ chain::{self, Chain}, emit::OpPoolEvent, - mempool::UoPool, + mempool::{ + AddressReputation, Mempool, PaymasterConfig, PaymasterTracker, ReputationParams, UoPool, + }, server::{spawn_remote_mempool_server, LocalPoolBuilder}, }; /// Arguments for the pool task. #[derive(Debug)] pub struct Args { + /// Chain specification. + pub chain_spec: ChainSpec, + /// True if using unsafe mode. + pub unsafe_mode: bool, /// HTTP URL for the full node. pub http_url: String, - /// Poll interval for full node requests. 
- pub http_poll_interval: Duration, - /// ID of the chain this pool is tracking - pub chain_id: u64, - /// Number of blocks to keep in the chain history. - pub chain_history_size: u64, + /// Interval to poll the chain for updates. + pub chain_poll_interval: Duration, + /// Number of times to retry a block sync at the `chain_poll_interval` before abandoning + pub chain_max_sync_retries: u64, /// Pool configurations. pub pool_configs: Vec, /// Address to bind the remote mempool server to, if any. @@ -67,22 +70,26 @@ pub struct PoolTask { #[async_trait] impl Task for PoolTask { async fn run(mut self: Box, shutdown_token: CancellationToken) -> anyhow::Result<()> { - let chain_id = self.args.chain_id; + let chain_id = self.args.chain_spec.id; tracing::info!("Chain id: {chain_id}"); tracing::info!("Http url: {:?}", self.args.http_url); // create chain let chain_settings = chain::Settings { - history_size: self.args.chain_history_size, - poll_interval: self.args.http_poll_interval, + history_size: self.args.chain_spec.chain_history_size, + poll_interval: self.args.chain_poll_interval, + max_sync_retries: self.args.chain_max_sync_retries, entry_point_addresses: self .args .pool_configs .iter() - .map(|config| config.entry_point) + .map(|config| (config.entry_point, config.entry_point_version)) .collect(), }; - let provider = eth::new_provider(&self.args.http_url, Some(self.args.http_poll_interval))?; + let provider = rundler_provider::new_provider( + &self.args.http_url, + Some(self.args.chain_poll_interval), + )?; let chain = Chain::new(provider.clone(), chain_settings); let (update_sender, _) = broadcast::channel(self.args.chain_update_channel_capacity); let chain_handle = chain.spawn_watcher(update_sender.clone(), shutdown_token.clone()); @@ -90,12 +97,35 @@ impl Task for PoolTask { // create mempools let mut mempools = HashMap::new(); for pool_config in &self.args.pool_configs { - let pool = - PoolTask::create_mempool(pool_config, self.event_sender.clone(), 
provider.clone()) - .await + match pool_config.entry_point_version { + EntryPointVersion::V0_6 => { + let pool = PoolTask::create_mempool_v0_6( + self.args.chain_spec.clone(), + pool_config, + self.args.unsafe_mode, + self.event_sender.clone(), + provider.clone(), + ) + .context("should have created mempool")?; + + mempools.insert(pool_config.entry_point, pool); + } + EntryPointVersion::V0_7 => { + let pool = PoolTask::create_mempool_v0_7( + self.args.chain_spec.clone(), + pool_config, + self.args.unsafe_mode, + self.event_sender.clone(), + provider.clone(), + ) .context("should have created mempool")?; - mempools.insert(pool_config.entry_point, Arc::new(pool)); + mempools.insert(pool_config.entry_point, pool); + } + EntryPointVersion::Unspecified => { + bail!("Unsupported entry point version"); + } + } } let pool_handle = self.pool_builder.get_handle(); @@ -105,8 +135,13 @@ impl Task for PoolTask { let remote_handle = match self.args.remote_address { Some(addr) => { - spawn_remote_mempool_server(self.args.chain_id, pool_handle, addr, shutdown_token) - .await? + spawn_remote_mempool_server( + self.args.chain_spec.clone(), + pool_handle, + addr, + shutdown_token, + ) + .await? } None => tokio::spawn(async { Ok(()) }), }; @@ -149,56 +184,149 @@ impl PoolTask { Box::new(self) } - async fn create_mempool( + fn create_mempool_v0_6( + chain_spec: ChainSpec, pool_config: &PoolConfig, + unsafe_mode: bool, event_sender: broadcast::Sender>, provider: Arc

, - ) -> anyhow::Result< - UoPool< - HourlyMovingAverageReputation, - impl Prechecker, - impl Simulator, - impl EntryPoint, - impl PaymasterHelper, - >, - > { - // Reputation manager - let reputation = Arc::new(HourlyMovingAverageReputation::new( - ReputationParams::bundler_default(), - pool_config.blocklist.clone(), - pool_config.allowlist.clone(), - )); - // Start reputation manager - let reputation_runner = Arc::clone(&reputation); - tokio::spawn(async move { reputation_runner.run().await }); + ) -> anyhow::Result> { + let ep = EthersEntryPointV0_6::new( + pool_config.entry_point, + &chain_spec, + pool_config.sim_settings.max_simulate_handle_ops_gas, + Arc::clone(&provider), + ); + + if unsafe_mode { + let simulator = UnsafeSimulator::new( + Arc::clone(&provider), + ep.clone(), + pool_config.sim_settings.clone(), + ); + Self::create_mempool( + chain_spec, + pool_config, + event_sender, + provider, + ep, + simulator, + ) + } else { + let simulator = simulation::new_v0_6_simulator( + Arc::clone(&provider), + ep.clone(), + pool_config.sim_settings.clone(), + pool_config.mempool_channel_configs.clone(), + ); + Self::create_mempool( + chain_spec, + pool_config, + event_sender, + provider, + ep, + simulator, + ) + } + } + + fn create_mempool_v0_7( + chain_spec: ChainSpec, + pool_config: &PoolConfig, + unsafe_mode: bool, + event_sender: broadcast::Sender>, + provider: Arc

, + ) -> anyhow::Result> { + let ep = EthersEntryPointV0_7::new( + pool_config.entry_point, + &chain_spec, + pool_config.sim_settings.max_simulate_handle_ops_gas, + Arc::clone(&provider), + ); - let i_entry_point = IEntryPoint::new(pool_config.entry_point, Arc::clone(&provider)); - let paymaster_helper = - PaymasterHelperContract::new(pool_config.entry_point, Arc::clone(&provider)); + if unsafe_mode { + let simulator = UnsafeSimulator::new( + Arc::clone(&provider), + ep.clone(), + pool_config.sim_settings.clone(), + ); + Self::create_mempool( + chain_spec, + pool_config, + event_sender, + provider, + ep, + simulator, + ) + } else { + let simulator = simulation::new_v0_7_simulator( + Arc::clone(&provider), + ep.clone(), + pool_config.sim_settings.clone(), + pool_config.mempool_channel_configs.clone(), + ); + Self::create_mempool( + chain_spec, + pool_config, + event_sender, + provider, + ep, + simulator, + ) + } + } - let simulate_validation_tracer = - SimulateValidationTracerImpl::new(Arc::clone(&provider), i_entry_point.clone()); + fn create_mempool( + chain_spec: ChainSpec, + pool_config: &PoolConfig, + event_sender: broadcast::Sender>, + provider: Arc

, + ep: E, + simulator: S, + ) -> anyhow::Result> + where + UO: UserOperation + From + Into, + UserOperationVariant: From, + P: Provider, + E: EntryPointProvider + Clone, + S: Simulator, + { let prechecker = PrecheckerImpl::new( + chain_spec, Arc::clone(&provider), - i_entry_point.clone(), + ep.clone(), pool_config.precheck_settings, ); - let simulator = SimulatorImpl::new( - Arc::clone(&provider), - i_entry_point.address(), - simulate_validation_tracer, - pool_config.sim_settings, - pool_config.mempool_channel_configs.clone(), + + let reputation = Arc::new(AddressReputation::new( + ReputationParams::new(pool_config.reputation_tracking_enabled), + pool_config.blocklist.clone().unwrap_or_default(), + pool_config.allowlist.clone().unwrap_or_default(), + )); + + // Start reputation manager + let reputation_runner = Arc::clone(&reputation); + tokio::spawn(async move { reputation_runner.run().await }); + + let paymaster = PaymasterTracker::new( + ep.clone(), + PaymasterConfig::new( + pool_config.sim_settings.min_stake_value, + pool_config.sim_settings.min_unstake_delay, + pool_config.paymaster_tracking_enabled, + pool_config.paymaster_cache_length, + ), ); - Ok(UoPool::new( + let uo_pool = UoPool::new( pool_config.clone(), - Arc::clone(&reputation), event_sender, prechecker, simulator, - i_entry_point, - paymaster_helper, - )) + paymaster, + reputation, + ); + + Ok(Arc::new(uo_pool)) } } diff --git a/crates/provider/Cargo.toml b/crates/provider/Cargo.toml index d4d40c9e..26394568 100644 --- a/crates/provider/Cargo.toml +++ b/crates/provider/Cargo.toml @@ -12,12 +12,20 @@ rundler-utils = { path = "../utils" } anyhow.workspace = true async-trait.workspace = true +auto_impl = "1.2.0" ethers.workspace = true +metrics.workspace = true +reqwest.workspace = true serde.workspace = true tokio.workspace = true thiserror.workspace = true +tracing.workspace = true +parse-display.workspace = true mockall = {workspace = true, optional = true } [features] test-utils = [ "mockall" ] + 
+[dev-dependencies] +rundler-provider = { path = ".", features = ["test-utils"] } diff --git a/crates/provider/src/ethers/entry_point.rs b/crates/provider/src/ethers/entry_point.rs deleted file mode 100644 index a5ea47b2..00000000 --- a/crates/provider/src/ethers/entry_point.rs +++ /dev/null @@ -1,211 +0,0 @@ -// This file is part of Rundler. -// -// Rundler is free software: you can redistribute it and/or modify it under the -// terms of the GNU Lesser General Public License as published by the Free Software -// Foundation, either version 3 of the License, or (at your option) any later version. -// -// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; -// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License along with Rundler. -// If not, see https://www.gnu.org/licenses/. - -use std::{ops::Deref, sync::Arc}; - -use anyhow::Context; -use ethers::{ - abi::AbiDecode, - contract::{ContractError, FunctionCall}, - providers::{spoof, Middleware, RawCall}, - types::{ - transaction::eip2718::TypedTransaction, Address, BlockId, Bytes, Eip1559TransactionRequest, - H256, U256, - }, -}; - -use rundler_types::hybrid_compute; - - -use rundler_types::{ - contracts::{ - i_entry_point::{ExecutionResult, FailedOp, IEntryPoint, SignatureValidationFailed}, - shared_types::UserOpsPerAggregator, - }, - GasFees, UserOperation, -}; -use rundler_utils::eth::{self, ContractRevertError}; - -use crate::traits::{EntryPoint, HandleOpsOut}; - -#[async_trait::async_trait] -impl EntryPoint for IEntryPoint -where - M: Middleware + 'static, -{ - fn address(&self) -> Address { - self.deref().address() - } - - async fn simulate_validation( - &self, - user_op: UserOperation, - max_validation_gas: u64, - ) -> anyhow::Result { - //let pvg = user_op.pre_verification_gas; - - let gas_price = 
user_op.max_fee_per_gas; - let mut tx = self - .simulate_validation(user_op) - .gas(U256::from(max_validation_gas)) - .tx; - let from_addr = hybrid_compute::HC_CONFIG.lock().unwrap().from_addr; - tx.set_from(from_addr); - tx.set_gas_price(gas_price); - //println!("HC entry_point.rs s_v {:?} {:?} {:?} {:?} gas_price", max_validation_gas, pvg, tx, gas_price); - - Ok(tx) - } - - - async fn call_handle_ops( - &self, - ops_per_aggregator: Vec, - beneficiary: Address, - gas: U256, - ) -> anyhow::Result { - - println!("HC entry_point call_handle_ops 1, len {:?} gas {:?}", ops_per_aggregator[0].user_ops.len(), gas); - - let result = get_handle_ops_call(self, ops_per_aggregator.clone(), beneficiary, gas) - .call() - .await; - println!("HC entry_point call_handle_ops 2 result{:?}", result); - let error = match result { - Ok(()) => return Ok(HandleOpsOut::Success), - Err(error) => error, - }; - if let ContractError::Revert(revert_data) = &error { - if let Ok(FailedOp { op_index, reason }) = FailedOp::decode(revert_data) { - match &reason[..4] { - "AA95" => anyhow::bail!("Handle ops called with insufficient gas; {:?}", gas), - _ => { - println!("HC AA95 at index {:?}", op_index); - return Ok(HandleOpsOut::FailedOp(op_index.as_usize(), reason)); - }, - } - } - if let Ok(failure) = SignatureValidationFailed::decode(revert_data) { - return Ok(HandleOpsOut::SignatureValidationFailed(failure.aggregator)); - } - // Special handling for a bug in the 0.6 entry point contract to detect the bug where - // the `returndatacopy` opcode reverts due to a postOp revert and the revert data is too short. - // See https://github.com/eth-infinitism/account-abstraction/pull/325 for more details. - // NOTE: this error message is copied directly from Geth and assumes it will not change. - if error.to_string().contains("return data out of bounds") { - return Ok(HandleOpsOut::PostOpRevert); - } - } - Err(error)? 
- } - - async fn balance_of( - &self, - address: Address, - block_id: Option, - ) -> anyhow::Result { - block_id - .map_or(self.balance_of(address), |bid| { - self.balance_of(address).block(bid) - }) - .call() - .await - .context("entry point should return balance") - } - - async fn call_spoofed_simulate_op( - &self, - op: UserOperation, - target: Address, - target_call_data: Bytes, - block_hash: H256, - gas: U256, - spoofed_state: &spoof::State, - ) -> anyhow::Result> { - //println!("HC entry_point call_spoofed_simOp op {:?} {:?}", op.sender, op.nonce); - - let contract_error = self - .simulate_handle_op(op, target, target_call_data) - .block(block_hash) - .gas(gas) - .call_raw() - .state(spoofed_state) - .await - .err() - .context("simulateHandleOp succeeded, but should always revert")?; - let revert_data = eth::get_revert_bytes(contract_error) - .context("simulateHandleOps should return revert data")?; -// println!("HC entry_point call_spoofed_simOp revertData {:?}", revert_data); - return Ok(self.decode_simulate_handle_ops_revert(revert_data)); - } - - fn get_send_bundle_transaction( - &self, - ops_per_aggregator: Vec, - beneficiary: Address, - gas: U256, - gas_fees: GasFees, - ) -> TypedTransaction { - - println!("HC starting get_send_bundle_transaction, len {} gas {:?} maxfees {:?}", ops_per_aggregator[0].user_ops.len(), gas, gas_fees); - - let tx: Eip1559TransactionRequest = - get_handle_ops_call(self, ops_per_aggregator, beneficiary, gas) - .tx - .into(); - tx.max_fee_per_gas(gas_fees.max_fee_per_gas) - .max_priority_fee_per_gas(gas_fees.max_priority_fee_per_gas) - .into() - } - - fn decode_simulate_handle_ops_revert( - &self, - revert_data: Bytes, - ) -> Result { - if let Ok(result) = ExecutionResult::decode(&revert_data) { - //println!("HC decodeSHO OK_result {:?}", result); - Ok(result) - } else if let Ok(failed_op) = FailedOp::decode(&revert_data) { - //println!("HC decodeSHO failedOp {:?}", failed_op.reason); - Err(failed_op.reason) - } else if let 
Ok(err) = ContractRevertError::decode(&revert_data) { - println!("HC decodeSHO errReason {:?}", err.reason); - Err(err.reason) - } else { - println!("HC decodeSHO errGeneric"); - Err(String::new()) - } - } - - async fn get_nonce(&self, address: Address, key: ::ethers::core::types::U256) -> Result<::ethers::core::types::U256, String> { - let ret = IEntryPoint::get_nonce(self, address, key).await; - Ok(ret.unwrap()) - } -} - -fn get_handle_ops_call( - entry_point: &IEntryPoint, - mut ops_per_aggregator: Vec, - beneficiary: Address, - gas: U256, -) -> FunctionCall, M, ()> { - let call = - if ops_per_aggregator.len() == 1 && ops_per_aggregator[0].aggregator == Address::zero() { - //println!("HC get_handle_ops_call will use entry_point.handle_ops"); - entry_point.handle_ops(ops_per_aggregator.swap_remove(0).user_ops, beneficiary) - } else { - //println!("HC get_handle_ops_call will use entry_point.handle_aggregated_ops"); - entry_point.handle_aggregated_ops(ops_per_aggregator, beneficiary) - }; - call.gas(gas) -} diff --git a/crates/provider/src/ethers/entry_point/mod.rs b/crates/provider/src/ethers/entry_point/mod.rs new file mode 100644 index 00000000..21ad5200 --- /dev/null +++ b/crates/provider/src/ethers/entry_point/mod.rs @@ -0,0 +1,115 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. 
+ +use std::sync::Arc; + +use ethers::{ + providers::Middleware, + types::{Address, Bytes, Eip1559TransactionRequest, U256, U64}, +}; +use rundler_types::{ + chain::{ChainSpec, L1GasOracleContractType}, + contracts::{ + arbitrum::node_interface::NodeInterface, optimism::gas_price_oracle::GasPriceOracle, + }, +}; + +pub(crate) mod v0_6; +pub(crate) mod v0_7; + +#[derive(Debug, Default)] +pub(crate) enum L1GasOracle

{ + ArbitrumNitro(NodeInterface

), + OptimismBedrock(GasPriceOracle

), + #[default] + None, +} + +impl

L1GasOracle

+where + P: Middleware + 'static, +{ + fn new(chain_spec: &ChainSpec, provider: Arc

) -> L1GasOracle

{ + match chain_spec.l1_gas_oracle_contract_type { + L1GasOracleContractType::ArbitrumNitro => L1GasOracle::ArbitrumNitro( + NodeInterface::new(chain_spec.l1_gas_oracle_contract_address, provider), + ), + L1GasOracleContractType::OptimismBedrock => L1GasOracle::OptimismBedrock( + GasPriceOracle::new(chain_spec.l1_gas_oracle_contract_address, provider), + ), + L1GasOracleContractType::None => L1GasOracle::None, + } + } + + async fn estimate_l1_gas( + &self, + address: Address, + data: Bytes, + gas_price: U256, + ) -> anyhow::Result { + match self { + L1GasOracle::ArbitrumNitro(arb_node) => { + estimate_arbitrum_l1_gas(arb_node, address, data).await + } + L1GasOracle::OptimismBedrock(opt_oracle) => { + estimate_optimism_l1_gas(opt_oracle, address, data, gas_price).await + } + L1GasOracle::None => Ok(U256::zero()), + } + } +} + +impl

Clone for L1GasOracle

{ + fn clone(&self) -> Self { + match self { + L1GasOracle::ArbitrumNitro(node) => L1GasOracle::ArbitrumNitro(node.clone()), + L1GasOracle::OptimismBedrock(oracle) => L1GasOracle::OptimismBedrock(oracle.clone()), + L1GasOracle::None => L1GasOracle::None, + } + } +} + +async fn estimate_arbitrum_l1_gas( + arb_node: &NodeInterface

, + address: Address, + data: Bytes, +) -> anyhow::Result { + let gas = arb_node + .gas_estimate_l1_component(address, false, data) + .call() + .await?; + Ok(U256::from(gas.0)) +} + +async fn estimate_optimism_l1_gas( + opt_oracle: &GasPriceOracle

, + address: Address, + data: Bytes, + gas_price: U256, +) -> anyhow::Result { + // construct an unsigned transaction with default values just for L1 gas estimation + let tx = Eip1559TransactionRequest::new() + .from(Address::random()) + .to(address) + .gas(U256::from(1_000_000)) + .max_priority_fee_per_gas(U256::from(100_000_000)) + .max_fee_per_gas(U256::from(100_000_000)) + .value(U256::from(0)) + .data(data) + .nonce(U256::from(100_000)) + .chain_id(U64::from(100_000)) + .rlp(); + + let l1_fee = opt_oracle.get_l1_fee(tx).call().await?; + Ok(l1_fee.checked_div(gas_price).unwrap_or(U256::MAX)) +} diff --git a/crates/provider/src/ethers/entry_point/v0_6.rs b/crates/provider/src/ethers/entry_point/v0_6.rs new file mode 100644 index 00000000..b2fed43a --- /dev/null +++ b/crates/provider/src/ethers/entry_point/v0_6.rs @@ -0,0 +1,449 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. 
+ +use std::sync::Arc; + +use anyhow::Context; +use ethers::{ + abi::AbiDecode, + contract::{ContractError, EthCall, FunctionCall}, + providers::{spoof, Middleware, RawCall}, + types::{ + transaction::eip2718::TypedTransaction, Address, BlockId, Bytes, Eip1559TransactionRequest, + H256, U256, + }, +}; +use rundler_types::hybrid_compute; + +use rundler_types::{ + chain::ChainSpec, + contracts::v0_6::{ + get_balances::{GetBalancesResult, GETBALANCES_BYTECODE}, + i_aggregator::IAggregator, + i_entry_point::{ + self, DepositInfo as DepositInfoV0_6, ExecutionResult as ExecutionResultV0_6, FailedOp, + IEntryPoint, SignatureValidationFailed, + UserOpsPerAggregator as UserOpsPerAggregatorV0_6, + }, + }, + v0_6::UserOperation, + GasFees, UserOpsPerAggregator, ValidationError, ValidationOutput, ValidationRevert, +}; +use rundler_utils::eth::{self, ContractRevertError}; + +use super::L1GasOracle; +use crate::{ + traits::HandleOpsOut, AggregatorOut, AggregatorSimOut, BundleHandler, DepositInfo, + EntryPoint as EntryPointTrait, EntryPointProvider, ExecutionResult, L1GasProvider, Provider, + SignatureAggregator, SimulateOpCallData, SimulationProvider, +}; + +/// Implementation of the `EntryPoint` trait for the v0.6 version of the entry point contract using ethers +#[derive(Debug)] +pub struct EntryPoint { + i_entry_point: IEntryPoint

, + provider: Arc

, + l1_gas_oracle: L1GasOracle

, + max_aggregation_gas: u64, +} + +impl

Clone for EntryPoint

+where + P: Provider + Middleware, +{ + fn clone(&self) -> Self { + Self { + i_entry_point: self.i_entry_point.clone(), + provider: self.provider.clone(), + l1_gas_oracle: self.l1_gas_oracle.clone(), + max_aggregation_gas: self.max_aggregation_gas, + } + } +} + +impl

EntryPoint

+where + P: Provider + Middleware, +{ + /// Create a new `EntryPointV0_6` instance + pub fn new( + entry_point_address: Address, + chain_spec: &ChainSpec, + max_aggregation_gas: u64, + provider: Arc

, + ) -> Self { + Self { + i_entry_point: IEntryPoint::new(entry_point_address, Arc::clone(&provider)), + provider: Arc::clone(&provider), + l1_gas_oracle: L1GasOracle::new(chain_spec, provider), + max_aggregation_gas, + } + } +} + +#[async_trait::async_trait] +impl

EntryPointTrait for EntryPoint

+where + P: Provider + Middleware + Send + Sync + 'static, +{ + fn address(&self) -> Address { + self.i_entry_point.address() + } + + async fn balance_of( + &self, + address: Address, + block_id: Option, + ) -> anyhow::Result { + block_id + .map_or(self.i_entry_point.balance_of(address), |bid| { + self.i_entry_point.balance_of(address).block(bid) + }) + .call() + .await + .context("entry point should return balance") + } + + async fn get_deposit_info(&self, address: Address) -> anyhow::Result { + Ok(self + .i_entry_point + .get_deposit_info(address) + .await + .context("should get deposit info")? + .into()) + } + + async fn get_balances(&self, addresses: Vec

) -> anyhow::Result> { + let out: GetBalancesResult = self + .provider + .call_constructor( + &GETBALANCES_BYTECODE, + (self.address(), addresses), + None, + &spoof::state(), + ) + .await + .context("should compute balances")?; + Ok(out.balances) + } +} + +#[async_trait::async_trait] +impl

SignatureAggregator for EntryPoint

+where + P: Provider + Middleware + Send + Sync + 'static, +{ + type UO = UserOperation; + + async fn aggregate_signatures( + &self, + aggregator_address: Address, + ops: Vec, + ) -> anyhow::Result> { + let aggregator = IAggregator::new(aggregator_address, Arc::clone(&self.provider)); + let result = aggregator + .aggregate_signatures(ops) + .gas(self.max_aggregation_gas) + .call() + .await; + match result { + Ok(bytes) => Ok(Some(bytes)), + Err(ContractError::Revert(_)) => Ok(None), + Err(error) => Err(error).context("aggregator contract should aggregate signatures")?, + } + } + + async fn validate_user_op_signature( + &self, + aggregator_address: Address, + user_op: UserOperation, + gas_cap: u64, + ) -> anyhow::Result { + let aggregator = IAggregator::new(aggregator_address, Arc::clone(&self.provider)); + + let result = aggregator + .validate_user_op_signature(user_op) + .gas(gas_cap) + .call() + .await; + + match result { + Ok(sig) => Ok(AggregatorOut::SuccessWithInfo(AggregatorSimOut { + address: aggregator_address, + signature: sig, + })), + Err(ContractError::Revert(_)) => Ok(AggregatorOut::ValidationReverted), + Err(error) => Err(error).context("should call aggregator to validate signature")?, + } + } +} + +#[async_trait::async_trait] +impl

BundleHandler for EntryPoint

+where + P: Provider + Middleware + Send + Sync + 'static, +{ + type UO = UserOperation; + + async fn call_handle_ops( + &self, + ops_per_aggregator: Vec>, + beneficiary: Address, + gas: U256, + ) -> anyhow::Result { + let result = get_handle_ops_call(&self.i_entry_point, ops_per_aggregator, beneficiary, gas) + .call() + .await; + let error = match result { + Ok(()) => return Ok(HandleOpsOut::Success), + Err(error) => error, + }; + if let ContractError::Revert(revert_data) = &error { + if let Ok(FailedOp { op_index, reason }) = FailedOp::decode(revert_data) { + match &reason[..4] { + "AA95" => anyhow::bail!("Handle ops called with insufficient gas"), + _ => return Ok(HandleOpsOut::FailedOp(op_index.as_usize(), reason)), + } + } + if let Ok(failure) = SignatureValidationFailed::decode(revert_data) { + return Ok(HandleOpsOut::SignatureValidationFailed(failure.aggregator)); + } + // Special handling for a bug in the 0.6 entry point contract to detect the bug where + // the `returndatacopy` opcode reverts due to a postOp revert and the revert data is too short. + // See https://github.com/eth-infinitism/account-abstraction/pull/325 for more details. + // NOTE: this error message is copied directly from Geth and assumes it will not change. + if error.to_string().contains("return data out of bounds") { + return Ok(HandleOpsOut::PostOpRevert); + } + } + Err(error)? + } + + fn get_send_bundle_transaction( + &self, + ops_per_aggregator: Vec>, + beneficiary: Address, + gas: U256, + gas_fees: GasFees, + ) -> TypedTransaction { + let tx: Eip1559TransactionRequest = + get_handle_ops_call(&self.i_entry_point, ops_per_aggregator, beneficiary, gas) + .tx + .into(); + tx.max_fee_per_gas(gas_fees.max_fee_per_gas) + .max_priority_fee_per_gas(gas_fees.max_priority_fee_per_gas) + .into() + } +} + +#[async_trait::async_trait] +impl

L1GasProvider for EntryPoint

+where + P: Provider + Middleware + Send + Sync + 'static, +{ + type UO = UserOperation; + + async fn calc_l1_gas( + &self, + entry_point_address: Address, + user_op: UserOperation, + gas_price: U256, + ) -> anyhow::Result { + let data = self + .i_entry_point + .handle_ops(vec![user_op], Address::random()) + .calldata() + .context("should get calldata for entry point handle ops")?; + + self.l1_gas_oracle + .estimate_l1_gas(entry_point_address, data, gas_price) + .await + } +} + +#[async_trait::async_trait] +impl

SimulationProvider for EntryPoint

+where + P: Provider + Middleware + Send + Sync + 'static, +{ + type UO = UserOperation; + + fn get_tracer_simulate_validation_call( + &self, + user_op: UserOperation, + max_validation_gas: u64, + ) -> (TypedTransaction, spoof::State) { + let pvg = user_op.pre_verification_gas; // FIXME HC + let gas_price = user_op.max_fee_per_gas; + let mut call = self + .i_entry_point + .simulate_validation(user_op) + .gas(U256::from(max_validation_gas) + pvg) // FIXME HC + .tx; + let from_addr = hybrid_compute::HC_CONFIG.lock().unwrap().from_addr; + call.set_from(from_addr); + call.set_gas_price(gas_price); + //println!("HC entry_point.rs s_v {:?} {:?} {:?} {:?} gas_price", max_validation_gas, pvg, tx, gas_price); + (call, spoof::State::default()) + } + + async fn call_simulate_validation( + &self, + user_op: UserOperation, + max_validation_gas: u64, + block_hash: Option, + ) -> Result { + let pvg = user_op.pre_verification_gas; + let blockless = self + .i_entry_point + .simulate_validation(user_op) + .gas(U256::from(max_validation_gas) + pvg); + let call = match block_hash { + Some(block_hash) => blockless.block(block_hash), + None => blockless, + }; + + match call.call().await { + Ok(()) => Err(anyhow::anyhow!("simulateValidation should always revert"))?, + Err(ContractError::Revert(revert_data)) => { + if let Ok(result) = ValidationOutput::decode_v0_6(&revert_data) { + Ok(result) + } else if let Ok(failed_op) = FailedOp::decode(&revert_data) { + Err(ValidationRevert::from(failed_op))? + } else if let Ok(err) = ContractRevertError::decode(&revert_data) { + Err(ValidationRevert::from(err))? + } else { + Err(ValidationRevert::Unknown(revert_data))? 
+ } + } + Err(error) => Err(error).context("call simulation RPC failed")?, + } + } + + fn decode_simulate_handle_ops_revert( + &self, + revert_data: Bytes, + ) -> Result { + if let Ok(result) = ExecutionResultV0_6::decode(&revert_data) { + Ok(result.into()) + } else if let Ok(failed_op) = FailedOp::decode(&revert_data) { + Err(ValidationRevert::EntryPoint(failed_op.reason)) + } else if let Ok(err) = ContractRevertError::decode(&revert_data) { + Err(ValidationRevert::EntryPoint(err.reason)) + } else { + Err(ValidationRevert::Unknown(revert_data)) + } + } + + fn get_simulate_op_call_data( + &self, + op: UserOperation, + spoofed_state: &spoof::State, + ) -> SimulateOpCallData { + let call_data = eth::call_data_of( + i_entry_point::SimulateHandleOpCall::selector(), + (op.clone(), Address::zero(), Bytes::new()), + ); + SimulateOpCallData { + call_data, + spoofed_state: spoofed_state.clone(), + } + } + + async fn call_spoofed_simulate_op( + &self, + user_op: UserOperation, + target: Address, + target_call_data: Bytes, + block_hash: H256, + gas: U256, + spoofed_state: &spoof::State, + ) -> anyhow::Result> { + let contract_error = self + .i_entry_point + .simulate_handle_op(user_op, target, target_call_data) + .block(block_hash) + .gas(gas) + .call_raw() + .state(spoofed_state) + .await + .err() + .context("simulateHandleOp succeeded, but should always revert")?; + let revert_data = eth::get_revert_bytes(contract_error) + .context("simulateHandleOps should return revert data")?; + return Ok(self.decode_simulate_handle_ops_revert(revert_data)); + } + + fn simulation_should_revert(&self) -> bool { + true + } + + async fn get_nonce(&self, address: Address, key: ::ethers::core::types::U256) -> Result<::ethers::core::types::U256, String> { + let ret = self.i_entry_point.get_nonce(address, key).await; + Ok(ret.unwrap()) + } + +} + +impl

EntryPointProvider for EntryPoint

where + P: Provider + Middleware + Send + Sync + 'static +{ +} + +fn get_handle_ops_call( + entry_point: &IEntryPoint, + ops_per_aggregator: Vec>, + beneficiary: Address, + gas: U256, +) -> FunctionCall, M, ()> { + let mut ops_per_aggregator: Vec = ops_per_aggregator + .into_iter() + .map(|uoa| UserOpsPerAggregatorV0_6 { + user_ops: uoa.user_ops, + aggregator: uoa.aggregator, + signature: uoa.signature, + }) + .collect(); + let call = + if ops_per_aggregator.len() == 1 && ops_per_aggregator[0].aggregator == Address::zero() { + entry_point.handle_ops(ops_per_aggregator.swap_remove(0).user_ops, beneficiary) + } else { + entry_point.handle_aggregated_ops(ops_per_aggregator, beneficiary) + }; + call.gas(gas) +} + +impl From for ExecutionResult { + fn from(result: ExecutionResultV0_6) -> Self { + ExecutionResult { + pre_op_gas: result.pre_op_gas, + paid: result.paid, + valid_after: result.valid_after.into(), + valid_until: result.valid_until.into(), + target_success: result.target_success, + target_result: result.target_result, + } + } +} + +impl From for DepositInfo { + fn from(info: DepositInfoV0_6) -> Self { + DepositInfo { + deposit: info.deposit.into(), + staked: info.staked, + stake: info.stake, + unstake_delay_sec: info.unstake_delay_sec, + withdraw_time: info.withdraw_time, + } + } +} diff --git a/crates/provider/src/ethers/entry_point/v0_7.rs b/crates/provider/src/ethers/entry_point/v0_7.rs new file mode 100644 index 00000000..074ad1fc --- /dev/null +++ b/crates/provider/src/ethers/entry_point/v0_7.rs @@ -0,0 +1,488 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. 
+// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +use std::sync::Arc; + +use anyhow::Context; +use ethers::{ + abi::AbiDecode, + contract::{ContractError, EthCall, FunctionCall}, + providers::{Middleware, RawCall}, + types::{ + spoof, transaction::eip2718::TypedTransaction, Address, BlockId, Bytes, + Eip1559TransactionRequest, H256, U256, + }, +}; +use rundler_types::{ + chain::ChainSpec, + contracts::v0_7::{ + entry_point_simulations::{ + self, EntryPointSimulations, ExecutionResult as ExecutionResultV0_7, FailedOp, + FailedOpWithRevert, ENTRYPOINTSIMULATIONS_DEPLOYED_BYTECODE, + }, + get_balances::{GetBalancesResult, GETBALANCES_BYTECODE}, + i_aggregator::IAggregator, + i_entry_point::{ + DepositInfo as DepositInfoV0_7, IEntryPoint, SignatureValidationFailed, + UserOpsPerAggregator as UserOpsPerAggregatorV0_7, + }, + }, + v0_7::UserOperation, + GasFees, UserOpsPerAggregator, ValidationError, ValidationOutput, ValidationRevert, +}; +use rundler_utils::eth::{self, ContractRevertError}; + +use super::L1GasOracle; +use crate::{ + AggregatorOut, AggregatorSimOut, BundleHandler, DepositInfo, EntryPoint as EntryPointTrait, + EntryPointProvider, ExecutionResult, HandleOpsOut, L1GasProvider, Provider, + SignatureAggregator, SimulateOpCallData, SimulationProvider, +}; + +/// Entry point for the v0.7 contract. +#[derive(Debug)] +pub struct EntryPoint

{ + i_entry_point: IEntryPoint

, + provider: Arc

, + l1_gas_oracle: L1GasOracle

, + max_aggregation_gas: u64, +} + +impl

EntryPoint

+where + P: Middleware + 'static, +{ + /// Create a new `EntryPoint` instance for v0.7 + pub fn new( + entry_point_address: Address, + chain_spec: &ChainSpec, + max_aggregation_gas: u64, + provider: Arc

, + ) -> Self { + Self { + i_entry_point: IEntryPoint::new(entry_point_address, Arc::clone(&provider)), + provider: Arc::clone(&provider), + l1_gas_oracle: L1GasOracle::new(chain_spec, provider), + max_aggregation_gas, + } + } +} + +impl

Clone for EntryPoint

+where + P: Provider + Middleware, +{ + fn clone(&self) -> Self { + Self { + i_entry_point: self.i_entry_point.clone(), + provider: self.provider.clone(), + l1_gas_oracle: self.l1_gas_oracle.clone(), + max_aggregation_gas: self.max_aggregation_gas, + } + } +} + +#[async_trait::async_trait] +impl

EntryPointTrait for EntryPoint

+where + P: Provider + Middleware + Send + Sync + 'static, +{ + fn address(&self) -> Address { + self.i_entry_point.address() + } + + async fn balance_of( + &self, + address: Address, + block_id: Option, + ) -> anyhow::Result { + block_id + .map_or(self.i_entry_point.balance_of(address), |bid| { + self.i_entry_point.balance_of(address).block(bid) + }) + .call() + .await + .context("entry point should return balance") + } + + async fn get_deposit_info(&self, address: Address) -> anyhow::Result { + Ok(self + .i_entry_point + .get_deposit_info(address) + .await + .context("should get deposit info")? + .into()) + } + + async fn get_balances(&self, addresses: Vec

) -> anyhow::Result> { + let out: GetBalancesResult = self + .provider + .call_constructor( + &GETBALANCES_BYTECODE, + (self.address(), addresses), + None, + &spoof::state(), + ) + .await + .context("should compute balances")?; + Ok(out.balances) + } +} + +#[async_trait::async_trait] +impl

SignatureAggregator for EntryPoint

+where + P: Provider + Middleware + Send + Sync + 'static, +{ + type UO = UserOperation; + + async fn aggregate_signatures( + &self, + aggregator_address: Address, + ops: Vec, + ) -> anyhow::Result> { + let aggregator = IAggregator::new(aggregator_address, Arc::clone(&self.provider)); + + // pack the ops + let packed_ops = ops.into_iter().map(|op| op.pack()).collect(); + + let result = aggregator + .aggregate_signatures(packed_ops) + .gas(self.max_aggregation_gas) + .call() + .await; + match result { + Ok(bytes) => Ok(Some(bytes)), + Err(ContractError::Revert(_)) => Ok(None), + Err(error) => Err(error).context("aggregator contract should aggregate signatures")?, + } + } + + async fn validate_user_op_signature( + &self, + aggregator_address: Address, + user_op: UserOperation, + gas_cap: u64, + ) -> anyhow::Result { + let aggregator = IAggregator::new(aggregator_address, Arc::clone(&self.provider)); + + let result = aggregator + .validate_user_op_signature(user_op.pack()) + .gas(gas_cap) + .call() + .await; + + match result { + Ok(sig) => Ok(AggregatorOut::SuccessWithInfo(AggregatorSimOut { + address: aggregator_address, + signature: sig, + })), + Err(ContractError::Revert(_)) => Ok(AggregatorOut::ValidationReverted), + Err(error) => Err(error).context("should call aggregator to validate signature")?, + } + } +} + +#[async_trait::async_trait] +impl

BundleHandler for EntryPoint

+where + P: Provider + Middleware + Send + Sync + 'static, +{ + type UO = UserOperation; + + async fn call_handle_ops( + &self, + ops_per_aggregator: Vec>, + beneficiary: Address, + gas: U256, + ) -> anyhow::Result { + let result = get_handle_ops_call(&self.i_entry_point, ops_per_aggregator, beneficiary, gas) + .call() + .await; + let error = match result { + Ok(()) => return Ok(HandleOpsOut::Success), + Err(error) => error, + }; + if let ContractError::Revert(revert_data) = &error { + if let Ok(FailedOp { op_index, reason }) = FailedOp::decode(revert_data) { + match &reason[..4] { + // This revert is a bundler issue, not a user op issue, handle it differently + "AA95" => anyhow::bail!("Handle ops called with insufficient gas"), + _ => return Ok(HandleOpsOut::FailedOp(op_index.as_usize(), reason)), + } + } + if let Ok(failure) = SignatureValidationFailed::decode(revert_data) { + return Ok(HandleOpsOut::SignatureValidationFailed(failure.aggregator)); + } + } + Err(error)? + } + + fn get_send_bundle_transaction( + &self, + ops_per_aggregator: Vec>, + beneficiary: Address, + gas: U256, + gas_fees: GasFees, + ) -> TypedTransaction { + let tx: Eip1559TransactionRequest = + get_handle_ops_call(&self.i_entry_point, ops_per_aggregator, beneficiary, gas) + .tx + .into(); + tx.max_fee_per_gas(gas_fees.max_fee_per_gas) + .max_priority_fee_per_gas(gas_fees.max_priority_fee_per_gas) + .into() + } +} + +#[async_trait::async_trait] +impl

L1GasProvider for EntryPoint

+where + P: Provider + Middleware + Send + Sync + 'static, +{ + type UO = UserOperation; + + async fn calc_l1_gas( + &self, + entry_point_address: Address, + user_op: UserOperation, + gas_price: U256, + ) -> anyhow::Result { + let data = self + .i_entry_point + .handle_ops(vec![user_op.pack()], Address::random()) + .calldata() + .context("should get calldata for entry point handle ops")?; + + self.l1_gas_oracle + .estimate_l1_gas(entry_point_address, data, gas_price) + .await + } +} + +#[async_trait::async_trait] +impl

SimulationProvider for EntryPoint

+where + P: Provider + Middleware + Send + Sync + 'static, +{ + type UO = UserOperation; + + fn get_tracer_simulate_validation_call( + &self, + user_op: UserOperation, + max_validation_gas: u64, + ) -> (TypedTransaction, spoof::State) { + let addr = self.i_entry_point.address(); + let pvg = user_op.pre_verification_gas; + let mut spoof_ep = spoof::State::default(); + spoof_ep + .account(addr) + .code(ENTRYPOINTSIMULATIONS_DEPLOYED_BYTECODE.clone()); + let ep_simulations = EntryPointSimulations::new(addr, Arc::clone(&self.provider)); + + let call = ep_simulations + .simulate_validation(user_op.pack()) + .gas(U256::from(max_validation_gas) + pvg) + .tx; + + (call, spoof_ep) + } + + async fn call_simulate_validation( + &self, + user_op: UserOperation, + max_validation_gas: u64, + block_hash: Option, + ) -> Result { + let addr = self.i_entry_point.address(); + let pvg = user_op.pre_verification_gas; + let mut spoof_ep = spoof::State::default(); + spoof_ep + .account(addr) + .code(ENTRYPOINTSIMULATIONS_DEPLOYED_BYTECODE.clone()); + + let ep_simulations = EntryPointSimulations::new(addr, Arc::clone(&self.provider)); + let blockless = ep_simulations + .simulate_validation(user_op.clone().pack()) + .gas(U256::from(max_validation_gas) + pvg); + let call = match block_hash { + Some(block_hash) => blockless.block(block_hash), + None => blockless, + }; + + match call.call_raw().state(&spoof_ep).await { + Ok(output) => Ok(output.into()), + Err(ContractError::Revert(revert_data)) => { + Err(decode_simulate_validation_revert(revert_data))? + } + Err(error) => Err(error).context("call simulation RPC failed")?, + } + } + + // Always returns `Err(ValidationRevert)`. The v0.7 entry point does not use + // reverts to indicate successful simulations. 
+ fn decode_simulate_handle_ops_revert( + &self, + revert_data: Bytes, + ) -> Result { + Err(decode_simulate_validation_revert(revert_data)) + } + + fn get_simulate_op_call_data( + &self, + op: UserOperation, + spoofed_state: &spoof::State, + ) -> SimulateOpCallData { + let call_data = eth::call_data_of( + entry_point_simulations::SimulateHandleOpCall::selector(), + (op.pack(), Address::zero(), Bytes::new()), + ); + SimulateOpCallData { + call_data, + spoofed_state: self.get_simulate_op_spoofed_state(spoofed_state), + } + } + + // NOTE: A spoof of the entry point code will be ignored by this function. + async fn call_spoofed_simulate_op( + &self, + user_op: UserOperation, + target: Address, + target_call_data: Bytes, + block_hash: H256, + gas: U256, + spoofed_state: &spoof::State, + ) -> anyhow::Result> { + let addr = self.i_entry_point.address(); + let spoofed_state = &self.get_simulate_op_spoofed_state(spoofed_state); + let ep_simulations = EntryPointSimulations::new(addr, Arc::clone(&self.provider)); + + let contract_error = ep_simulations + .simulate_handle_op(user_op.pack(), target, target_call_data) + .block(block_hash) + .gas(gas) + .call_raw() + .state(spoofed_state) + .await; + Ok(match contract_error { + Ok(execution_result) => Ok(execution_result.into()), + Err(contract_error) => { + let revert_data = eth::get_revert_bytes(contract_error) + .context("simulateHandleOps should return revert data")?; + self.decode_simulate_handle_ops_revert(revert_data) + } + }) + } + + fn simulation_should_revert(&self) -> bool { + false + } + + async fn get_nonce(&self, address: Address, key: ::ethers::core::types::U256) -> Result<::ethers::core::types::U256, String> { + let ret = self.i_entry_point.get_nonce(address, key).await; + Ok(ret.unwrap()) + } +} + +// Private helper functions for `SimulationProvider`. +impl
<P> EntryPoint<P>
+where + P: Provider + Middleware + Send + Sync + 'static, +{ + fn get_simulate_op_spoofed_state(&self, base_state: &spoof::State) -> spoof::State { + let mut state_overrides = base_state.clone(); + let entry_point_overrides = state_overrides.account(self.address()); + // Do nothing if the caller has already overridden the entry point code. + // We'll trust they know what they're doing and not replace their code. + // This is needed for call gas estimation, where the entry point is + // replaced with a proxy and the simulations bytecode is elsewhere. + if entry_point_overrides.code.is_none() { + state_overrides + .account(self.address()) + .code(ENTRYPOINTSIMULATIONS_DEPLOYED_BYTECODE.clone()); + } + state_overrides + } +} + +impl
<P> EntryPointProvider for EntryPoint<P>
where + P: Provider + Middleware + Send + Sync + 'static +{ +} + +// Parses the revert data into a structured error +fn decode_simulate_validation_revert(revert_data: Bytes) -> ValidationRevert { + if let Ok(failed_op_with_revert) = FailedOpWithRevert::decode(&revert_data) { + failed_op_with_revert.into() + } else if let Ok(failed_op) = FailedOp::decode(&revert_data) { + failed_op.into() + } else if let Ok(err) = ContractRevertError::decode(&revert_data) { + err.into() + } else { + ValidationRevert::Unknown(revert_data) + } +} + +fn get_handle_ops_call( + entry_point: &IEntryPoint, + ops_per_aggregator: Vec>, + beneficiary: Address, + gas: U256, +) -> FunctionCall, M, ()> { + let mut ops_per_aggregator: Vec = ops_per_aggregator + .into_iter() + .map(|uoa| UserOpsPerAggregatorV0_7 { + user_ops: uoa.user_ops.into_iter().map(|op| op.pack()).collect(), + aggregator: uoa.aggregator, + signature: uoa.signature, + }) + .collect(); + let call = + if ops_per_aggregator.len() == 1 && ops_per_aggregator[0].aggregator == Address::zero() { + entry_point.handle_ops(ops_per_aggregator.swap_remove(0).user_ops, beneficiary) + } else { + entry_point.handle_aggregated_ops(ops_per_aggregator, beneficiary) + }; + call.gas(gas) +} + +impl From for ExecutionResult { + fn from(result: ExecutionResultV0_7) -> Self { + let account = rundler_types::parse_validation_data(result.account_validation_data); + let paymaster = rundler_types::parse_validation_data(result.paymaster_validation_data); + let intersect_range = account + .valid_time_range() + .intersect(paymaster.valid_time_range()); + + ExecutionResult { + pre_op_gas: result.pre_op_gas, + paid: result.paid, + valid_after: intersect_range.valid_after, + valid_until: intersect_range.valid_until, + target_success: result.target_success, + target_result: result.target_result, + } + } +} + +impl From for DepositInfo { + fn from(deposit_info: DepositInfoV0_7) -> Self { + Self { + deposit: deposit_info.deposit, + staked: deposit_info.staked, + 
stake: deposit_info.stake, + unstake_delay_sec: deposit_info.unstake_delay_sec, + withdraw_time: deposit_info.withdraw_time, + } + } +} diff --git a/crates/provider/src/ethers/metrics_middleware.rs b/crates/provider/src/ethers/metrics_middleware.rs new file mode 100644 index 00000000..00c3eefd --- /dev/null +++ b/crates/provider/src/ethers/metrics_middleware.rs @@ -0,0 +1,137 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. 
+ +use core::fmt::Debug; +use std::time::Duration; + +use async_trait::async_trait; +use ethers::providers::{HttpClientError, JsonRpcClient}; +use metrics::{counter, histogram}; +use parse_display::Display; +use reqwest::StatusCode; +use serde::{de::DeserializeOwned, Serialize}; +use tokio::time::Instant; + +#[derive(Display)] +#[display(style = "snake_case")] +enum RpcCode { + ServerError, + InternalError, + InvalidParams, + MethodNotFound, + InvalidRequest, + ParseError, + ExecutionFailed, + Success, + Other, +} + +#[derive(Display)] +#[display(style = "snake_case")] +enum HttpCode { + TwoHundreds, + FourHundreds, + FiveHundreds, + Other, +} + +#[derive(Debug)] +/// Metrics middleware struct to hold the inner http client +pub struct MetricsMiddleware { + inner: C, +} + +impl MetricsMiddleware +where + C: JsonRpcClient, +{ + /// Constructor for middleware + pub fn new(inner: C) -> Self { + Self { inner } + } + + fn instrument_request( + &self, + method: &str, + duration: Duration, + request: &Result, + ) { + let method_str = method.to_string(); + + let mut http_code = StatusCode::OK.as_u16() as u64; + let mut rpc_code = 0; + + if let Err(error) = request { + match error { + HttpClientError::ReqwestError(req_err) => { + http_code = req_err.status().unwrap_or_default().as_u16() as u64; + } + HttpClientError::JsonRpcError(rpc_err) => { + rpc_code = rpc_err.code; + } + _ => {} + } + } + + let http = match http_code { + x if (500..=599).contains(&x) => HttpCode::FiveHundreds, + x if (400..=499).contains(&x) => HttpCode::FourHundreds, + x if (200..=299).contains(&x) => HttpCode::TwoHundreds, + _ => HttpCode::Other, + }; + + let rpc = match rpc_code { + -32700 => RpcCode::ParseError, + -32000 => RpcCode::ExecutionFailed, + -32600 => RpcCode::InvalidRequest, + -32601 => RpcCode::MethodNotFound, + -32602 => RpcCode::InvalidParams, + -32603 => RpcCode::InternalError, + x if (-32099..=-32000).contains(&x) => RpcCode::ServerError, + x if x >= 0 => RpcCode::Success, + _ => 
RpcCode::Other, + }; + + counter!( + "internal_http_response_code", + &[("method", method_str.clone()), ("status", http.to_string())] + ) + .increment(1); + + counter!( + "internal_rpc_response_code", + &[("method", method_str.clone()), ("status", rpc.to_string())] + ) + .increment(1); + + histogram!("internal_rpc_method_response_time", "method" => method_str).record(duration); + } +} + +#[async_trait] +impl> JsonRpcClient for MetricsMiddleware { + type Error = HttpClientError; + + async fn request(&self, method: &str, params: T) -> Result + where + T: Debug + Serialize + Send + Sync, + R: DeserializeOwned + Send, + { + let start_time = Instant::now(); + let result: Result = self.inner.request(method, params).await; + let duration = start_time.elapsed(); + self.instrument_request(method, duration, &result); + + result + } +} diff --git a/crates/provider/src/ethers/mod.rs b/crates/provider/src/ethers/mod.rs index 777b5fda..ad0f669b 100644 --- a/crates/provider/src/ethers/mod.rs +++ b/crates/provider/src/ethers/mod.rs @@ -14,7 +14,6 @@ //! Provider implementations using [ethers-rs](https://github.com/gakonst/ethers-rs) mod entry_point; -mod paymaster_helper; -mod provider; -mod stake_manager; -mod nonce_manager; +pub use entry_point::{v0_6::EntryPoint as EntryPointV0_6, v0_7::EntryPoint as EntryPointV0_7}; +mod metrics_middleware; +pub(crate) mod provider; diff --git a/crates/provider/src/ethers/paymaster_helper.rs b/crates/provider/src/ethers/paymaster_helper.rs deleted file mode 100644 index dad1833f..00000000 --- a/crates/provider/src/ethers/paymaster_helper.rs +++ /dev/null @@ -1,36 +0,0 @@ -// This file is part of Rundler. -// -// Rundler is free software: you can redistribute it and/or modify it under the -// terms of the GNU Lesser General Public License as published by the Free Software -// Foundation, either version 3 of the License, or (at your option) any later version. 
-// -// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; -// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License along with Rundler. -// If not, see https://www.gnu.org/licenses/. - -use anyhow::Result; -use ethers::{ - providers::Middleware, - types::{Address, U256}, -}; -use rundler_types::{ - contracts::paymaster_helper::PaymasterHelper as PaymasterHelperContract, DepositInfo, -}; - -use crate::PaymasterHelper; - -#[async_trait::async_trait] -impl PaymasterHelper for PaymasterHelperContract -where - M: Middleware + 'static, -{ - async fn get_balances(&self, addresses: Vec
<Address>
) -> Result> { - Ok(PaymasterHelperContract::get_balances(self, addresses).await?) - } - async fn get_deposit_info(&self, address: Address) -> Result { - Ok(PaymasterHelperContract::get_deposit_info(self, address).await?) - } -} diff --git a/crates/provider/src/ethers/provider.rs b/crates/provider/src/ethers/provider.rs index 6734acf7..bc2cb73c 100644 --- a/crates/provider/src/ethers/provider.rs +++ b/crates/provider/src/ethers/provider.rs @@ -11,41 +11,32 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. -use std::{fmt::Debug, sync::Arc}; +use std::{fmt::Debug, sync::Arc, time::Duration}; use anyhow::Context; use ethers::{ - contract::ContractError, + abi::{AbiDecode, AbiEncode}, prelude::ContractError as EthersContractError, providers::{ - JsonRpcClient, Middleware, Provider as EthersProvider, - ProviderError as EthersProviderError, RawCall, + Http, HttpRateLimitRetryPolicy, JsonRpcClient, Middleware, Provider as EthersProvider, + ProviderError as EthersProviderError, RawCall, RetryClient, RetryClientBuilder, }, types::{ spoof, transaction::eip2718::TypedTransaction, Address, Block, BlockId, BlockNumber, Bytes, Eip1559TransactionRequest, FeeHistory, Filter, GethDebugTracingCallOptions, - GethDebugTracingOptions, GethTrace, Log, Transaction, TransactionReceipt, TxHash, H160, - H256, U256, U64, + GethDebugTracingOptions, GethTrace, Log, Transaction, TransactionReceipt, TxHash, H256, + U256, U64, }, }; -use rundler_types::{ - contracts::{ - gas_price_oracle::GasPriceOracle, i_aggregator::IAggregator, i_entry_point::IEntryPoint, - node_interface::NodeInterface, - }, - UserOperation, +use reqwest::Url; +use rundler_types::contracts::utils::{ + get_gas_used::{GasUsedResult, GetGasUsed, GETGASUSED_DEPLOYED_BYTECODE}, + storage_loader::STORAGELOADER_DEPLOYED_BYTECODE, }; use serde::{de::DeserializeOwned, Serialize}; -use crate::{AggregatorOut, AggregatorSimOut, Provider, 
ProviderError, ProviderResult}; - -const ARBITRUM_NITRO_NODE_INTERFACE_ADDRESS: Address = H160([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xc8, -]); - -const OPTIMISM_BEDROCK_GAS_ORACLE_ADDRESS: Address = H160([ - 0x42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0F, -]); +use super::metrics_middleware::MetricsMiddleware; +use crate::{Provider, ProviderError, ProviderResult}; #[async_trait::async_trait] impl Provider for EthersProvider { @@ -75,6 +66,30 @@ impl Provider for EthersProvider { Ok(call.await?) } + async fn call_constructor( + &self, + bytecode: &Bytes, + args: A, + block_id: Option, + state_overrides: &spoof::State, + ) -> anyhow::Result + where + A: AbiEncode + Send + Sync + 'static, + R: AbiDecode + Send + Sync + 'static, + { + let mut data = bytecode.to_vec(); + data.extend(AbiEncode::encode(args)); + let tx = Eip1559TransactionRequest { + data: Some(data.into()), + ..Default::default() + }; + let error = Provider::call(self, &tx.into(), block_id, state_overrides) + .await + .err() + .context("called constructor should revert")?; + get_revert_data(error).context("should decode revert data from called constructor") + } + async fn fee_history + Send + Sync + Serialize + 'static>( &self, t: T, @@ -117,7 +132,6 @@ impl Provider for EthersProvider { tx_hash: TxHash, trace_options: GethDebugTracingOptions, ) -> ProviderResult { - //println!("HC debug_trace_transaction"); Ok(Middleware::debug_trace_transaction(self, tx_hash, trace_options).await?) } @@ -131,7 +145,7 @@ impl Provider for EthersProvider { println!("HC will use BlockNumber::Latest instead of {:?}", block_id); let ret = Middleware::debug_trace_call(self, tx, Some(ethers::types::BlockId::Number(BlockNumber::Latest)), trace_options).await; println!("HC debug_trace_call ret {:?}", ret); - Ok(ret?) + Ok(ret?) 
} async fn get_balance(&self, address: Address, block: Option) -> ProviderResult { @@ -170,44 +184,6 @@ impl Provider for EthersProvider { Ok(Middleware::get_logs(self, filter).await?) } - async fn aggregate_signatures( - self: Arc, - aggregator_address: Address, - ops: Vec, - ) -> ProviderResult> { - let aggregator = IAggregator::new(aggregator_address, self); - // TODO: Cap the gas here. - let result = aggregator.aggregate_signatures(ops).call().await; - match result { - Ok(bytes) => Ok(Some(bytes)), - Err(ContractError::Revert(_)) => Ok(None), - Err(error) => Err(error).context("aggregator contract should aggregate signatures")?, - } - } - - async fn validate_user_op_signature( - self: Arc, - aggregator_address: Address, - user_op: UserOperation, - gas_cap: u64, - ) -> ProviderResult { - let aggregator = IAggregator::new(aggregator_address, self); - let result = aggregator - .validate_user_op_signature(user_op) - .gas(gas_cap) - .call() - .await; - - match result { - Ok(sig) => Ok(AggregatorOut::SuccessWithInfo(AggregatorSimOut { - address: aggregator_address, - signature: sig, - })), - Err(ContractError::Revert(_)) => Ok(AggregatorOut::ValidationReverted), - Err(error) => Err(error).context("should call aggregator to validate signature")?, - } - } - async fn get_code(&self, address: Address, block_hash: Option) -> ProviderResult { Ok(Middleware::get_code(self, address, block_hash.map(|b| b.into())).await?) } @@ -216,55 +192,67 @@ impl Provider for EthersProvider { Ok(Middleware::get_transaction_count(self, address, None).await?) 
} - async fn calc_arbitrum_l1_gas( - self: Arc, - entry_point_address: Address, - op: UserOperation, - ) -> ProviderResult { - let entry_point = IEntryPoint::new(entry_point_address, Arc::clone(&self)); - let data = entry_point - .handle_ops(vec![op], Address::random()) - .calldata() - .context("should get calldata for entry point handle ops")?; - - let arb_node = NodeInterface::new(ARBITRUM_NITRO_NODE_INTERFACE_ADDRESS, self); - let gas = arb_node - .gas_estimate_l1_component(entry_point_address, false, data) - .call() - .await?; - Ok(U256::from(gas.0)) + async fn get_gas_used( + self: &Arc, + target: Address, + value: U256, + data: Bytes, + mut state_overrides: spoof::State, + ) -> ProviderResult { + let helper_addr = Address::random(); + let helper = GetGasUsed::new(helper_addr, Arc::clone(self)); + + state_overrides + .account(helper_addr) + .code(GETGASUSED_DEPLOYED_BYTECODE.clone()); + + Ok(helper + .get_gas(target, value, data) + .call_raw() + .state(&state_overrides) + .await + .context("should get gas used")?) 
} - async fn calc_optimism_l1_gas( - self: Arc, - entry_point_address: Address, - op: UserOperation, - gas_price: U256, - ) -> ProviderResult { - let entry_point = IEntryPoint::new(entry_point_address, Arc::clone(&self)); - let data = entry_point - .handle_ops(vec![op], Address::random()) - .calldata() - .context("should get calldata for entry point handle ops")?; - - // construct an unsigned transaction with default values just for L1 gas estimation - let tx = Eip1559TransactionRequest::new() - .from(Address::random()) - .to(entry_point_address) - .gas(U256::from(1_000_000)) - .max_priority_fee_per_gas(U256::from(100_000_000)) - .max_fee_per_gas(U256::from(100_000_000)) - .value(U256::from(0)) - .data(data) - .nonce(U256::from(100_000)) - .chain_id(U64::from(100_000)) - .rlp(); - - let gas_oracle = - GasPriceOracle::new(OPTIMISM_BEDROCK_GAS_ORACLE_ADDRESS, Arc::clone(&self)); - - let l1_fee = gas_oracle.get_l1_fee(tx).call().await?; - Ok(l1_fee.checked_div(gas_price).unwrap_or(U256::MAX)) + async fn batch_get_storage_at( + &self, + address: Address, + slots: Vec, + ) -> ProviderResult> { + let mut state_overrides = spoof::State::default(); + state_overrides + .account(address) + .code(STORAGELOADER_DEPLOYED_BYTECODE.clone()); + + let expected_ret_size = slots.len() * 32; + let slot_data = slots + .into_iter() + .flat_map(|slot| slot.to_fixed_bytes()) + .collect::>(); + + let tx: TypedTransaction = Eip1559TransactionRequest { + to: Some(address.into()), + data: Some(slot_data.into()), + ..Default::default() + } + .into(); + + let result_bytes = self + .call_raw(&tx) + .state(&state_overrides) + .await + .context("should call storage loader")?; + + if result_bytes.len() != expected_ret_size { + return Err(anyhow::anyhow!( + "expected {} bytes, got {}", + expected_ret_size, + result_bytes.len() + ) + .into()); + } + + Ok(result_bytes.chunks(32).map(H256::from_slice).collect()) } } @@ -288,3 +276,49 @@ impl From> for ProviderError { 
ProviderError::ContractError(e.to_string()) } } + +// Gets and decodes the revert data from a provider error, if it is a revert error. +fn get_revert_data(error: ProviderError) -> Result { + let ProviderError::JsonRpcError(jsonrpc_error) = &error else { + return Err(error); + }; + if !jsonrpc_error.is_revert() { + return Err(error); + } + match jsonrpc_error.decode_revert_data() { + Some(ret) => Ok(ret), + None => Err(error), + } +} + +/// Construct a new Ethers provider from a URL and a poll interval. +/// +/// Creates a provider with a retry client that retries 10 times, with an initial backoff of 500ms. +pub fn new_provider( + url: &str, + poll_interval: Option, +) -> anyhow::Result>>>> { + let parsed_url = Url::parse(url).context("provider url should be valid")?; + + let http_client = reqwest::Client::builder() + .connect_timeout(Duration::from_secs(1)) + .build() + .context("failed to build reqwest client")?; + let http = MetricsMiddleware::new(Http::new_with_client(parsed_url, http_client)); + + let client = RetryClientBuilder::default() + // these retries are if the server returns a 429 + .rate_limit_retries(10) + // these retries are if the connection is dubious + .timeout_retries(3) + .initial_backoff(Duration::from_millis(500)) + .build(http, Box::::default()); + + let mut provider = EthersProvider::new(client); + + if let Some(poll_interval) = poll_interval { + provider = provider.interval(poll_interval); + } + + Ok(Arc::new(provider)) +} diff --git a/crates/provider/src/lib.rs b/crates/provider/src/lib.rs index d24e1fd1..4b98ddcb 100644 --- a/crates/provider/src/lib.rs +++ b/crates/provider/src/lib.rs @@ -22,11 +22,14 @@ //! 
A provider is a type that provides access to blockchain data and functions mod ethers; +pub use ethers::{ + provider::new_provider, EntryPointV0_6 as EthersEntryPointV0_6, + EntryPointV0_7 as EthersEntryPointV0_7, +}; mod traits; -pub use traits::{ - AggregatorOut, AggregatorSimOut, EntryPoint, HandleOpsOut, PaymasterHelper, Provider, - ProviderError, ProviderResult, StakeManager, NonceManager, -}; #[cfg(any(test, feature = "test-utils"))] -pub use traits::{MockEntryPoint, MockPaymasterHelper, MockProvider, MockStakeManager, MockNonceManager}; +pub use traits::test_utils::*; +#[cfg(any(test, feature = "test-utils"))] +pub use traits::MockProvider; +pub use traits::*; diff --git a/crates/provider/src/traits/entry_point.rs b/crates/provider/src/traits/entry_point.rs index f06173df..67ab23d8 100644 --- a/crates/provider/src/traits/entry_point.rs +++ b/crates/provider/src/traits/entry_point.rs @@ -14,13 +14,31 @@ use ethers::types::{ spoof, transaction::eip2718::TypedTransaction, Address, BlockId, Bytes, H256, U256, }; -#[cfg(feature = "test-utils")] -use mockall::automock; use rundler_types::{ - contracts::{i_entry_point::ExecutionResult, shared_types::UserOpsPerAggregator}, - GasFees, UserOperation, + GasFees, Timestamp, UserOperation, UserOpsPerAggregator, ValidationError, ValidationOutput, + ValidationRevert, }; +/// Output of a successful signature aggregator simulation call +#[derive(Clone, Debug, Default)] +pub struct AggregatorSimOut { + /// Address of the aggregator contract + pub address: Address, + /// Aggregated signature + pub signature: Bytes, +} + +/// Result of a signature aggregator call +#[derive(Debug)] +pub enum AggregatorOut { + /// No aggregator used + NotNeeded, + /// Successful call + SuccessWithInfo(AggregatorSimOut), + /// Aggregator validation function reverted + ValidationReverted, +} + /// Result of an entry point handle ops call #[derive(Clone, Debug)] pub enum HandleOpsOut { @@ -35,61 +53,198 @@ pub enum HandleOpsOut { PostOpRevert, } 
+/// Deposit info for an address from the entry point contract +#[derive(Clone, Debug, Default)] +pub struct DepositInfo { + /// Amount deposited on the entry point + pub deposit: U256, + /// Whether the address has staked + pub staked: bool, + /// Amount staked on the entry point + pub stake: u128, + /// The amount of time in sections that must pass before the stake can be withdrawn + pub unstake_delay_sec: u32, + /// The time at which the stake can be withdrawn + pub withdraw_time: u64, +} + +/// Result of an execution +#[derive(Clone, Debug, Default)] +pub struct ExecutionResult { + /// Gas used before the operation execution + pub pre_op_gas: U256, + /// Amount paid by the operation + pub paid: U256, + /// Time which the operation is valid after + pub valid_after: Timestamp, + /// Time which the operation is valid until + pub valid_until: Timestamp, + /// True if the operation execution succeeded + pub target_success: bool, + /// Result of the operation execution + pub target_result: Bytes, +} + /// Trait for interacting with an entry point contract. -/// Implemented for the v0.6 version of the entry point contract. -/// [Contracts can be found here](https://github.com/eth-infinitism/account-abstraction/tree/v0.6.0). -#[cfg_attr(feature = "test-utils", automock)] #[async_trait::async_trait] +#[auto_impl::auto_impl(&, Arc)] pub trait EntryPoint: Send + Sync + 'static { /// Get the address of the entry point contract fn address(&self) -> Address; + /// Get the balance of an address + async fn balance_of(&self, address: Address, block_id: Option) + -> anyhow::Result; + + /// Get the deposit info for an address + async fn get_deposit_info(&self, address: Address) -> anyhow::Result; + + /// Get the balances of a list of addresses in order + async fn get_balances(&self, addresses: Vec
) -> anyhow::Result>; +} + +/// Trait for handling signature aggregators +#[async_trait::async_trait] +#[auto_impl::auto_impl(&, Arc)] +pub trait SignatureAggregator: Send + Sync + 'static { + /// The type of user operation used by this entry point + type UO: UserOperation; + + /// Call an aggregator to aggregate signatures for a set of operations + async fn aggregate_signatures( + &self, + aggregator_address: Address, + ops: Vec, + ) -> anyhow::Result>; + + /// Validate a user operation signature using an aggregator + async fn validate_user_op_signature( + &self, + aggregator_address: Address, + user_op: Self::UO, + gas_cap: u64, + ) -> anyhow::Result; +} + +/// Trait for submitting bundles of operations to an entry point contract +#[async_trait::async_trait] +#[auto_impl::auto_impl(&, Arc)] +pub trait BundleHandler: Send + Sync + 'static { + /// The type of user operation used by this entry point + type UO: UserOperation; + /// Call the entry point contract's `handleOps` function async fn call_handle_ops( &self, - ops_per_aggregator: Vec, + ops_per_aggregator: Vec>, beneficiary: Address, gas: U256, ) -> anyhow::Result; - /// Get the balance of an address - async fn balance_of(&self, address: Address, block_id: Option) - -> anyhow::Result; + /// Construct the transaction to send a bundle of operations to the entry point contract + fn get_send_bundle_transaction( + &self, + ops_per_aggregator: Vec>, + beneficiary: Address, + gas: U256, + gas_fees: GasFees, + ) -> TypedTransaction; +} + +/// Trait for calculating L1 gas costs for user operations +/// +/// Used for L2 gas estimation +#[async_trait::async_trait] +#[auto_impl::auto_impl(&, Arc)] +pub trait L1GasProvider: Send + Sync + 'static { + /// The type of user operation used by this entry point + type UO: UserOperation; + + /// Calculate the L1 portion of the gas for a user operation + /// + /// Returns zero for operations that do not require L1 gas + async fn calc_l1_gas( + &self, + entry_point_address: 
Address, + op: Self::UO, + gas_price: U256, + ) -> anyhow::Result; +} + +/// Call data along with necessary state overrides for calling the entry +/// point's `simulateHandleOp` function. +#[derive(Debug)] +pub struct SimulateOpCallData { + /// Call data representing a call to `simulateHandleOp` + pub call_data: Bytes, + /// Required state override. Necessary with the v0.7 entry point, where the + /// simulation methods aren't deployed on-chain but instead must be added + /// via state overrides + pub spoofed_state: spoof::State, +} + +/// Trait for simulating user operations on an entry point contract +#[async_trait::async_trait] +#[auto_impl::auto_impl(&, Arc)] +pub trait SimulationProvider: Send + Sync + 'static { + /// The type of user operation used by this entry point + type UO: UserOperation; + + /// Construct a call for the entry point contract's `simulateValidation` function + fn get_tracer_simulate_validation_call( + &self, + user_op: Self::UO, + max_validation_gas: u64, + ) -> (TypedTransaction, spoof::State); - /// Call the entry point contract's `simulateValidation` function - async fn simulate_validation( + /// Call the entry point contract's `simulateValidation` function. 
+ async fn call_simulate_validation( &self, - user_op: UserOperation, + user_op: Self::UO, max_validation_gas: u64, - ) -> anyhow::Result; + block_hash: Option, + ) -> Result; - /// Call the entry point contract's `simulateHandleOps` function + /// Get call data and state overrides needed to call `simulateHandleOp` + fn get_simulate_op_call_data( + &self, + op: Self::UO, + spoofed_state: &spoof::State, + ) -> SimulateOpCallData; + + /// Call the entry point contract's `simulateHandleOp` function /// with a spoofed state async fn call_spoofed_simulate_op( &self, - op: UserOperation, + op: Self::UO, target: Address, target_call_data: Bytes, block_hash: H256, gas: U256, spoofed_state: &spoof::State, - ) -> anyhow::Result>; - - /// Construct the transaction to send a bundle of operations to the entry point contract - fn get_send_bundle_transaction( - &self, - ops_per_aggregator: Vec, - beneficiary: Address, - gas: U256, - gas_fees: GasFees, - ) -> TypedTransaction; + ) -> anyhow::Result>; /// Decode the revert data from a call to `simulateHandleOps` fn decode_simulate_handle_ops_revert( &self, revert_data: Bytes, - ) -> Result; + ) -> Result; + + /// Returns true if this entry point uses reverts to communicate simulation + /// results. + fn simulation_should_revert(&self) -> bool; - /// Get the AA nonce for an account. 
Used for Hybrid Compute + /// Return the AA nonce for the given sender and key async fn get_nonce(&self, address: Address, key: ::ethers::core::types::U256) -> Result<::ethers::core::types::U256, String>; + +} + +/// Trait for a provider that provides all entry point functionality +pub trait EntryPointProvider: + EntryPoint + + SignatureAggregator + + BundleHandler + + SimulationProvider + + L1GasProvider +{ } diff --git a/crates/provider/src/traits/mod.rs b/crates/provider/src/traits/mod.rs index fa61b001..73d9a54c 100644 --- a/crates/provider/src/traits/mod.rs +++ b/crates/provider/src/traits/mod.rs @@ -17,26 +17,11 @@ mod error; pub use error::ProviderError; mod entry_point; -#[cfg(feature = "test-utils")] -pub use entry_point::MockEntryPoint; -pub use entry_point::{EntryPoint, HandleOpsOut}; +pub use entry_point::*; mod provider; #[cfg(feature = "test-utils")] pub use provider::MockProvider; -pub use provider::{AggregatorOut, AggregatorSimOut, Provider, ProviderResult}; - -mod stake_manager; -#[cfg(feature = "test-utils")] -pub use stake_manager::MockStakeManager; -pub use stake_manager::StakeManager; - -mod nonce_manager; -#[cfg(feature = "test-utils")] -pub use nonce_manager::MockNonceManager; -pub use nonce_manager::NonceManager; - -mod paymaster_helper; +pub use provider::{Provider, ProviderResult}; #[cfg(feature = "test-utils")] -pub use paymaster_helper::MockPaymasterHelper; -pub use paymaster_helper::PaymasterHelper; +pub(crate) mod test_utils; diff --git a/crates/provider/src/traits/provider.rs b/crates/provider/src/traits/provider.rs index 173430f1..0aac930b 100644 --- a/crates/provider/src/traits/provider.rs +++ b/crates/provider/src/traits/provider.rs @@ -15,38 +15,21 @@ use std::{fmt::Debug, sync::Arc}; -use ethers::types::{ - spoof, transaction::eip2718::TypedTransaction, Address, Block, BlockId, BlockNumber, Bytes, - FeeHistory, Filter, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, Log, - Transaction, TransactionReceipt, TxHash, 
H256, U256, U64, +use ethers::{ + abi::{AbiDecode, AbiEncode}, + types::{ + spoof, transaction::eip2718::TypedTransaction, Address, Block, BlockId, BlockNumber, Bytes, + FeeHistory, Filter, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, Log, + Transaction, TransactionReceipt, TxHash, H256, U256, U64, + }, }; #[cfg(feature = "test-utils")] use mockall::automock; -use rundler_types::UserOperation; +use rundler_types::contracts::utils::get_gas_used::GasUsedResult; use serde::{de::DeserializeOwned, Serialize}; use super::error::ProviderError; -/// Output of a successful signature aggregator simulation call -#[derive(Clone, Debug, Default)] -pub struct AggregatorSimOut { - /// Address of the aggregator contract - pub address: Address, - /// Aggregated signature - pub signature: Bytes, -} - -/// Result of a signature aggregator call -#[derive(Debug)] -pub enum AggregatorOut { - /// No aggregator used - NotNeeded, - /// Successful call - SuccessWithInfo(AggregatorSimOut), - /// Aggregator validation function reverted - ValidationReverted, -} - /// Result of a provider method call pub type ProviderResult = Result; @@ -76,6 +59,19 @@ pub trait Provider: Send + Sync + Debug + 'static { state_overrides: &spoof::State, ) -> ProviderResult; + /// Call a contract's constructor + /// The constructor is assumed to revert with the result data + async fn call_constructor( + &self, + bytecode: &Bytes, + args: A, + block_id: Option, + state_overrides: &spoof::State, + ) -> anyhow::Result + where + A: AbiEncode + Send + Sync + 'static, + R: AbiDecode + Send + Sync + 'static; + /// Get the current block number async fn get_block_number(&self) -> ProviderResult; @@ -133,33 +129,19 @@ pub trait Provider: Send + Sync + Debug + 'static { /// Get the logs matching a filter async fn get_logs(&self, filter: &Filter) -> ProviderResult>; - /// Call an aggregator to aggregate signatures for a set of operations - async fn aggregate_signatures( - self: Arc, - aggregator_address: 
Address, - ops: Vec, - ) -> ProviderResult>; - - /// Validate a user operation signature using an aggregator - async fn validate_user_op_signature( - self: Arc, - aggregator_address: Address, - user_op: UserOperation, - gas_cap: u64, - ) -> ProviderResult; - - /// Calculate the L1 portion of the gas for a user operation on Arbitrum - async fn calc_arbitrum_l1_gas( - self: Arc, - entry_point_address: Address, - op: UserOperation, - ) -> ProviderResult; - - /// Calculate the L1 portion of the gas for a user operation on optimism - async fn calc_optimism_l1_gas( - self: Arc, - entry_point_address: Address, - op: UserOperation, - gas_price: U256, - ) -> ProviderResult; + /// Measures the gas used by a call to target with value and data. + async fn get_gas_used( + self: &Arc, + target: Address, + value: U256, + data: Bytes, + state_overrides: spoof::State, + ) -> ProviderResult; + + /// Get the storage values at a given address and slots + async fn batch_get_storage_at( + &self, + address: Address, + slots: Vec, + ) -> ProviderResult>; } diff --git a/crates/provider/src/traits/test_utils.rs b/crates/provider/src/traits/test_utils.rs new file mode 100644 index 00000000..4b149fb0 --- /dev/null +++ b/crates/provider/src/traits/test_utils.rs @@ -0,0 +1,214 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. 
+ +use ethers::types::{ + spoof, transaction::eip2718::TypedTransaction, Address, BlockId, Bytes, H256, U256, +}; +use rundler_types::{ + v0_6, v0_7, GasFees, UserOpsPerAggregator, ValidationError, ValidationOutput, ValidationRevert, +}; + +use crate::{ + AggregatorOut, BundleHandler, DepositInfo, EntryPoint, ExecutionResult, HandleOpsOut, + L1GasProvider, SignatureAggregator, SimulateOpCallData, SimulationProvider, +}; + +mockall::mock! { + pub EntryPointV0_6 {} + + #[async_trait::async_trait] + impl EntryPoint for EntryPointV0_6 { + fn address(&self) -> Address; + async fn balance_of(&self, address: Address, block_id: Option) + -> anyhow::Result; + async fn get_deposit_info(&self, address: Address) -> anyhow::Result; + async fn get_balances(&self, addresses: Vec
) -> anyhow::Result>; + } + + #[async_trait::async_trait] + impl SignatureAggregator for EntryPointV0_6 { + type UO = v0_6::UserOperation; + async fn aggregate_signatures( + &self, + aggregator_address: Address, + ops: Vec, + ) -> anyhow::Result>; + async fn validate_user_op_signature( + &self, + aggregator_address: Address, + user_op: v0_6::UserOperation, + gas_cap: u64, + ) -> anyhow::Result; + } + + #[async_trait::async_trait] + impl SimulationProvider for EntryPointV0_6 { + type UO = v0_6::UserOperation; + fn get_tracer_simulate_validation_call( + &self, + user_op: v0_6::UserOperation, + max_validation_gas: u64, + ) -> (TypedTransaction, spoof::State); + async fn call_simulate_validation( + &self, + user_op: v0_6::UserOperation, + max_validation_gas: u64, + block_hash: Option + ) -> Result; + fn get_simulate_op_call_data( + &self, + op: v0_6::UserOperation, + spoofed_state: &spoof::State, + ) -> SimulateOpCallData; + async fn call_spoofed_simulate_op( + &self, + op: v0_6::UserOperation, + target: Address, + target_call_data: Bytes, + block_hash: H256, + gas: U256, + spoofed_state: &spoof::State, + ) -> anyhow::Result>; + fn decode_simulate_handle_ops_revert( + &self, + revert_data: Bytes, + ) -> Result; + fn simulation_should_revert(&self) -> bool; + async fn get_nonce(&self, address: Address, key: ::ethers::core::types::U256) -> Result<::ethers::core::types::U256, String>; + + } + + #[async_trait::async_trait] + impl L1GasProvider for EntryPointV0_6 { + type UO = v0_6::UserOperation; + async fn calc_l1_gas( + &self, + entry_point_address: Address, + op: v0_6::UserOperation, + gas_price: U256, + ) -> anyhow::Result; + } + + #[async_trait::async_trait] + impl BundleHandler for EntryPointV0_6 { + type UO = v0_6::UserOperation; + async fn call_handle_ops( + &self, + ops_per_aggregator: Vec>, + beneficiary: Address, + gas: U256, + ) -> anyhow::Result; + fn get_send_bundle_transaction( + &self, + ops_per_aggregator: Vec>, + beneficiary: Address, + gas: U256, + 
gas_fees: GasFees, + ) -> TypedTransaction; + } +} + +mockall::mock! { + pub EntryPointV0_7 {} + + #[async_trait::async_trait] + impl EntryPoint for EntryPointV0_7 { + fn address(&self) -> Address; + async fn balance_of(&self, address: Address, block_id: Option) + -> anyhow::Result; + async fn get_deposit_info(&self, address: Address) -> anyhow::Result; + async fn get_balances(&self, addresses: Vec
) -> anyhow::Result>; + } + + #[async_trait::async_trait] + impl SignatureAggregator for EntryPointV0_7 { + type UO = v0_7::UserOperation; + async fn aggregate_signatures( + &self, + aggregator_address: Address, + ops: Vec, + ) -> anyhow::Result>; + async fn validate_user_op_signature( + &self, + aggregator_address: Address, + user_op: v0_7::UserOperation, + gas_cap: u64, + ) -> anyhow::Result; + } + + #[async_trait::async_trait] + impl SimulationProvider for EntryPointV0_7 { + type UO = v0_7::UserOperation; + fn get_tracer_simulate_validation_call( + &self, + user_op: v0_7::UserOperation, + max_validation_gas: u64, + ) -> (TypedTransaction, spoof::State); + async fn call_simulate_validation( + &self, + user_op: v0_7::UserOperation, + max_validation_gas: u64, + block_hash: Option + ) -> Result; + fn get_simulate_op_call_data( + &self, + op: v0_7::UserOperation, + spoofed_state: &spoof::State, + ) -> SimulateOpCallData; + async fn call_spoofed_simulate_op( + &self, + op: v0_7::UserOperation, + target: Address, + target_call_data: Bytes, + block_hash: H256, + gas: U256, + spoofed_state: &spoof::State, + ) -> anyhow::Result>; + fn decode_simulate_handle_ops_revert( + &self, + revert_data: Bytes, + ) -> Result; + fn simulation_should_revert(&self) -> bool; + async fn get_nonce(&self, address: Address, key: ::ethers::core::types::U256) -> Result<::ethers::core::types::U256, String>; + } + + #[async_trait::async_trait] + impl L1GasProvider for EntryPointV0_7 { + type UO = v0_7::UserOperation; + async fn calc_l1_gas( + &self, + entry_point_address: Address, + op: v0_7::UserOperation, + gas_price: U256, + ) -> anyhow::Result; + } + + #[async_trait::async_trait] + impl BundleHandler for EntryPointV0_7 { + type UO = v0_7::UserOperation; + async fn call_handle_ops( + &self, + ops_per_aggregator: Vec>, + beneficiary: Address, + gas: U256, + ) -> anyhow::Result; + fn get_send_bundle_transaction( + &self, + ops_per_aggregator: Vec>, + beneficiary: Address, + gas: U256, + 
gas_fees: GasFees, + ) -> TypedTransaction; + } + +} diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 53f6c532..00f4d6bd 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -7,8 +7,6 @@ license.workspace = true repository.workspace = true [dependencies] -rundler-builder = { path = "../builder" } -rundler-pool = { path = "../pool" } rundler-provider = { path = "../provider" } rundler-sim = { path = "../sim" } rundler-task = { path = "../task" } @@ -36,4 +34,5 @@ tower-http.workspace = true [dev-dependencies] mockall.workspace = true rundler-provider = { path = "../provider", features = ["test-utils"]} -rundler-pool = { path = "../pool", features = ["test-utils"] } +rundler-sim = { path = "../sim", features = ["test-utils"] } +rundler-types= { path = "../types", features = ["test-utils"]} diff --git a/crates/rpc/src/admin.rs b/crates/rpc/src/admin.rs new file mode 100644 index 00000000..73e896fc --- /dev/null +++ b/crates/rpc/src/admin.rs @@ -0,0 +1,110 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. 
+ +use anyhow::Context; +use async_trait::async_trait; +use ethers::types::Address; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use rundler_types::pool::Pool; + +use crate::{ + types::{RpcAdminClearState, RpcAdminSetTracking}, + utils::{self, InternalRpcResult}, +}; + +/// Admin API +#[rpc(client, server, namespace = "admin")] +pub trait AdminApi { + /// Clears the state of the mempool field if name is true + #[method(name = "clearState")] + async fn clear_state(&self, clear_params: RpcAdminClearState) -> RpcResult; + + /// Sets the tracking state for the paymaster and reputation pool modules + #[method(name = "setTracking")] + async fn set_tracking( + &self, + entry_point: Address, + tracking_info: RpcAdminSetTracking, + ) -> RpcResult; +} + +pub(crate) struct AdminApi

{ + pool: P, +} + +impl

AdminApi

{ + pub(crate) fn new(pool: P) -> Self { + Self { pool } + } +} + +#[async_trait] +impl

AdminApiServer for AdminApi

+where + P: Pool, +{ + async fn clear_state(&self, clear_params: RpcAdminClearState) -> RpcResult { + utils::safe_call_rpc_handler( + "admin_clearState", + AdminApi::clear_state(self, clear_params), + ) + .await + } + + async fn set_tracking( + &self, + entry_point: Address, + tracking_params: RpcAdminSetTracking, + ) -> RpcResult { + utils::safe_call_rpc_handler( + "admin_setTracking", + AdminApi::set_tracking(self, entry_point, tracking_params), + ) + .await + } +} + +impl

AdminApi

+where + P: Pool, +{ + async fn clear_state(&self, clear_params: RpcAdminClearState) -> InternalRpcResult { + self.pool + .debug_clear_state( + clear_params.clear_mempool.unwrap_or(false), + clear_params.clear_paymaster.unwrap_or(false), + clear_params.clear_reputation.unwrap_or(false), + ) + .await + .context("should clear state")?; + + Ok("ok".to_string()) + } + + async fn set_tracking( + &self, + entry_point: Address, + tracking_params: RpcAdminSetTracking, + ) -> InternalRpcResult { + self.pool + .admin_set_tracking( + entry_point, + tracking_params.paymaster_tracking, + tracking_params.reputation_tracking, + ) + .await + .context("should set tracking")?; + + Ok("ok".to_string()) + } +} diff --git a/crates/rpc/src/debug.rs b/crates/rpc/src/debug.rs index 8bb92d01..69efaff0 100644 --- a/crates/rpc/src/debug.rs +++ b/crates/rpc/src/debug.rs @@ -11,18 +11,22 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. 
+use anyhow::Context; use async_trait::async_trait; use ethers::types::{Address, H256}; use futures_util::StreamExt; -use jsonrpsee::{core::RpcResult, proc_macros::rpc, types::error::INTERNAL_ERROR_CODE}; -use rundler_builder::{BuilderServer, BundlingMode}; -use rundler_pool::PoolServer; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use rundler_types::{ + builder::{Builder, BundlingMode}, + pool::Pool, +}; use crate::{ - error::rpc_err, types::{ - RpcReputationInput, RpcReputationOutput, RpcStakeInfo, RpcStakeStatus, RpcUserOperation, + RpcDebugPaymasterBalance, RpcReputationInput, RpcReputationOutput, RpcStakeInfo, + RpcStakeStatus, RpcUserOperation, }, + utils::{self, InternalRpcResult}, }; /// Debug API @@ -69,9 +73,16 @@ pub trait DebugApi { #[method(name = "bundler_getStakeStatus")] async fn bundler_get_stake_status( &self, - entry_point: Address, address: Address, + entry_point: Address, ) -> RpcResult; + + /// Dumps the paymaster balance cache + #[method(name = "bundler_dumpPaymasterBalances")] + async fn bundler_dump_paymaster_balances( + &self, + entry_point: Address, + ) -> RpcResult>; } pub(crate) struct DebugApi { @@ -88,52 +99,145 @@ impl DebugApi { #[async_trait] impl DebugApiServer for DebugApi where - P: PoolServer, - B: BuilderServer, + P: Pool, + B: Builder, { async fn bundler_clear_state(&self) -> RpcResult { - let _ = self - .pool - .debug_clear_state(true, true) + utils::safe_call_rpc_handler("bundler_clearState", DebugApi::bundler_clear_state(self)) .await - .map_err(|e| rpc_err(INTERNAL_ERROR_CODE, e.to_string()))?; + } + + async fn bundler_clear_mempool(&self) -> RpcResult { + utils::safe_call_rpc_handler( + "bundler_clearMempool", + DebugApi::bundler_clear_mempool(self), + ) + .await + } + + async fn bundler_dump_mempool(&self, entry_point: Address) -> RpcResult> { + utils::safe_call_rpc_handler( + "bundler_dumpMempool", + DebugApi::bundler_dump_mempool(self, entry_point), + ) + .await + } + + async fn bundler_send_bundle_now(&self) 
-> RpcResult { + utils::safe_call_rpc_handler( + "bundler_sendBundleNow", + DebugApi::bundler_send_bundle_now(self), + ) + .await + } + + async fn bundler_set_bundling_mode(&self, mode: BundlingMode) -> RpcResult { + utils::safe_call_rpc_handler( + "bundler_setBundlingMode", + DebugApi::bundler_set_bundling_mode(self, mode), + ) + .await + } + + async fn bundler_set_reputation( + &self, + reputations: Vec, + entry_point: Address, + ) -> RpcResult { + utils::safe_call_rpc_handler( + "bundler_setReputation", + DebugApi::bundler_set_reputation(self, reputations, entry_point), + ) + .await + } + + async fn bundler_dump_reputation( + &self, + entry_point: Address, + ) -> RpcResult> { + utils::safe_call_rpc_handler( + "bundler_dumpReputation", + DebugApi::bundler_dump_reputation(self, entry_point), + ) + .await + } + + async fn bundler_get_stake_status( + &self, + address: Address, + entry_point: Address, + ) -> RpcResult { + utils::safe_call_rpc_handler( + "bundler_getStakeStatus", + DebugApi::bundler_get_stake_status(self, address, entry_point), + ) + .await + } + + async fn bundler_dump_paymaster_balances( + &self, + entry_point: Address, + ) -> RpcResult> { + utils::safe_call_rpc_handler( + "bundler_dumpPaymasterBalances", + DebugApi::bundler_dump_paymaster_balances(self, entry_point), + ) + .await + } +} + +impl DebugApi +where + P: Pool, + B: Builder, +{ + async fn bundler_clear_state(&self) -> InternalRpcResult { + self.pool + .debug_clear_state(true, true, true) + .await + .context("should clear state")?; Ok("ok".to_string()) } - async fn bundler_clear_mempool(&self) -> RpcResult { - let _ = self - .pool - .debug_clear_state(true, false) + async fn bundler_clear_mempool(&self) -> InternalRpcResult { + self.pool + .debug_clear_state(true, true, false) .await - .map_err(|e| rpc_err(INTERNAL_ERROR_CODE, e.to_string()))?; + .context("should clear mempool")?; Ok("ok".to_string()) } - async fn bundler_dump_mempool(&self, entry_point: Address) -> RpcResult> { + async fn 
bundler_dump_mempool( + &self, + entry_point: Address, + ) -> InternalRpcResult> { Ok(self .pool .debug_dump_mempool(entry_point) .await - .map_err(|e| rpc_err(INTERNAL_ERROR_CODE, e.to_string()))? + .context("should dump mempool")? .into_iter() .map(|pop| pop.uo.into()) .collect::>()) } - async fn bundler_send_bundle_now(&self) -> RpcResult { + async fn bundler_send_bundle_now(&self) -> InternalRpcResult { + tracing::debug!("Sending bundle"); + let mut new_heads = self .pool .subscribe_new_heads() .await - .map_err(|e| rpc_err(INTERNAL_ERROR_CODE, e.to_string()))?; + .context("should subscribe new heads")?; - let (tx, block_number) = self - .builder - .debug_send_bundle_now() - .await - .map_err(|e| rpc_err(INTERNAL_ERROR_CODE, e.to_string()))?; + let (tx, block_number) = self.builder.debug_send_bundle_now().await.map_err(|e| { + tracing::error!("Error sending bundle {e:?}"); + anyhow::anyhow!(e) + })?; + + tracing::debug!("Waiting for block number {block_number}"); // After the bundle is sent, we need to make sure that the mempool // has processes the same block that the transaction was mined on. 
@@ -147,7 +251,7 @@ where } } None => { - return Err(rpc_err(INTERNAL_ERROR_CODE, "Next block not available")); + Err(anyhow::anyhow!("Next block not available"))?; } } } @@ -155,11 +259,13 @@ where Ok(tx) } - async fn bundler_set_bundling_mode(&self, mode: BundlingMode) -> RpcResult { + async fn bundler_set_bundling_mode(&self, mode: BundlingMode) -> InternalRpcResult { + tracing::debug!("Setting bundling mode to {:?}", mode); + self.builder .debug_set_bundling_mode(mode) .await - .map_err(|e| rpc_err(INTERNAL_ERROR_CODE, e.to_string()))?; + .context("should set bundling mode")?; Ok("ok".to_string()) } @@ -168,7 +274,7 @@ where &self, reputations: Vec, entry_point: Address, - ) -> RpcResult { + ) -> InternalRpcResult { let _ = self .pool .debug_set_reputations( @@ -183,12 +289,12 @@ where async fn bundler_dump_reputation( &self, entry_point: Address, - ) -> RpcResult> { + ) -> InternalRpcResult> { let result = self .pool .debug_dump_reputation(entry_point) .await - .map_err(|e| rpc_err(INTERNAL_ERROR_CODE, e.to_string()))?; + .context("should dump reputation")?; let mut results = Vec::new(); for r in result { @@ -196,7 +302,7 @@ where .pool .get_reputation_status(entry_point, r.address) .await - .map_err(|e| rpc_err(INTERNAL_ERROR_CODE, e.to_string()))?; + .context("should get reputation status")?; let reputation = RpcReputationOutput { address: r.address, @@ -215,20 +321,44 @@ where &self, address: Address, entry_point: Address, - ) -> RpcResult { + ) -> InternalRpcResult { let result = self .pool .get_stake_status(entry_point, address) .await - .map_err(|e| rpc_err(INTERNAL_ERROR_CODE, e.to_string()))?; + .context("should get stake status")?; Ok(RpcStakeStatus { is_staked: result.is_staked, stake_info: RpcStakeInfo { addr: address, - stake: result.stake_info.stake, - unstake_delay_sec: result.stake_info.unstake_delay_sec, + stake: result.stake_info.stake.as_u128(), + unstake_delay_sec: result.stake_info.unstake_delay_sec.as_u32(), }, }) } + + async fn 
bundler_dump_paymaster_balances( + &self, + entry_point: Address, + ) -> InternalRpcResult> { + let result = self + .pool + .debug_dump_paymaster_balances(entry_point) + .await + .context("should dump paymaster balances")?; + + let mut results = Vec::new(); + for b in result { + let balance = RpcDebugPaymasterBalance { + address: b.address, + pending_balance: b.pending_balance, + confirmed_balance: b.confirmed_balance, + }; + + results.push(balance); + } + + Ok(results) + } } diff --git a/crates/rpc/src/eth/api.rs b/crates/rpc/src/eth/api.rs index e2293762..01a4b470 100644 --- a/crates/rpc/src/eth/api.rs +++ b/crates/rpc/src/eth/api.rs @@ -12,99 +12,55 @@ // If not, see https://www.gnu.org/licenses/. use std::{ - collections::{HashMap, VecDeque}, - sync::Arc, + collections::{HashMap}, + future::Future, + pin::Pin, }; -use anyhow::Context; use ethers::{ - abi::{AbiDecode, RawLog}, - prelude::EthEvent, - types::{ - spoof, Address, Bytes, Filter, GethDebugBuiltInTracerType, GethDebugTracerType, - GethDebugTracingOptions, GethTrace, GethTraceFrame, Log, TransactionReceipt, H256, U256, - U64, - }, + types::{spoof, Address, H256, U64}, utils::{to_checksum, hex}, }; -use rundler_pool::PoolServer; -use rundler_provider::{EntryPoint, Provider }; -use rundler_sim::{ - EstimationSettings, FeeEstimator, GasEstimate, GasEstimationError, GasEstimator, - GasEstimatorImpl, PrecheckSettings, UserOperationOptionalGas, -}; +use futures_util::future; use rundler_types::{ - contracts::i_entry_point::{ - IEntryPointCalls, UserOperationEventFilter, UserOperationRevertReasonFilter, - }, - contracts::hc_helper::{HCHelper as HH2}, - contracts::simple_account::SimpleAccount, - UserOperation, + chain::ChainSpec, pool::Pool, UserOperation, UserOperationOptionalGas, UserOperationVariant, + contracts::v0_6::hc_helper::{HCHelper as HH2}, + contracts::v0_6::simple_account::SimpleAccount, + }; -use rundler_utils::{eth::log_to_raw_log, log::LogOnError}; +use rundler_utils::log::LogOnError; use 
tracing::Level; -use super::error::{EthResult, EthRpcError, ExecutionRevertedWithBytesData}; -use crate::types::{RichUserOperation, RpcUserOperation, UserOperationReceipt}; +use super::{ + error::{EthResult, EthRpcError}, + router::EntryPointRouter, +}; +use crate::types::{RpcGasEstimate, RpcUserOperationByHash, RpcUserOperationReceipt}; use rundler_types::hybrid_compute; -//use ethers::types::BigEndianHash; - +use ethers::types::{U256,Bytes}; use jsonrpsee::{ core::{client::ClientT, params::ObjectParams, JsonValue}, http_client::{HttpClientBuilder}, }; -use rundler_utils::eth; -//use std::backtrace::Backtrace; +use crate::types::RpcGasEstimateV0_6; + /// Settings for the `eth_` API -#[derive(Clone, Debug)] // FIXME - can't Copy because of hc.node_http string +#[derive(Copy, Clone, Debug)] pub struct Settings { /// The number of blocks to look back for user operation events pub user_operation_event_block_distance: Option, - /// HybridCompute info - pub hc: hybrid_compute::HcCfg, } impl Settings { /// Create new settings for the `eth_` API - pub fn new( - block_distance: Option, - ) -> Self { + pub fn new(block_distance: Option) -> Self { Self { user_operation_event_block_distance: block_distance, - hc: hybrid_compute::HC_CONFIG.lock().unwrap().clone(), } } } -#[derive(Debug)] -struct EntryPointContext { - gas_estimator: GasEstimatorImpl, -} - -impl EntryPointContext -where - P: Provider, - E: EntryPoint, -{ - fn new( - chain_id: u64, - provider: Arc

, - entry_point: E, - estimation_settings: EstimationSettings, - fee_estimator: FeeEstimator

, - ) -> Self { - let gas_estimator = GasEstimatorImpl::new( - chain_id, - provider, - entry_point, - estimation_settings, - fee_estimator, - ); - Self { gas_estimator } - } -} - // Can't track down what's causing the gas differences between // simulateHandleOps and SimulateValidation, so pad it and // hope for the best. Unused gas will be refunded. @@ -113,109 +69,78 @@ const VG_PAD:i32 = 20000; // FIXME - Workaround for another occasional failure. const PVG_PAD:i32 = 5000; -#[derive(Debug)] -pub(crate) struct EthApi where E: EntryPoint { - contexts_by_entry_point: HashMap>, - provider: Arc

, - chain_id: u64, - pool: PS, - settings: Settings, +pub(crate) struct EthApi

{ + pub(crate) chain_spec: ChainSpec, + pool: P, + router: EntryPointRouter, + hc: hybrid_compute::HcCfg, } -impl EthApi +impl

EthApi

where - P: Provider, - E: EntryPoint, - PS: PoolServer, + P: Pool, { - pub(crate) fn new( - provider: Arc

, - entry_points: Vec, - chain_id: u64, - pool: PS, - settings: Settings, - estimation_settings: EstimationSettings, - precheck_settings: PrecheckSettings, - ) -> Self - where - E: Clone, - { - let contexts_by_entry_point = entry_points - .into_iter() - .map(|entry_point| { - ( - entry_point.address(), - EntryPointContext::new( - chain_id, - Arc::clone(&provider), - entry_point, - estimation_settings, - FeeEstimator::new( - Arc::clone(&provider), - chain_id, - precheck_settings.priority_fee_mode, - precheck_settings.bundle_priority_fee_overhead_percent, - ), - ), - ) - }) - .collect(); - + pub(crate) fn new(chain_spec: ChainSpec, router: EntryPointRouter, pool: P) -> Self { + let hc = hybrid_compute::HC_CONFIG.lock().unwrap().clone(); Self { - settings, - contexts_by_entry_point, - provider, - chain_id, + router, pool, + chain_spec, + hc, } } pub(crate) async fn send_user_operation( &self, - op: RpcUserOperation, + op: UserOperationVariant, entry_point: Address, ) -> EthResult { - if !self.contexts_by_entry_point.contains_key(&entry_point) { - return Err(EthRpcError::InvalidParams( - "supplied entry point addr is not a known entry point".to_string(), - )); - } println!("HC send_user_operation {:?}", op); + let bundle_size = op.single_uo_bundle_size_bytes(); + if bundle_size > self.chain_spec.max_transaction_size_bytes { + return Err(EthRpcError::InvalidParams(format!( + "User operation in bundle size {} exceeds max transaction size {}", + bundle_size, self.chain_spec.max_transaction_size_bytes + ))); + } + + self.router.check_and_get_route(&entry_point, &op)?; + self.pool - .add_op(entry_point, op.into()) + .add_op(entry_point, op) .await .map_err(EthRpcError::from) .log_on_error_level(Level::DEBUG, "failed to add op to the mempool") } - - // Verify that the trigger string came from the HCHelper contract async fn hc_verify_trigger( &self, - context:&EntryPointContext, + //context:&EntryPointContext, + entry_point: Address, op: UserOperationOptionalGas, key: H256, 
state_override: Option, ) -> bool { - let mut s2 = state_override.clone().unwrap_or_default(); - let hc_addr = self.settings.hc.helper_addr; + let mut s2 = state_override.clone().unwrap_or_default(); + let hc_addr = self.hc.helper_addr; // Set a 1-byte value which will trigger a special revert code let val_vrfy = "0xff00000000000000000000000000000000000000000000000000000000000002".parse::().unwrap(); s2.account(hc_addr).store(key, H256::from_slice(&val_vrfy)); - let result_v = context - .gas_estimator - .estimate_op_gas(op.clone(), s2.clone(), None) - .await; - println!("HC HC result_v {:?}", result_v); + let result_v = self.router + .estimate_gas(&entry_point, op.clone(), Some(s2), None) + .await; + + println!("HC result_v {:?}", result_v); match result_v { - Err(GasEstimationError::RevertInCallWithMessage(msg)) => { - if msg == "_HC_VRFY".to_string() { + Err(EthRpcError::ExecutionReverted(ref msg)) => { + if *msg == "_HC_VRFY".to_string() { return true; } } _ => {} } + false } @@ -223,41 +148,43 @@ where // with its data inserted into the HCHelper contract's state. 
async fn hc_simulate_response( &self, - context:&EntryPointContext, + //context:&EntryPointContext, + entry_point: Address, op: UserOperationOptionalGas, state_override: Option, revert_data: &Bytes, - ) -> Result { - + ) -> EthResult { let s2 = state_override.unwrap_or_default(); - - let es = EstimationSettings { - max_verification_gas: 0, - max_call_gas: 0, - max_simulate_handle_ops_gas: 0, - validation_estimation_gas_fee: 0, - }; - let hh = op.clone().into_user_operation(&es).op_hc_hash(); + //let es = EstimationSettings { + // max_verification_gas: 0, + // max_call_gas: 0, + // max_simulate_handle_ops_gas: 0, + // validation_estimation_gas_fee: 0, + //}; + let op6:rundler_types::v0_6::UserOperationOptionalGas = op.clone().into(); + + let hh = op6.clone().into_user_operation(U256::from(0),U256::from(0)).hc_hash(); println!("HC api.rs hh {:?}", hh); let ep_addr = hybrid_compute::hc_ep_addr(revert_data); - let n_key:U256 = op.nonce >> 64; - let at_price = op.max_priority_fee_per_gas; - let hc_nonce = context.gas_estimator.entry_point.get_nonce(op.sender, n_key).await.unwrap(); - let err_nonce = context.gas_estimator.entry_point.get_nonce(self.settings.hc.sys_account, n_key).await.unwrap(); - println!("HC hc_nonce {:?} op_nonce {:?} n_key {:?}", hc_nonce, op.nonce, n_key); - let p2 = eth::new_provider(&self.settings.hc.node_http, None)?; + let n_key:U256 = op6.nonce >> 64; + let at_price = op6.max_priority_fee_per_gas; + //let hc_nonce = context.gas_estimator.entry_point.get_nonce(op6.sender, n_key).await.unwrap(); + let hc_nonce = self.router.get_nonce(&entry_point, op6.sender, n_key).await.unwrap(); - let hx = HH2::new(self.settings.hc.helper_addr, p2.clone()); + let err_nonce = self.router.get_nonce(&entry_point, self.hc.sys_account, n_key).await.unwrap(); + println!("HC hc_nonce {:?} err_nonce {:?} op_nonce {:?} n_key {:?}", hc_nonce, err_nonce, op6.nonce, n_key); + let p2 = rundler_provider::new_provider(&self.hc.node_http, None)?; + + let hx = 
HH2::new(self.hc.helper_addr, p2.clone()); let url = hx.registered_callers(ep_addr).await.expect("url_decode").1; println!("HC registered_caller url {:?}", url); let cc = HttpClientBuilder::default().build(url); // could specify a request_timeout() here. if cc.is_err() { - return Err(GasEstimationError::RevertInValidation("Invalid URL registered for HC".to_string())); + return Err(EthRpcError::Internal(anyhow::anyhow!("Invalid URL registered for HC"))); } - let m = hex::encode(hybrid_compute::hc_selector(revert_data)); let sub_key = hybrid_compute::hc_sub_key(revert_data); let sk_hex = hex::encode(sub_key); @@ -268,17 +195,16 @@ where let payload = hex::encode(hybrid_compute::hc_req_payload(revert_data)); let n_bytes:[u8; 32] = (hc_nonce).into(); let src_n = hex::encode(n_bytes); - let src_addr = hex::encode(op.sender); + let src_addr = hex::encode(op6.sender); - let oo_n_key:U256 = U256::from_big_endian(op.sender.as_fixed_bytes()); - let oo_nonce = context.gas_estimator.entry_point.get_nonce(ep_addr, oo_n_key).await.unwrap(); + let oo_n_key:U256 = U256::from_big_endian(op6.sender.as_fixed_bytes()); + let oo_nonce = self.router.get_nonce(&entry_point, ep_addr, oo_n_key).await.unwrap(); let ha_owner = SimpleAccount::new(ep_addr, p2).owner().await; if ha_owner.is_err() { - return Err(GasEstimationError::RevertInValidation("Failed to look up HybridAccount owner".to_string())); + return Err(EthRpcError::Internal(anyhow::anyhow!("Failed to look up HybridAccount owner"))); } - const REQ_VERSION:&str = "0.2"; let mut params = ObjectParams::new(); @@ -304,7 +230,7 @@ where let hc_res:Bytes = hex::decode(resp_hex).unwrap().into(); //println!("HC api.rs do_op result sk {:?} success {:?} res {:?}", sub_key, op_success, hc_res); - err_hc = hybrid_compute::external_op(hh, op.sender, hc_nonce, op_success, &hc_res, sub_key, ep_addr, sig_hex, oo_nonce, map_key, &self.settings.hc, ha_owner.unwrap(), err_nonce).await; + err_hc = hybrid_compute::external_op(hh, op6.sender, hc_nonce, 
op_success, &hc_res, sub_key, ep_addr, sig_hex, oo_nonce, map_key, &self.hc, ha_owner.unwrap(), err_nonce).await; } else { err_hc = hybrid_compute::HcErr{code: 3, message:"HC03: Decode Error".to_string()}; } @@ -338,21 +264,29 @@ where if err_hc.code != 0 { println!("HC api.rs calling err_op {:?}", err_hc.message); - hybrid_compute::err_op(hh, context.gas_estimator.entry_point.address(), err_hc.clone(), sub_key, op.sender, hc_nonce, err_nonce, map_key, &self.settings.hc).await; + hybrid_compute::err_op(hh, entry_point, err_hc.clone(), sub_key, op6.sender, hc_nonce, err_nonce, map_key, &self.hc).await; } let s2 = hybrid_compute::get_hc_op_statediff(hh, s2); - let result2 = context - .gas_estimator - .estimate_op_gas(op, s2, None) + let result2 = self.router + .estimate_gas(&entry_point, op.clone(), Some(s2), None) .await; println!("HC result2 {:?}", result2); + let r3:RpcGasEstimateV0_6; if result2.is_ok() { println!("HC api.rs Ok gas result2 = {:?}", result2); - let r3 = result2.unwrap(); + let r3a = result2.unwrap(); + match r3a { + RpcGasEstimate::V0_6(abc) => { + r3 = abc; + }, + _ => { + return Err(EthRpcError::Internal(anyhow::anyhow!("HC04 user_op gas estimation failed"))); + } + } let op_tmp = hybrid_compute::get_hc_ent(hh).unwrap().user_op; - let op_tmp_2: UserOperationOptionalGas = UserOperationOptionalGas { + let op_tmp_2: rundler_types::v0_6::UserOperationOptionalGas = rundler_types::v0_6::UserOperationOptionalGas { sender: op_tmp.sender, nonce: op_tmp.nonce, init_code: op_tmp.init_code, @@ -369,33 +303,41 @@ where // The op_tmp_2 below specifies a 0 gas price, but we need to estimate the L1 fee at the // price offered by real userOperation which will be paying for it. 
- let r2a = context - .gas_estimator - .estimate_op_gas(op_tmp_2.clone(), spoof::State::default(), at_price) + let r2a = self.router + .estimate_gas(&entry_point, rundler_types::UserOperationOptionalGas::V0_6(op_tmp_2.clone()), Some(spoof::State::default()), at_price) .await; - if let Err(GasEstimationError::RevertInValidation(ref r2_err)) = r2a { + if let Err(EthRpcError::ExecutionReverted(ref r2_err)) = r2a { // FIXME println!("HC op_tmp_2 gas estimation failed (RevertInValidation)"); let msg = "HC04: Offchain validation failed: ".to_string() + &r2_err; - return Err(GasEstimationError::RevertInValidation(msg)); + return Err(EthRpcError::Internal(anyhow::anyhow!(msg))); }; - let r2 = r2a?; + + let r2:RpcGasEstimateV0_6; + match r2a? { + RpcGasEstimate::V0_6(abc) => { + r2 = abc; + }, + _ => { + return Err(EthRpcError::Internal(anyhow::anyhow!("HC04 offchain_op gas estimation failed"))); + } + } // The current formula used to estimate gas usage in the offchain_rpc service // sometimes underestimates the true cost. For now all we can do is error here. 
if r2.call_gas_limit > op_tmp_2.call_gas_limit.unwrap() { println!("HC op_tmp_2 failed, call_gas_limit too low"); let msg = "HC04: Offchain call_gas_limit too low".to_string(); - return Err(GasEstimationError::RevertInValidation(msg)); + return Err(EthRpcError::Internal(anyhow::anyhow!(msg))); } let offchain_gas = r2.pre_verification_gas + r2.verification_gas_limit + r2.call_gas_limit; let mut cleanup_keys:Vec = Vec::new(); cleanup_keys.push(map_key); - let c_nonce = context.gas_estimator.entry_point.get_nonce(self.settings.hc.sys_account, U256::zero()).await.unwrap(); - let cleanup_op = hybrid_compute::rr_op(&self.settings.hc, c_nonce, cleanup_keys.clone()).await; - let op_tmp_4: UserOperationOptionalGas = UserOperationOptionalGas { + let c_nonce = self.router.get_nonce(&entry_point, self.hc.sys_account, U256::zero()).await.unwrap(); + let cleanup_op = hybrid_compute::rr_op(&self.hc, c_nonce, cleanup_keys.clone()).await; + let op_tmp_4: rundler_types::v0_6::UserOperationOptionalGas = rundler_types::v0_6::UserOperationOptionalGas { sender: cleanup_op.sender, nonce: cleanup_op.nonce, init_code: cleanup_op.init_code, @@ -408,8 +350,18 @@ where paymaster_and_data: cleanup_op.paymaster_and_data, signature: cleanup_op.signature, }; - //println!("HC op_tmp_4 {:?} {:?}", op_tmp_4, cleanup_keys); - let r4 = context.gas_estimator.estimate_op_gas(op_tmp_4, spoof::State::default(), at_price).await?; + println!("HC op_tmp_4 {:?} {:?}", op_tmp_4, cleanup_keys); + let r4a = self.router.estimate_gas(&entry_point, rundler_types::UserOperationOptionalGas::V0_6(op_tmp_4), Some(spoof::State::default()), at_price).await; + let r4:RpcGasEstimateV0_6; + match r4a? 
{ + RpcGasEstimate::V0_6(abc) => { + r4 = abc; + }, + _ => { + return Err(EthRpcError::Internal(anyhow::anyhow!("HC04 cleanup_op gas estimation failed"))); + } + } + let cleanup_gas = r4.pre_verification_gas + r4.verification_gas_limit + r4.call_gas_limit; let op_gas = r3.pre_verification_gas + r3.verification_gas_limit + r3.call_gas_limit; println!("HC api.rs offchain_gas estimate {:?} sum {:?}", r2, offchain_gas); @@ -422,20 +374,20 @@ where hybrid_compute::hc_set_pvg(hh, needed_pvg, offchain_gas + cleanup_gas + offchain_gas); if err_hc.code != 0 { - return Err(GasEstimationError::RevertInValidation(err_hc.message)); + return Err(EthRpcError::Internal(anyhow::anyhow!(err_hc.message))); } let total_gas = needed_pvg + (r3.verification_gas_limit + VG_PAD) + r3.call_gas_limit; if total_gas > U256::from(25_000_000) { // Approaching the block gas limit let err_msg:String = "Excessive HC total_gas estimate = ".to_owned() + &total_gas.to_string(); - return Err(GasEstimationError::RevertInValidation(err_msg)); + return Err(EthRpcError::Internal(anyhow::anyhow!(err_msg))); } - return Ok(GasEstimate { + return Ok(RpcGasEstimateV0_6 { pre_verification_gas: (needed_pvg + PVG_PAD), verification_gas_limit: r3.verification_gas_limit, call_gas_limit: r3.call_gas_limit, - }); + }.into()); } else { return result2; } @@ -446,243 +398,122 @@ where op: UserOperationOptionalGas, entry_point: Address, state_override: Option, - ) -> EthResult { - let context = self - .contexts_by_entry_point - .get(&entry_point) - .ok_or_else(|| { - EthRpcError::InvalidParams( - "supplied entry_point address is not a known entry point".to_string(), - ) - })?; - - //println!("HC api.rs Before estimate_gas {:?}", op); - let mut result = context - .gas_estimator - .estimate_op_gas(op.clone(), state_override.clone().unwrap_or_default(), None) + ) -> EthResult { + let bundle_size = op.single_uo_bundle_size_bytes(); + if bundle_size > self.chain_spec.max_transaction_size_bytes { + return 
Err(EthRpcError::InvalidParams(format!( + "User operation in bundle size {} exceeds max transaction size {}", + bundle_size, self.chain_spec.max_transaction_size_bytes + ))); + } + + let mut result = self.router + .estimate_gas(&entry_point, op.clone(), state_override.clone(), None) .await; + println!("HC api.rs estimate_gas result1 {:?}", result); match result { - Ok(_) => {} - Err(GasEstimationError::RevertInCallWithBytes(ref revert_data)) => { - if hybrid_compute::check_trigger(revert_data) { - let bn = self.provider.get_block_number().await.unwrap(); - println!("HC api.rs HC trigger at bn {}", bn); - - let map_key = hybrid_compute::hc_map_key(revert_data); - let key:H256 = hybrid_compute::hc_storage_key(map_key); - - if self.hc_verify_trigger(context, op.clone(), key, state_override.clone()).await { - result = self.hc_simulate_response(context, op, state_override, revert_data).await; - } else { - println!("HC did not get expected _HC_VRFY"); - let msg = "HC04: Failed to verify trigger event".to_owned(); - return Err(EthRpcError::Internal(anyhow::anyhow!(msg))); - } + Ok(ref estimate) => { + match estimate { + RpcGasEstimate::V0_6(estimate6) => { + return Ok(RpcGasEstimateV0_6{ + pre_verification_gas: estimate6.pre_verification_gas, + verification_gas_limit: estimate6.verification_gas_limit + VG_PAD, + call_gas_limit: estimate6.call_gas_limit, + }.into()); + }, + _ => {} + } + } + Err(EthRpcError::ExecutionRevertedWithBytes(ref r)) => { + if hybrid_compute::check_trigger(&r.revert_data) { + let bn = 0; //self.provider.get_block_number().await.unwrap(); + println!("HC api.rs HC trigger at bn {}", bn); + + let map_key = hybrid_compute::hc_map_key(&r.revert_data); + let key:H256 = hybrid_compute::hc_storage_key(map_key); + + if self.hc_verify_trigger(entry_point, op.clone(), key, state_override.clone()).await { + result = self.hc_simulate_response(entry_point, op, state_override, &r.revert_data).await; + } else { + println!("HC did not get expected _HC_VRFY"); + 
let msg = "HC04: Failed to verify trigger event".to_owned(); + return Err(EthRpcError::Internal(anyhow::anyhow!(msg))); + } + } } - } - Err(_) => {} + Err(_) => {} } - - match result { - Ok(estimate) => Ok(GasEstimate { - pre_verification_gas: estimate.pre_verification_gas, - verification_gas_limit: estimate.verification_gas_limit + VG_PAD, - call_gas_limit: estimate.call_gas_limit, - }), - Err(GasEstimationError::RevertInValidation(message)) => { - Err(EthRpcError::EntryPointValidationRejected(message))? - } - Err(GasEstimationError::RevertInCallWithMessage(message)) => { - Err(EthRpcError::ExecutionReverted(message))? - } - Err(GasEstimationError::RevertInCallWithBytes(b)) => { - Err(EthRpcError::ExecutionRevertedWithBytes( - ExecutionRevertedWithBytesData { revert_data: b }, - ))? - } - Err(GasEstimationError::Other(error)) => Err(error)?, - } + result } pub(crate) async fn get_user_operation_by_hash( &self, hash: H256, - ) -> EthResult> { + ) -> EthResult> { if hash == H256::zero() { return Err(EthRpcError::InvalidParams( "Missing/invalid userOpHash".to_string(), )); } - // check for the user operation both in the pool and mined on chain - let mined_fut = self.get_mined_user_operation_by_hash(hash); - let pending_fut = self.get_pending_user_operation_by_hash(hash); - let (mined, pending) = tokio::join!(mined_fut, pending_fut); - - // mined takes precedence over pending - if let Ok(Some(mined)) = mined { - Ok(Some(mined)) - } else if let Ok(Some(pending)) = pending { - Ok(Some(pending)) - } else if mined.is_err() || pending.is_err() { - // if either futures errored, and the UO was not found, return the errors - Err(EthRpcError::Internal(anyhow::anyhow!( - "error fetching user operation by hash: mined: {:?}, pending: {:?}", - mined.err().map(|e| e.to_string()).unwrap_or_default(), - pending.err().map(|e| e.to_string()).unwrap_or_default(), - ))) - } else { - // not found in either pool or mined - Ok(None) + // check both entry points and pending for the user 
operation event + #[allow(clippy::type_complexity)] + let mut futs: Vec< + Pin>> + Send>>, + > = vec![]; + + for ep in self.router.entry_points() { + futs.push(Box::pin(self.router.get_mined_by_hash(ep, hash))); } + futs.push(Box::pin(self.get_pending_user_operation_by_hash(hash))); + + let results = future::try_join_all(futs).await?; + Ok(results.into_iter().find_map(|x| x)) } pub(crate) async fn get_user_operation_receipt( &self, hash: H256, - ) -> EthResult> { + ) -> EthResult> { if hash == H256::zero() { return Err(EthRpcError::InvalidParams( "Missing/invalid userOpHash".to_string(), )); } - // Get event associated with hash (need to check all entry point addresses associated with this API) - let log = self - .get_user_operation_event_by_hash(hash) - .await - .context("should have fetched user ops by hash")?; + let futs = self + .router + .entry_points() + .map(|ep| self.router.get_receipt(ep, hash)); - let Some(log) = log else { return Ok(None) }; - let entry_point = log.address; - - // If the event is found, get the TX receipt - let tx_hash = log.transaction_hash.context("tx_hash should be present")?; - let tx_receipt = self - .provider - .get_transaction_receipt(tx_hash) - .await - .context("should have fetched tx receipt")? 
- .context("Failed to fetch tx receipt")?; - - // Return null if the tx isn't included in the block yet - if tx_receipt.block_hash.is_none() && tx_receipt.block_number.is_none() { - return Ok(None); - } - - // Filter receipt logs to match just those belonging to the user op - let filtered_logs = - EthApi::::filter_receipt_logs_matching_user_op(&log, &tx_receipt) - .context("should have found receipt logs matching user op")?; - - // Decode log and find failure reason if not success - let uo_event = self - .decode_user_operation_event(log) - .context("should have decoded user operation event")?; - let reason: String = if uo_event.success { - "".to_owned() - } else { - EthApi::::get_user_operation_failure_reason(&tx_receipt.logs, hash) - .context("should have found revert reason if tx wasn't successful")? - .unwrap_or_default() - }; - - Ok(Some(UserOperationReceipt { - user_op_hash: hash, - entry_point: entry_point.into(), - sender: uo_event.sender.into(), - nonce: uo_event.nonce, - paymaster: uo_event.paymaster.into(), - actual_gas_cost: uo_event.actual_gas_cost, - actual_gas_used: uo_event.actual_gas_used, - success: uo_event.success, - logs: filtered_logs, - receipt: tx_receipt, - reason, - })) + let results = future::try_join_all(futs).await?; + Ok(results.into_iter().find_map(|x| x)) } pub(crate) async fn supported_entry_points(&self) -> EthResult> { Ok(self - .contexts_by_entry_point - .keys() + .router + .entry_points() .map(|ep| to_checksum(ep, None)) .collect()) } pub(crate) async fn chain_id(&self) -> EthResult { - Ok(self.chain_id.into()) - } - - async fn get_mined_user_operation_by_hash( - &self, - hash: H256, - ) -> EthResult> { - // Get event associated with hash (need to check all entry point addresses associated with this API) - let event = self - .get_user_operation_event_by_hash(hash) - .await - .log_on_error("should have successfully queried for user op events by hash")?; - - let Some(event) = event else { return Ok(None) }; - - // If the event is 
found, get the TX and entry point - let transaction_hash = event - .transaction_hash - .context("tx_hash should be present")?; - - let tx = self - .provider - .get_transaction(transaction_hash) - .await - .context("should have fetched tx from provider")? - .context("should have found tx")?; - - // We should return null if the tx isn't included in the block yet - if tx.block_hash.is_none() && tx.block_number.is_none() { - return Ok(None); - } - let to = tx - .to - .context("tx.to should be present on transaction containing user operation event")?; - - // Find first op matching the hash - let user_operation = if self.contexts_by_entry_point.contains_key(&to) { - self.get_user_operations_from_tx_data(tx.input) - .into_iter() - .find(|op| op.op_hash(to, self.chain_id) == hash) - .context("matching user operation should be found in tx data")? - } else { - self.trace_find_user_operation(transaction_hash, hash) - .await - .context("error running trace")? - .context("should have found user operation in trace")? 
- }; - - Ok(Some(RichUserOperation { - user_operation: user_operation.into(), - entry_point: event.address.into(), - block_number: Some( - tx.block_number - .map(|n| U256::from(n.as_u64())) - .unwrap_or_default(), - ), - block_hash: Some(tx.block_hash.unwrap_or_default()), - transaction_hash: Some(transaction_hash), - })) + Ok(self.chain_spec.id.into()) } async fn get_pending_user_operation_by_hash( &self, hash: H256, - ) -> EthResult> { + ) -> EthResult> { let res = self .pool .get_op_by_hash(hash) .await .map_err(EthRpcError::from)?; - Ok(res.map(|op| RichUserOperation { + + Ok(res.map(|op| RpcUserOperationByHash { user_operation: op.uo.into(), entry_point: op.entry_point.into(), block_number: None, @@ -690,349 +521,50 @@ where transaction_hash: None, })) } - - async fn get_user_operation_event_by_hash(&self, hash: H256) -> EthResult> { - let to_block = self.provider.get_block_number().await?; - - let from_block = match self.settings.user_operation_event_block_distance { - Some(distance) => to_block.saturating_sub(distance), - None => 0, - }; - - let filter = Filter::new() - .address::>( - self.contexts_by_entry_point - .iter() - .map(|ep| *ep.0) - .collect(), - ) - .event(&UserOperationEventFilter::abi_signature()) - .from_block(from_block) - .to_block(to_block) - .topic1(hash); - - let logs = self.provider.get_logs(&filter).await?; - Ok(logs.into_iter().next()) - } - - fn get_user_operations_from_tx_data(&self, tx_data: Bytes) -> Vec { - let entry_point_calls = match IEntryPointCalls::decode(tx_data) { - Ok(entry_point_calls) => entry_point_calls, - Err(_) => return vec![], - }; - - match entry_point_calls { - IEntryPointCalls::HandleOps(handle_ops_call) => handle_ops_call.ops, - IEntryPointCalls::HandleAggregatedOps(handle_aggregated_ops_call) => { - handle_aggregated_ops_call - .ops_per_aggregator - .into_iter() - .flat_map(|ops| ops.user_ops) - .collect() - } - _ => vec![], - } - } - - fn decode_user_operation_event(&self, log: Log) -> EthResult { - 
Ok(UserOperationEventFilter::decode_log(&log_to_raw_log(log)) - .context("log should be a user operation event")?) - } - - /// This method takes a user operation event and a transaction receipt and filters out all the logs - /// relevant to the user operation. Since there are potentially many user operations in a transaction, - /// we want to find all the logs (including the user operation event itself) that are sandwiched between - /// ours and the one before it that wasn't ours. - /// eg. reference_log: UserOp(hash_moldy) logs: \[...OtherLogs, UserOp(hash1), ...OtherLogs, UserOp(hash_moldy), ...OtherLogs\] - /// -> logs: logs\[(idx_of_UserOp(hash1) + 1)..=idx_of_UserOp(hash_moldy)\] - /// - /// topic\[0\] == event name - /// topic\[1\] == user operation hash - /// - /// NOTE: we can't convert just decode all the logs as user operations and filter because we still want all the other log types - /// - fn filter_receipt_logs_matching_user_op( - reference_log: &Log, - tx_receipt: &TransactionReceipt, - ) -> EthResult> { - let mut start_idx = 0; - let mut end_idx = tx_receipt.logs.len() - 1; - let logs = &tx_receipt.logs; - - let is_ref_user_op = |log: &Log| { - log.topics[0] == reference_log.topics[0] - && log.topics[1] == reference_log.topics[1] - && log.address == reference_log.address - }; - - let is_user_op_event = |log: &Log| log.topics[0] == reference_log.topics[0]; - - let mut i = 0; - while i < logs.len() { - if i < end_idx && is_user_op_event(&logs[i]) && !is_ref_user_op(&logs[i]) { - start_idx = i; - } else if is_ref_user_op(&logs[i]) { - end_idx = i; - } - - i += 1; - } - - if !is_ref_user_op(&logs[end_idx]) { - return Err(EthRpcError::Internal(anyhow::anyhow!( - "fatal: no user ops found in tx receipt ({start_idx},{end_idx})" - ))); - } - - let start_idx = if start_idx == 0 { 0 } else { start_idx + 1 }; - Ok(logs[start_idx..=end_idx].to_vec()) - } - - fn get_user_operation_failure_reason( - logs: &[Log], - user_op_hash: H256, - ) -> EthResult> { - let 
revert_reason_evt: Option = logs - .iter() - .filter(|l| l.topics.len() > 1 && l.topics[1] == user_op_hash) - .map_while(|l| { - UserOperationRevertReasonFilter::decode_log(&RawLog { - topics: l.topics.clone(), - data: l.data.to_vec(), - }) - .ok() - }) - .next(); - - Ok(revert_reason_evt.map(|r| r.revert_reason.to_string())) - } - - /// This method takes a transaction hash and a user operation hash and returns the full user operation if it exists. - /// This is meant to be used when a user operation event is found in the logs of a transaction, but the top level call - /// wasn't to an entrypoint, so we need to trace the transaction to find the user operation by inspecting each call frame - /// and returning the user operation that matches the hash. - async fn trace_find_user_operation( - &self, - tx_hash: H256, - user_op_hash: H256, - ) -> EthResult> { - // initial call wasn't to an entrypoint, so we need to trace the transaction to find the user operation - let trace_options = GethDebugTracingOptions { - tracer: Some(GethDebugTracerType::BuiltInTracer( - GethDebugBuiltInTracerType::CallTracer, - )), - ..Default::default() - }; - println!("HC trace_find_user_operation pre"); - let trace = self - .provider - .debug_trace_transaction(tx_hash, trace_options) - .await - .context("should have fetched trace from provider")?; - println!("HC trace_find_user_operation post {:?}", trace); - - // breadth first search for the user operation in the trace - let mut frame_queue = VecDeque::new(); - - if let GethTrace::Known(GethTraceFrame::CallTracer(call_frame)) = trace { - frame_queue.push_back(call_frame); - } - - while let Some(call_frame) = frame_queue.pop_front() { - // check if the call is to an entrypoint, if not enqueue the child calls if any - if let Some(to) = call_frame - .to - .as_ref() - .and_then(|to| to.as_address()) - .filter(|to| self.contexts_by_entry_point.contains_key(to)) - { - // check if the user operation is in the call frame - if let Some(uo) = self - 
.get_user_operations_from_tx_data(call_frame.input) - .into_iter() - .find(|op| op.op_hash(*to, self.chain_id) == user_op_hash) - { - return Ok(Some(uo)); - } - } else if let Some(calls) = call_frame.calls { - frame_queue.extend(calls) - } - } - - Ok(None) - } } #[cfg(test)] mod tests { + use std::sync::Arc; + use ethers::{ abi::AbiEncode, - types::{Log, Transaction, TransactionReceipt}, - utils::keccak256, + types::{Bytes, Log, Transaction}, }; use mockall::predicate::eq; - use rundler_pool::{MockPoolServer, PoolOperation}; - use rundler_provider::{MockEntryPoint, MockProvider}; - use rundler_sim::PriorityFeeMode; - use rundler_types::contracts::i_entry_point::HandleOpsCall; + use rundler_provider::{MockEntryPointV0_6, MockProvider}; + use rundler_sim::MockGasEstimator; + use rundler_types::{ + contracts::v0_6::i_entry_point::{HandleOpsCall, IEntryPointCalls}, + pool::{MockPool, PoolOperation}, + v0_6::UserOperation, + EntityInfos, UserOperation as UserOperationTrait, ValidTimeRange, + }; use super::*; - - const UO_OP_TOPIC: &str = "user-op-event-topic"; - - #[test] - fn test_filter_receipt_logs_when_at_beginning_of_list() { - let reference_log = given_log(UO_OP_TOPIC, "moldy-hash"); - let receipt = given_receipt(vec![ - given_log("other-topic", "some-hash"), - reference_log.clone(), - given_log(UO_OP_TOPIC, "other-hash"), - given_log(UO_OP_TOPIC, "another-hash"), - ]); - - let result = - EthApi::::filter_receipt_logs_matching_user_op( - &reference_log, - &receipt, - ); - - assert!(result.is_ok(), "{}", result.unwrap_err()); - let result = result.unwrap(); - assert_eq!(result, receipt.logs[0..=1]); - } - - #[test] - fn test_filter_receipt_logs_when_in_middle_of_list() { - let reference_log = given_log(UO_OP_TOPIC, "moldy-hash"); - let receipt = given_receipt(vec![ - given_log("other-topic", "some-hash"), - given_log(UO_OP_TOPIC, "other-hash"), - given_log("another-topic", "some-hash"), - given_log("another-topic-2", "some-hash"), - reference_log.clone(), - 
given_log(UO_OP_TOPIC, "another-hash"), - ]); - - let result = - EthApi::::filter_receipt_logs_matching_user_op( - &reference_log, - &receipt, - ); - - assert!(result.is_ok(), "{}", result.unwrap_err()); - let result = result.unwrap(); - assert_eq!(result, receipt.logs[2..=4]); - } - - #[test] - fn test_filter_receipt_logs_when_at_end_of_list() { - let reference_log = given_log(UO_OP_TOPIC, "moldy-hash"); - let receipt = given_receipt(vec![ - given_log("other-topic", "some-hash"), - given_log(UO_OP_TOPIC, "other-hash"), - given_log(UO_OP_TOPIC, "another-hash"), - given_log("another-topic", "some-hash"), - given_log("another-topic-2", "some-hash"), - reference_log.clone(), - ]); - - let result = - EthApi::::filter_receipt_logs_matching_user_op( - &reference_log, - &receipt, - ); - - assert!(result.is_ok(), "{}", result.unwrap_err()); - let result = result.unwrap(); - assert_eq!(result, receipt.logs[3..=5]); - } - - #[test] - fn test_filter_receipt_logs_skips_event_from_different_address() { - let reference_log = given_log(UO_OP_TOPIC, "moldy-hash"); - let mut reference_log_w_different_address = reference_log.clone(); - reference_log_w_different_address.address = Address::from_low_u64_be(0x1234); - - let receipt = given_receipt(vec![ - given_log("other-topic", "some-hash"), - given_log(UO_OP_TOPIC, "other-hash"), - given_log(UO_OP_TOPIC, "another-hash"), - reference_log_w_different_address, - given_log("another-topic", "some-hash"), - given_log("another-topic-2", "some-hash"), - reference_log.clone(), - ]); - - let result = - EthApi::::filter_receipt_logs_matching_user_op( - &reference_log, - &receipt, - ); - - assert!(result.is_ok(), "{}", result.unwrap_err()); - let result = result.unwrap(); - assert_eq!(result, receipt.logs[4..=6]); - } - - #[test] - fn test_filter_receipt_logs_includes_multiple_sets_of_ref_uo() { - let reference_log = given_log(UO_OP_TOPIC, "moldy-hash"); - - let receipt = given_receipt(vec![ - given_log("other-topic", "some-hash"), - 
given_log(UO_OP_TOPIC, "other-hash"), - given_log("other-topic-2", "another-hash"), - reference_log.clone(), - given_log("another-topic", "some-hash"), - given_log("another-topic-2", "some-hash"), - reference_log.clone(), - given_log(UO_OP_TOPIC, "other-hash"), - ]); - - let result = - EthApi::::filter_receipt_logs_matching_user_op( - &reference_log, - &receipt, - ); - - assert!(result.is_ok(), "{}", result.unwrap_err()); - let result = result.unwrap(); - assert_eq!(result, receipt.logs[2..=6]); - } - - #[test] - fn test_filter_receipt_logs_when_not_found() { - let reference_log = given_log(UO_OP_TOPIC, "moldy-hash"); - let receipt = given_receipt(vec![ - given_log("other-topic", "some-hash"), - given_log(UO_OP_TOPIC, "other-hash"), - given_log(UO_OP_TOPIC, "another-hash"), - given_log("another-topic", "some-hash"), - given_log("another-topic-2", "some-hash"), - ]); - - let result = - EthApi::::filter_receipt_logs_matching_user_op( - &reference_log, - &receipt, - ); - - assert!(result.is_err(), "{:?}", result.unwrap()); - } + use crate::eth::{ + EntryPointRouteImpl, EntryPointRouterBuilder, UserOperationEventProviderV0_6, + }; #[tokio::test] async fn test_get_user_op_by_hash_pending() { let ep = Address::random(); let uo = UserOperation::default(); - let hash = uo.op_hash(ep, 1); + let hash = uo.hash(ep, 1); let po = PoolOperation { - uo: uo.clone(), + uo: uo.clone().into(), entry_point: ep, - ..Default::default() + aggregator: None, + valid_time_range: ValidTimeRange::default(), + expected_code_hash: H256::random(), + sim_block_hash: H256::random(), + sim_block_number: 1000, + account_is_staked: false, + entity_infos: EntityInfos::default(), }; - let mut pool = MockPoolServer::default(); + let mut pool = MockPool::default(); pool.expect_get_op_by_hash() .with(eq(hash)) .times(1) @@ -1042,13 +574,13 @@ mod tests { provider.expect_get_logs().returning(move |_| Ok(vec![])); provider.expect_get_block_number().returning(|| Ok(1000)); - let mut entry_point = 
MockEntryPoint::default(); + let mut entry_point = MockEntryPointV0_6::default(); entry_point.expect_address().returning(move || ep); - let api = create_api(provider, entry_point, pool); + let api = create_api(provider, entry_point, pool, MockGasEstimator::default()); let res = api.get_user_operation_by_hash(hash).await.unwrap(); - let ro = RichUserOperation { - user_operation: uo.into(), + let ro = RpcUserOperationByHash { + user_operation: UserOperationVariant::from(uo).into(), entry_point: ep.into(), block_number: None, block_hash: None, @@ -1059,13 +591,17 @@ mod tests { #[tokio::test] async fn test_get_user_op_by_hash_mined() { - let ep = Address::random(); + let cs = ChainSpec { + id: 1, + ..Default::default() + }; + let ep = cs.entry_point_address_v0_6; let uo = UserOperation::default(); - let hash = uo.op_hash(ep, 1); + let hash = uo.hash(ep, 1); let block_number = 1000; let block_hash = H256::random(); - let mut pool = MockPoolServer::default(); + let mut pool = MockPool::default(); pool.expect_get_op_by_hash() .with(eq(hash)) .returning(move |_| Ok(None)); @@ -1101,13 +637,13 @@ mod tests { .with(eq(tx_hash)) .returning(move |_| Ok(Some(tx.clone()))); - let mut entry_point = MockEntryPoint::default(); + let mut entry_point = MockEntryPointV0_6::default(); entry_point.expect_address().returning(move || ep); - let api = create_api(provider, entry_point, pool); + let api = create_api(provider, entry_point, pool, MockGasEstimator::default()); let res = api.get_user_operation_by_hash(hash).await.unwrap(); - let ro = RichUserOperation { - user_operation: uo.into(), + let ro = RpcUserOperationByHash { + user_operation: UserOperationVariant::from(uo).into(), entry_point: ep.into(), block_number: Some(block_number.into()), block_hash: Some(block_hash), @@ -1120,9 +656,9 @@ mod tests { async fn test_get_user_op_by_hash_not_found() { let ep = Address::random(); let uo = UserOperation::default(); - let hash = uo.op_hash(ep, 1); + let hash = uo.hash(ep, 1); - let mut 
pool = MockPoolServer::default(); + let mut pool = MockPool::default(); pool.expect_get_op_by_hash() .with(eq(hash)) .times(1) @@ -1132,64 +668,40 @@ mod tests { provider.expect_get_logs().returning(move |_| Ok(vec![])); provider.expect_get_block_number().returning(|| Ok(1000)); - let mut entry_point = MockEntryPoint::default(); + let mut entry_point = MockEntryPointV0_6::default(); entry_point.expect_address().returning(move || ep); - let api = create_api(provider, entry_point, pool); + let api = create_api(provider, entry_point, pool, MockGasEstimator::default()); let res = api.get_user_operation_by_hash(hash).await.unwrap(); assert_eq!(res, None); } - fn given_log(topic_0: &str, topic_1: &str) -> Log { - Log { - topics: vec![ - keccak256(topic_0.as_bytes()).into(), - keccak256(topic_1.as_bytes()).into(), - ], - ..Default::default() - } - } - - fn given_receipt(logs: Vec) -> TransactionReceipt { - TransactionReceipt { - logs, - ..Default::default() - } - } - fn create_api( provider: MockProvider, - ep: MockEntryPoint, - pool: MockPoolServer, - ) -> EthApi { - let mut contexts_by_entry_point = HashMap::new(); + ep: MockEntryPointV0_6, + pool: MockPool, + gas_estimator: MockGasEstimator, + ) -> EthApi { + let ep = Arc::new(ep); let provider = Arc::new(provider); - contexts_by_entry_point.insert( - ep.address(), - EntryPointContext::new( - 1, - Arc::clone(&provider), - ep, - EstimationSettings { - max_verification_gas: 1_000_000, - max_call_gas: 1_000_000, - max_simulate_handle_ops_gas: 1_000_000, - validation_estimation_gas_fee: 1_000_000_000_000, - }, - FeeEstimator::new( - Arc::clone(&provider), - 1, - PriorityFeeMode::BaseFeePercent(0), - 0, - ), - ), - ); + let chain_spec = ChainSpec { + id: 1, + ..Default::default() + }; + + let router = EntryPointRouterBuilder::default() + .v0_6(EntryPointRouteImpl::new( + ep.clone(), + gas_estimator, + UserOperationEventProviderV0_6::new(chain_spec.clone(), provider.clone(), None), + )) + .build(); + EthApi { - 
contexts_by_entry_point, - provider, - chain_id: 1, + router, + chain_spec, pool, - settings: Settings::new(None), + hc: hybrid_compute::HC_CONFIG.lock().unwrap().clone(), } } } diff --git a/crates/rpc/src/eth/error.rs b/crates/rpc/src/eth/error.rs index 5b77fc71..183a139e 100644 --- a/crates/rpc/src/eth/error.rs +++ b/crates/rpc/src/eth/error.rs @@ -11,15 +11,19 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. -use ethers::types::{Address, Bytes, Opcode, U256}; +use std::fmt::Display; + +use ethers::types::{Address, Bytes, U256}; use jsonrpsee::types::{ error::{CALL_EXECUTION_FAILED_CODE, INTERNAL_ERROR_CODE, INVALID_PARAMS_CODE}, ErrorObjectOwned, }; -use rundler_pool::{MempoolError, PoolServerError}; use rundler_provider::ProviderError; -use rundler_sim::{PrecheckViolation, SimulationViolation}; -use rundler_types::{Entity, EntityType, Timestamp}; +use rundler_sim::GasEstimationError; +use rundler_types::{ + pool::{MempoolError, PoolError, PrecheckViolation, SimulationViolation}, + Entity, EntityType, Opcode, Timestamp, ValidationRevert, +}; use serde::Serialize; use crate::error::{rpc_err, rpc_err_with_data}; @@ -38,6 +42,7 @@ const THROTTLED_OR_BANNED_CODE: i32 = -32504; const STAKE_TOO_LOW_CODE: i32 = -32505; const UNSUPORTED_AGGREGATOR_CODE: i32 = -32506; const SIGNATURE_CHECK_FAILED_CODE: i32 = -32507; +const PAYMASTER_DEPOSIT_TOO_LOW: i32 = -32508; const EXECUTION_REVERTED: i32 = -32521; pub(crate) type EthResult = Result; @@ -69,12 +74,18 @@ pub enum EthRpcError { /// Sender address used as different entity in another UserOperation currently in the mempool. 
#[error("The sender address {0} is used as a different entity in another UserOperation currently in mempool")] SenderAddressUsedAsAlternateEntity(Address), + /// Simulation ran out of gas + #[error("Simulation ran out of gas for entity: {0}")] + OutOfGas(Entity), /// Opcode violation #[error("{0} uses banned opcode: {1:?}")] OpcodeViolation(EntityType, Opcode), /// Used for other simulation violations that map to Opcode Violations #[error("{0}")] OpcodeViolationMap(SimulationViolation), + /// Associated storage accessed during deployment with unstaked factory or accessing entity + #[error("Sender storage at (address: {1:?} slot: {2:#032x}) accessed during deployment. Factory (or {0:?}) must be staked")] + AssociatedStorageDuringDeploy(Option, Address, U256), /// Invalid storage access, maps to Opcode Violation #[error("{0} accesses inaccessible storage at address: {1:?} slot: {2:#032x}")] InvalidStorageAccess(EntityType, Address, U256), @@ -82,8 +93,8 @@ pub enum EthRpcError { #[error("operation is out of time range")] OutOfTimeRange(OutOfTimeRangeData), /// Max operations reached for this sender - #[error("Max operations ({0}) reached for sender {1:#032x} due to being unstaked")] - MaxOperationsReached(usize, Address), + #[error("Max operations ({0}) reached for {1} due to being unstaked")] + MaxOperationsReached(usize, Entity), /// Entity throttled or banned #[error("{} {:#032x} throttled or banned", .0.kind, .0.address)] ThrottledOrBanned(Entity), @@ -108,10 +119,16 @@ pub enum EthRpcError { /// Other internal errors #[error("Invalid UserOp signature or paymaster signature")] SignatureCheckFailed, + #[error("Invalid account signature")] + AccountSignatureCheckFailed, + #[error("Invalid paymaster signature")] + PaymasterSignatureCheckFailed, #[error("precheck failed: {0}")] PrecheckFailed(PrecheckViolation), #[error("validation simulation failed: {0}")] SimulationFailed(SimulationViolation), + #[error("validation reverted: {0}")] + 
ValidationRevert(ValidationRevertData), #[error("{0}")] ExecutionReverted(String), #[error("execution reverted")] @@ -138,7 +155,8 @@ pub struct OutOfTimeRangeData { #[derive(Debug, Clone, Serialize)] #[serde(rename_all = "camelCase")] pub struct StakeTooLowData { - entity: Entity, + needs_stake: Entity, + accessing_entity: EntityType, accessed_address: Address, accessed_entity: Option, slot: U256, @@ -148,7 +166,8 @@ pub struct StakeTooLowData { impl StakeTooLowData { pub fn new( - entity: Entity, + needs_stake: Entity, + accessing_entity: EntityType, accessed_address: Address, accessed_entity: Option, slot: U256, @@ -156,7 +175,8 @@ impl StakeTooLowData { minimum_unstake_delay: U256, ) -> Self { Self { - entity, + needs_stake, + accessing_entity, accessed_address, accessed_entity, slot, @@ -166,6 +186,52 @@ impl StakeTooLowData { } } +#[derive(Debug, Clone, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct ValidationRevertData { + reason: Option, + inner_reason: Option, + revert_data: Option, +} + +impl Display for ValidationRevertData { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if let Some(reason) = &self.reason { + write!(f, "[reason]: {}", reason)?; + } + if let Some(inner_reason) = &self.inner_reason { + write!(f, " | [inner reason]: {}", inner_reason)?; + } + Ok(()) + } +} + +impl From for ValidationRevertData { + fn from(value: ValidationRevert) -> Self { + match value { + ValidationRevert::EntryPoint(reason) => Self { + reason: Some(reason), + inner_reason: None, + revert_data: None, + }, + ValidationRevert::Operation { + entry_point_reason, + inner_revert_data, + inner_revert_reason, + } => Self { + reason: Some(entry_point_reason), + inner_reason: inner_revert_reason, + revert_data: Some(inner_revert_data), + }, + ValidationRevert::Unknown(data) => Self { + reason: None, + inner_reason: None, + revert_data: Some(data), + }, + } + } +} + #[derive(Debug, Clone, Serialize)] #[serde(rename_all = "camelCase")] pub 
struct ReplacementUnderpricedData { @@ -193,14 +259,14 @@ pub struct ExecutionRevertedWithBytesData { pub revert_data: Bytes, } -impl From for EthRpcError { - fn from(value: PoolServerError) -> Self { +impl From for EthRpcError { + fn from(value: PoolError) -> Self { match value { - PoolServerError::MempoolError(e) => e.into(), - PoolServerError::UnexpectedResponse => { - EthRpcError::Internal(anyhow::anyhow!("unexpected response from pool server")) + PoolError::MempoolError(e) => e.into(), + PoolError::UnexpectedResponse => { + Self::Internal(anyhow::anyhow!("unexpected response from pool server")) } - PoolServerError::Other(e) => EthRpcError::Internal(e), + PoolError::Other(e) => Self::Internal(e), } } } @@ -208,41 +274,40 @@ impl From for EthRpcError { impl From for EthRpcError { fn from(value: MempoolError) -> Self { match value { - MempoolError::Other(e) => EthRpcError::Internal(e), - MempoolError::OperationAlreadyKnown => EthRpcError::OperationAlreadyKnown, + MempoolError::Other(e) => Self::Internal(e), + MempoolError::OperationAlreadyKnown => Self::OperationAlreadyKnown, MempoolError::ReplacementUnderpriced(priority_fee, fee) => { - EthRpcError::ReplacementUnderpriced(ReplacementUnderpricedData { + Self::ReplacementUnderpriced(ReplacementUnderpricedData { current_max_priority_fee: priority_fee, current_max_fee: fee, }) } MempoolError::MaxOperationsReached(count, address) => { - EthRpcError::MaxOperationsReached(count, address) - } - MempoolError::EntityThrottled(entity) => EthRpcError::ThrottledOrBanned(entity), - MempoolError::MultipleRolesViolation(entity) => { - EthRpcError::MultipleRolesViolation(entity) + Self::MaxOperationsReached(count, address) } + MempoolError::EntityThrottled(entity) => Self::ThrottledOrBanned(entity), + MempoolError::MultipleRolesViolation(entity) => Self::MultipleRolesViolation(entity), MempoolError::PaymasterBalanceTooLow(current_balance, required_balance) => { - EthRpcError::PaymasterBalanceTooLow(current_balance, 
required_balance) + Self::PaymasterBalanceTooLow(current_balance, required_balance) } MempoolError::AssociatedStorageIsAlternateSender => { - EthRpcError::AssociatedStorageIsAlternateSender + Self::AssociatedStorageIsAlternateSender } MempoolError::SenderAddressUsedAsAlternateEntity(address) => { - EthRpcError::SenderAddressUsedAsAlternateEntity(address) + Self::SenderAddressUsedAsAlternateEntity(address) } MempoolError::DiscardedOnInsert => { - EthRpcError::OperationRejected("discarded on insert".to_owned()) + Self::OperationRejected("discarded on insert".to_owned()) } MempoolError::PrecheckViolation(violation) => violation.into(), MempoolError::SimulationViolation(violation) => violation.into(), MempoolError::UnsupportedAggregator(a) => { - EthRpcError::UnsupportedAggregator(UnsupportedAggregatorData { aggregator: a }) + Self::UnsupportedAggregator(UnsupportedAggregatorData { aggregator: a }) } MempoolError::UnknownEntryPoint(a) => { - EthRpcError::EntryPointValidationRejected(format!("unknown entry point: {}", a)) + Self::EntryPointValidationRejected(format!("unknown entry point: {}", a)) } + MempoolError::OperationDropTooSoon(_, _, _) => Self::InvalidParams(value.to_string()), } } } @@ -257,6 +322,8 @@ impl From for EthRpcError { fn from(value: SimulationViolation) -> Self { match value { SimulationViolation::InvalidSignature => Self::SignatureCheckFailed, + SimulationViolation::InvalidAccountSignature => Self::AccountSignatureCheckFailed, + SimulationViolation::InvalidPaymasterSignature => Self::PaymasterSignatureCheckFailed, SimulationViolation::UnintendedRevertWithMessage( EntityType::Paymaster, reason, @@ -273,18 +340,23 @@ impl From for EthRpcError { } SimulationViolation::UsedForbiddenPrecompile(_, _, _) | SimulationViolation::AccessedUndeployedContract(_, _) + | SimulationViolation::AccessedUnsupportedContractType(_, _) | SimulationViolation::CalledBannedEntryPointMethod(_) | SimulationViolation::CallHadValue(_) => Self::OpcodeViolationMap(value), 
SimulationViolation::FactoryCalledCreate2Twice(_) => { Self::OpcodeViolation(EntityType::Factory, Opcode::CREATE2) } SimulationViolation::UnstakedPaymasterContext => Self::UnstakedPaymasterContext, + SimulationViolation::AssociatedStorageDuringDeploy(e, s) => { + Self::AssociatedStorageDuringDeploy(e.map(|e| e.kind), s.address, s.slot) + } SimulationViolation::InvalidStorageAccess(entity, slot) => { Self::InvalidStorageAccess(entity.kind, slot.address, slot.slot) } SimulationViolation::NotStaked(stake_data) => { Self::StakeTooLow(Box::new(StakeTooLowData::new( - stake_data.entity, + stake_data.needs_stake, + stake_data.accessing_entity, stake_data.accessed_address, stake_data.accessed_entity, stake_data.slot, @@ -293,6 +365,8 @@ impl From for EthRpcError { ))) } SimulationViolation::AggregatorValidationFailed => Self::SignatureCheckFailed, + SimulationViolation::OutOfGas(entity) => Self::OutOfGas(entity), + SimulationViolation::ValidationRevert(revert) => Self::ValidationRevert(revert.into()), _ => Self::SimulationFailed(value), } } @@ -305,21 +379,22 @@ impl From for ErrorObjectOwned { match error { EthRpcError::Internal(_) => rpc_err(INTERNAL_ERROR_CODE, msg), EthRpcError::InvalidParams(_) => rpc_err(INVALID_PARAMS_CODE, msg), - EthRpcError::EntryPointValidationRejected(_) => { + EthRpcError::EntryPointValidationRejected(_) | EthRpcError::SimulationFailed(_) => { rpc_err(ENTRYPOINT_VALIDATION_REJECTED_CODE, msg) } EthRpcError::PaymasterValidationRejected(data) => { rpc_err_with_data(PAYMASTER_VALIDATION_REJECTED_CODE, msg, data) } + EthRpcError::PaymasterBalanceTooLow(_, _) => rpc_err(PAYMASTER_DEPOSIT_TOO_LOW, msg), EthRpcError::OpcodeViolation(_, _) | EthRpcError::OpcodeViolationMap(_) - | EthRpcError::SimulationFailed(_) + | EthRpcError::OutOfGas(_) | EthRpcError::UnstakedAggregator | EthRpcError::MultipleRolesViolation(_) | EthRpcError::UnstakedPaymasterContext | EthRpcError::SenderAddressUsedAsAlternateEntity(_) - | EthRpcError::PaymasterBalanceTooLow(_, _) 
| EthRpcError::AssociatedStorageIsAlternateSender + | EthRpcError::AssociatedStorageDuringDeploy(_, _, _) | EthRpcError::InvalidStorageAccess(_, _, _) => rpc_err(OPCODE_VIOLATION_CODE, msg), EthRpcError::OutOfTimeRange(data) => { rpc_err_with_data(OUT_OF_TIME_RANGE_CODE, msg, data) @@ -336,12 +411,19 @@ impl From for ErrorObjectOwned { } EthRpcError::OperationAlreadyKnown => rpc_err(INVALID_PARAMS_CODE, msg), EthRpcError::MaxOperationsReached(_, _) => rpc_err(STAKE_TOO_LOW_CODE, msg), - EthRpcError::SignatureCheckFailed => rpc_err(SIGNATURE_CHECK_FAILED_CODE, msg), + EthRpcError::SignatureCheckFailed + | EthRpcError::AccountSignatureCheckFailed + | EthRpcError::PaymasterSignatureCheckFailed => { + rpc_err(SIGNATURE_CHECK_FAILED_CODE, msg) + } EthRpcError::PrecheckFailed(_) => rpc_err(CALL_EXECUTION_FAILED_CODE, msg), EthRpcError::ExecutionReverted(_) => rpc_err(EXECUTION_REVERTED, msg), EthRpcError::ExecutionRevertedWithBytes(data) => { rpc_err_with_data(EXECUTION_REVERTED, msg, data) } + EthRpcError::ValidationRevert(data) => { + rpc_err_with_data(ENTRYPOINT_VALIDATION_REJECTED_CODE, msg, data) + } EthRpcError::OperationRejected(_) => rpc_err(INVALID_PARAMS_CODE, msg), } } @@ -349,7 +431,7 @@ impl From for ErrorObjectOwned { impl From for EthRpcError { fn from(status: tonic::Status) -> Self { - EthRpcError::Internal(anyhow::anyhow!( + Self::Internal(anyhow::anyhow!( "internal server error code: {} message: {}", status.code(), status.message() @@ -359,6 +441,30 @@ impl From for EthRpcError { impl From for EthRpcError { fn from(e: ProviderError) -> Self { - EthRpcError::Internal(anyhow::anyhow!("provider error: {e:?}")) + Self::Internal(anyhow::anyhow!("provider error: {e:?}")) + } +} + +impl From for EthRpcError { + fn from(e: GasEstimationError) -> Self { + match e { + GasEstimationError::RevertInValidation(revert) => Self::ValidationRevert(revert.into()), + GasEstimationError::RevertInCallWithMessage(message) => { + Self::ExecutionReverted(message) + } + 
GasEstimationError::RevertInCallWithBytes(b) => { + Self::ExecutionRevertedWithBytes(ExecutionRevertedWithBytesData { revert_data: b }) + } + error @ GasEstimationError::GasUsedTooLarge => { + Self::EntryPointValidationRejected(error.to_string()) + } + error @ GasEstimationError::GasTotalTooLarge(_, _) => { + Self::InvalidParams(error.to_string()) + } + error @ GasEstimationError::GasFieldTooLarge(_, _) => { + Self::InvalidParams(error.to_string()) + } + GasEstimationError::Other(error) => Self::Internal(error), + } } } diff --git a/crates/rpc/src/eth/events/common.rs b/crates/rpc/src/eth/events/common.rs new file mode 100644 index 00000000..ebc1aaa5 --- /dev/null +++ b/crates/rpc/src/eth/events/common.rs @@ -0,0 +1,257 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. 
+ +use std::{collections::VecDeque, marker::PhantomData, sync::Arc}; + +use anyhow::Context; +use ethers::{ + prelude::EthEvent, + types::{ + Address, Bytes, Filter, GethDebugBuiltInTracerType, GethDebugTracerType, + GethDebugTracingOptions, GethTrace, GethTraceFrame, Log, TransactionReceipt, H256, U256, + }, +}; +use rundler_provider::Provider; +use rundler_types::{chain::ChainSpec, UserOperation, UserOperationVariant}; +use rundler_utils::{eth, log::LogOnError}; + +use super::UserOperationEventProvider; +use crate::types::{RpcUserOperationByHash, RpcUserOperationReceipt}; + +#[derive(Debug)] +pub(crate) struct UserOperationEventProviderImpl { + chain_spec: ChainSpec, + provider: Arc

, + event_block_distance: Option, + _f_type: PhantomData, +} + +pub(crate) trait EntryPointFilters: Send + Sync + 'static { + type UO: UserOperation + Into; + type UserOperationEventFilter: EthEvent; + type UserOperationRevertReasonFilter: EthEvent; + + fn construct_receipt( + event: Self::UserOperationEventFilter, + hash: H256, + entry_point: Address, + logs: Vec, + tx_receipt: TransactionReceipt, + ) -> RpcUserOperationReceipt; + + fn get_user_operations_from_tx_data(tx_data: Bytes, chain_spec: &ChainSpec) -> Vec; + + fn address(chain_spec: &ChainSpec) -> Address; +} + +#[async_trait::async_trait] +impl UserOperationEventProvider for UserOperationEventProviderImpl +where + P: Provider, + F: EntryPointFilters, +{ + async fn get_mined_by_hash( + &self, + hash: H256, + ) -> anyhow::Result> { + // Get event associated with hash (need to check all entry point addresses associated with this API) + let event = self + .get_event_by_hash(hash) + .await + .log_on_error("should have successfully queried for user op events by hash")?; + + let Some(event) = event else { return Ok(None) }; + + // If the event is found, get the TX and entry point + let transaction_hash = event + .transaction_hash + .context("tx_hash should be present")?; + + let tx = self + .provider + .get_transaction(transaction_hash) + .await + .context("should have fetched tx from provider")? + .context("should have found tx")?; + + // We should return null if the tx isn't included in the block yet + if tx.block_hash.is_none() && tx.block_number.is_none() { + return Ok(None); + } + let to = tx + .to + .context("tx.to should be present on transaction containing user operation event")?; + + // Find first op matching the hash + let user_operation = if F::address(&self.chain_spec) == to { + F::get_user_operations_from_tx_data(tx.input, &self.chain_spec) + .into_iter() + .find(|op| op.hash(to, self.chain_spec.id) == hash) + .context("matching user operation should be found in tx data")? 
+ } else { + self.trace_find_user_operation(transaction_hash, hash) + .await + .context("error running trace")? + .context("should have found user operation in trace")? + }; + + Ok(Some(RpcUserOperationByHash { + user_operation: user_operation.into().into(), + entry_point: event.address.into(), + block_number: Some( + tx.block_number + .map(|n| U256::from(n.as_u64())) + .unwrap_or_default(), + ), + block_hash: Some(tx.block_hash.unwrap_or_default()), + transaction_hash: Some(transaction_hash), + })) + } + + async fn get_receipt(&self, hash: H256) -> anyhow::Result> { + let event = self + .get_event_by_hash(hash) + .await + .log_on_error("should have successfully queried for user op events by hash")?; + let Some(event) = event else { return Ok(None) }; + + let entry_point = event.address; + + let tx_hash = event + .transaction_hash + .context("tx_hash should be present")?; + + // get transaction receipt + let tx_receipt = self + .provider + .get_transaction_receipt(tx_hash) + .await + .context("should have fetched tx receipt")? + .context("Failed to fetch tx receipt")?; + + // filter receipt logs + let filtered_logs = super::filter_receipt_logs_matching_user_op(&event, &tx_receipt) + .context("should have found receipt logs matching user op")?; + + // decode uo event + let uo_event = self + .decode_user_operation_event(event) + .context("should have decoded user operation event")?; + + Ok(Some(F::construct_receipt( + uo_event, + hash, + entry_point, + filtered_logs, + tx_receipt, + ))) + } +} + +impl UserOperationEventProviderImpl +where + P: Provider, + F: EntryPointFilters, +{ + pub(crate) fn new( + chain_spec: ChainSpec, + provider: Arc

, + event_block_distance: Option, + ) -> Self { + Self { + chain_spec, + provider, + event_block_distance, + _f_type: PhantomData, + } + } + + async fn get_event_by_hash(&self, hash: H256) -> anyhow::Result> { + let to_block = self.provider.get_block_number().await?; + + let from_block = match self.event_block_distance { + Some(distance) => to_block.saturating_sub(distance), + None => 0, + }; + + let filter = Filter::new() + .address(F::address(&self.chain_spec)) + .event(&F::UserOperationEventFilter::abi_signature()) + .from_block(from_block) + .to_block(to_block) + .topic1(hash); + + let logs = self.provider.get_logs(&filter).await?; + Ok(logs.into_iter().next()) + } + + fn decode_user_operation_event(&self, log: Log) -> anyhow::Result { + F::UserOperationEventFilter::decode_log(ð::log_to_raw_log(log)) + .context("log should be a user operation event") + } + + /// This method takes a transaction hash and a user operation hash and returns the full user operation if it exists. + /// This is meant to be used when a user operation event is found in the logs of a transaction, but the top level call + /// wasn't to an entrypoint, so we need to trace the transaction to find the user operation by inspecting each call frame + /// and returning the user operation that matches the hash. 
+ async fn trace_find_user_operation( + &self, + tx_hash: H256, + user_op_hash: H256, + ) -> anyhow::Result> { + // initial call wasn't to an entrypoint, so we need to trace the transaction to find the user operation + let trace_options = GethDebugTracingOptions { + tracer: Some(GethDebugTracerType::BuiltInTracer( + GethDebugBuiltInTracerType::CallTracer, + )), + ..Default::default() + }; + println!("HC trace_find_user_operation pre"); + let trace = self + .provider + .debug_trace_transaction(tx_hash, trace_options) + .await + .context("should have fetched trace from provider")?; + println!("HC trace_find_user_operation post {:?}", trace); + + // breadth first search for the user operation in the trace + let mut frame_queue = VecDeque::new(); + + if let GethTrace::Known(GethTraceFrame::CallTracer(call_frame)) = trace { + frame_queue.push_back(call_frame); + } + + while let Some(call_frame) = frame_queue.pop_front() { + // check if the call is to an entrypoint, if not enqueue the child calls if any + if let Some(to) = call_frame + .to + .as_ref() + .and_then(|to| to.as_address()) + .filter(|to| **to == F::address(&self.chain_spec)) + { + // check if the user operation is in the call frame + if let Some(uo) = + F::get_user_operations_from_tx_data(call_frame.input, &self.chain_spec) + .into_iter() + .find(|op| op.hash(*to, self.chain_spec.id) == user_op_hash) + { + return Ok(Some(uo)); + } + } else if let Some(calls) = call_frame.calls { + frame_queue.extend(calls) + } + } + + Ok(None) + } +} diff --git a/crates/rpc/src/eth/events/mod.rs b/crates/rpc/src/eth/events/mod.rs new file mode 100644 index 00000000..eba1f184 --- /dev/null +++ b/crates/rpc/src/eth/events/mod.rs @@ -0,0 +1,222 @@ +// This file is part of Rundler. 
+// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +use anyhow::bail; +use ethers::types::{Log, TransactionReceipt, H256}; + +use crate::types::{RpcUserOperationByHash, RpcUserOperationReceipt}; + +mod common; + +mod v0_6; +pub(crate) use v0_6::UserOperationEventProviderV0_6; +mod v0_7; +pub(crate) use v0_7::UserOperationEventProviderV0_7; + +#[async_trait::async_trait] +pub(crate) trait UserOperationEventProvider: Send + Sync + 'static { + async fn get_mined_by_hash(&self, hash: H256) + -> anyhow::Result>; + + async fn get_receipt(&self, hash: H256) -> anyhow::Result>; +} + +// This method takes a user operation event and a transaction receipt and filters out all the logs +// relevant to the user operation. Since there are potentially many user operations in a transaction, +// we want to find all the logs (including the user operation event itself) that are sandwiched between +// ours and the one before it that wasn't ours. +// eg. 
reference_log: UserOp(hash_moldy) logs: \[...OtherLogs, UserOp(hash1), ...OtherLogs, UserOp(hash_moldy), ...OtherLogs\] +// -> logs: logs\[(idx_of_UserOp(hash1) + 1)..=idx_of_UserOp(hash_moldy)\] +// +// topic\[0\] == event name +// topic\[1\] == user operation hash +// +// NOTE: we can't convert just decode all the logs as user operations and filter because we still want all the other log types +// +fn filter_receipt_logs_matching_user_op( + reference_log: &Log, + tx_receipt: &TransactionReceipt, +) -> anyhow::Result> { + let mut start_idx = 0; + let mut end_idx = tx_receipt.logs.len() - 1; + let logs = &tx_receipt.logs; + + let is_ref_user_op = |log: &Log| { + log.topics[0] == reference_log.topics[0] + && log.topics[1] == reference_log.topics[1] + && log.address == reference_log.address + }; + + let is_user_op_event = |log: &Log| log.topics[0] == reference_log.topics[0]; + + let mut i = 0; + while i < logs.len() { + if i < end_idx && is_user_op_event(&logs[i]) && !is_ref_user_op(&logs[i]) { + start_idx = i; + } else if is_ref_user_op(&logs[i]) { + end_idx = i; + } + + i += 1; + } + + if !is_ref_user_op(&logs[end_idx]) { + bail!("fatal: no user ops found in tx receipt ({start_idx},{end_idx})"); + } + + let start_idx = if start_idx == 0 { 0 } else { start_idx + 1 }; + Ok(logs[start_idx..=end_idx].to_vec()) +} + +#[cfg(test)] +mod tests { + + use ethers::{types::Address, utils::keccak256}; + + use super::*; + + const UO_OP_TOPIC: &str = "user-op-event-topic"; + + #[test] + fn test_filter_receipt_logs_when_at_beginning_of_list() { + let reference_log = given_log(UO_OP_TOPIC, "moldy-hash"); + let receipt = given_receipt(vec![ + given_log("other-topic", "some-hash"), + reference_log.clone(), + given_log(UO_OP_TOPIC, "other-hash"), + given_log(UO_OP_TOPIC, "another-hash"), + ]); + + let result = filter_receipt_logs_matching_user_op(&reference_log, &receipt); + + assert!(result.is_ok(), "{}", result.unwrap_err()); + let result = result.unwrap(); + assert_eq!(result, 
receipt.logs[0..=1]); + } + + #[test] + fn test_filter_receipt_logs_when_in_middle_of_list() { + let reference_log = given_log(UO_OP_TOPIC, "moldy-hash"); + let receipt = given_receipt(vec![ + given_log("other-topic", "some-hash"), + given_log(UO_OP_TOPIC, "other-hash"), + given_log("another-topic", "some-hash"), + given_log("another-topic-2", "some-hash"), + reference_log.clone(), + given_log(UO_OP_TOPIC, "another-hash"), + ]); + + let result = filter_receipt_logs_matching_user_op(&reference_log, &receipt); + + assert!(result.is_ok(), "{}", result.unwrap_err()); + let result = result.unwrap(); + assert_eq!(result, receipt.logs[2..=4]); + } + + #[test] + fn test_filter_receipt_logs_when_at_end_of_list() { + let reference_log = given_log(UO_OP_TOPIC, "moldy-hash"); + let receipt = given_receipt(vec![ + given_log("other-topic", "some-hash"), + given_log(UO_OP_TOPIC, "other-hash"), + given_log(UO_OP_TOPIC, "another-hash"), + given_log("another-topic", "some-hash"), + given_log("another-topic-2", "some-hash"), + reference_log.clone(), + ]); + + let result = filter_receipt_logs_matching_user_op(&reference_log, &receipt); + + assert!(result.is_ok(), "{}", result.unwrap_err()); + let result = result.unwrap(); + assert_eq!(result, receipt.logs[3..=5]); + } + + #[test] + fn test_filter_receipt_logs_skips_event_from_different_address() { + let reference_log = given_log(UO_OP_TOPIC, "moldy-hash"); + let mut reference_log_w_different_address = reference_log.clone(); + reference_log_w_different_address.address = Address::from_low_u64_be(0x1234); + + let receipt = given_receipt(vec![ + given_log("other-topic", "some-hash"), + given_log(UO_OP_TOPIC, "other-hash"), + given_log(UO_OP_TOPIC, "another-hash"), + reference_log_w_different_address, + given_log("another-topic", "some-hash"), + given_log("another-topic-2", "some-hash"), + reference_log.clone(), + ]); + + let result = filter_receipt_logs_matching_user_op(&reference_log, &receipt); + + assert!(result.is_ok(), "{}", 
result.unwrap_err()); + let result = result.unwrap(); + assert_eq!(result, receipt.logs[4..=6]); + } + + #[test] + fn test_filter_receipt_logs_includes_multiple_sets_of_ref_uo() { + let reference_log = given_log(UO_OP_TOPIC, "moldy-hash"); + + let receipt = given_receipt(vec![ + given_log("other-topic", "some-hash"), + given_log(UO_OP_TOPIC, "other-hash"), + given_log("other-topic-2", "another-hash"), + reference_log.clone(), + given_log("another-topic", "some-hash"), + given_log("another-topic-2", "some-hash"), + reference_log.clone(), + given_log(UO_OP_TOPIC, "other-hash"), + ]); + + let result = filter_receipt_logs_matching_user_op(&reference_log, &receipt); + + assert!(result.is_ok(), "{}", result.unwrap_err()); + let result = result.unwrap(); + assert_eq!(result, receipt.logs[2..=6]); + } + + #[test] + fn test_filter_receipt_logs_when_not_found() { + let reference_log = given_log(UO_OP_TOPIC, "moldy-hash"); + let receipt = given_receipt(vec![ + given_log("other-topic", "some-hash"), + given_log(UO_OP_TOPIC, "other-hash"), + given_log(UO_OP_TOPIC, "another-hash"), + given_log("another-topic", "some-hash"), + given_log("another-topic-2", "some-hash"), + ]); + + let result = filter_receipt_logs_matching_user_op(&reference_log, &receipt); + + assert!(result.is_err(), "{:?}", result.unwrap()); + } + + fn given_log(topic_0: &str, topic_1: &str) -> Log { + Log { + topics: vec![ + keccak256(topic_0.as_bytes()).into(), + keccak256(topic_1.as_bytes()).into(), + ], + ..Default::default() + } + } + + fn given_receipt(logs: Vec) -> TransactionReceipt { + TransactionReceipt { + logs, + ..Default::default() + } + } +} diff --git a/crates/rpc/src/eth/events/v0_6.rs b/crates/rpc/src/eth/events/v0_6.rs new file mode 100644 index 00000000..ff09d985 --- /dev/null +++ b/crates/rpc/src/eth/events/v0_6.rs @@ -0,0 +1,105 @@ +// This file is part of Rundler. 
+// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +use ethers::{ + abi::{AbiDecode, RawLog}, + prelude::EthEvent, + types::{Address, Bytes, Log, TransactionReceipt, H256}, +}; +use rundler_types::{ + chain::ChainSpec, + contracts::v0_6::i_entry_point::{ + IEntryPointCalls, UserOperationEventFilter, UserOperationRevertReasonFilter, + }, + v0_6::UserOperation, +}; + +use super::common::{EntryPointFilters, UserOperationEventProviderImpl}; +use crate::types::RpcUserOperationReceipt; + +pub(crate) type UserOperationEventProviderV0_6

= + UserOperationEventProviderImpl; + +pub(crate) struct EntryPointFiltersV0_6; + +impl EntryPointFilters for EntryPointFiltersV0_6 { + type UO = UserOperation; + type UserOperationEventFilter = UserOperationEventFilter; + type UserOperationRevertReasonFilter = UserOperationRevertReasonFilter; + + fn construct_receipt( + event: Self::UserOperationEventFilter, + hash: H256, + entry_point: Address, + logs: Vec, + tx_receipt: TransactionReceipt, + ) -> RpcUserOperationReceipt { + // get failure reason + let reason: String = if event.success { + "".to_owned() + } else { + let revert_reason_evt: Option = logs + .iter() + .filter(|l| l.topics.len() > 1 && l.topics[1] == hash) + .map_while(|l| { + Self::UserOperationRevertReasonFilter::decode_log(&RawLog { + topics: l.topics.clone(), + data: l.data.to_vec(), + }) + .ok() + }) + .next(); + + revert_reason_evt + .map(|r| r.revert_reason.to_string()) + .unwrap_or_default() + }; + + RpcUserOperationReceipt { + user_op_hash: hash, + entry_point: entry_point.into(), + sender: event.sender.into(), + nonce: event.nonce, + paymaster: event.paymaster.into(), + actual_gas_cost: event.actual_gas_cost, + actual_gas_used: event.actual_gas_used, + success: event.success, + logs, + receipt: tx_receipt, + reason, + } + } + + fn get_user_operations_from_tx_data(tx_data: Bytes, _chain_spec: &ChainSpec) -> Vec { + let entry_point_calls = match IEntryPointCalls::decode(tx_data) { + Ok(entry_point_calls) => entry_point_calls, + Err(_) => return vec![], + }; + + match entry_point_calls { + IEntryPointCalls::HandleOps(handle_ops_call) => handle_ops_call.ops, + IEntryPointCalls::HandleAggregatedOps(handle_aggregated_ops_call) => { + handle_aggregated_ops_call + .ops_per_aggregator + .into_iter() + .flat_map(|ops| ops.user_ops) + .collect() + } + _ => vec![], + } + } + + fn address(chain_spec: &ChainSpec) -> Address { + chain_spec.entry_point_address_v0_6 + } +} diff --git a/crates/rpc/src/eth/events/v0_7.rs b/crates/rpc/src/eth/events/v0_7.rs new 
file mode 100644 index 00000000..b43d5cf1 --- /dev/null +++ b/crates/rpc/src/eth/events/v0_7.rs @@ -0,0 +1,109 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +use ethers::{ + abi::{AbiDecode, RawLog}, + prelude::EthEvent, + types::{Address, Bytes, Log, TransactionReceipt, H256}, +}; +use rundler_types::{ + chain::ChainSpec, + contracts::v0_7::i_entry_point::{ + IEntryPointCalls, UserOperationEventFilter, UserOperationRevertReasonFilter, + }, + v0_7::UserOperation, +}; + +use super::common::{EntryPointFilters, UserOperationEventProviderImpl}; +use crate::types::RpcUserOperationReceipt; + +pub(crate) type UserOperationEventProviderV0_7

= + UserOperationEventProviderImpl; + +pub(crate) struct EntryPointFiltersV0_7; + +impl EntryPointFilters for EntryPointFiltersV0_7 { + type UO = UserOperation; + type UserOperationEventFilter = UserOperationEventFilter; + type UserOperationRevertReasonFilter = UserOperationRevertReasonFilter; + + fn construct_receipt( + event: Self::UserOperationEventFilter, + hash: H256, + entry_point: Address, + logs: Vec, + tx_receipt: TransactionReceipt, + ) -> RpcUserOperationReceipt { + // get failure reason + let reason: String = if event.success { + "".to_owned() + } else { + let revert_reason_evt: Option = logs + .iter() + .filter(|l| l.topics.len() > 1 && l.topics[1] == hash) + .map_while(|l| { + Self::UserOperationRevertReasonFilter::decode_log(&RawLog { + topics: l.topics.clone(), + data: l.data.to_vec(), + }) + .ok() + }) + .next(); + + revert_reason_evt + .map(|r| r.revert_reason.to_string()) + .unwrap_or_default() + }; + + RpcUserOperationReceipt { + user_op_hash: hash, + entry_point: entry_point.into(), + sender: event.sender.into(), + nonce: event.nonce, + paymaster: event.paymaster.into(), + actual_gas_cost: event.actual_gas_cost, + actual_gas_used: event.actual_gas_used, + success: event.success, + logs, + receipt: tx_receipt, + reason, + } + } + + fn get_user_operations_from_tx_data(tx_data: Bytes, chain_spec: &ChainSpec) -> Vec { + let entry_point_calls = match IEntryPointCalls::decode(tx_data) { + Ok(entry_point_calls) => entry_point_calls, + Err(_) => return vec![], + }; + + match entry_point_calls { + IEntryPointCalls::HandleOps(handle_ops_call) => handle_ops_call + .ops + .into_iter() + .map(|op| op.unpack(chain_spec)) + .collect(), + IEntryPointCalls::HandleAggregatedOps(handle_aggregated_ops_call) => { + handle_aggregated_ops_call + .ops_per_aggregator + .into_iter() + .flat_map(|ops| ops.user_ops.into_iter().map(|op| op.unpack(chain_spec))) + .collect() + } + _ => vec![], + } + } + + fn address(chain_spec: &ChainSpec) -> Address { + 
chain_spec.entry_point_address_v0_7 + } +} diff --git a/crates/rpc/src/eth/mod.rs b/crates/rpc/src/eth/mod.rs index bdbe6a41..82151fcf 100644 --- a/crates/rpc/src/eth/mod.rs +++ b/crates/rpc/src/eth/mod.rs @@ -15,14 +15,22 @@ mod api; pub(crate) use api::EthApi; pub use api::Settings as EthApiSettings; +mod router; +pub(crate) use router::*; + mod error; +pub(crate) use error::{EthResult, EthRpcError}; +mod events; +pub(crate) use events::{UserOperationEventProviderV0_6, UserOperationEventProviderV0_7}; mod server; use ethers::types::{spoof, Address, H256, U64}; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use rundler_sim::{GasEstimate, UserOperationOptionalGas}; -use crate::types::{RichUserOperation, RpcUserOperation, UserOperationReceipt}; +use crate::types::{ + RpcGasEstimate, RpcUserOperation, RpcUserOperationByHash, RpcUserOperationOptionalGas, + RpcUserOperationReceipt, +}; /// Eth API #[rpc(client, server, namespace = "eth")] @@ -40,21 +48,24 @@ pub trait EthApi { #[method(name = "estimateUserOperationGas")] async fn estimate_user_operation_gas( &self, - op: UserOperationOptionalGas, + op: RpcUserOperationOptionalGas, entry_point: Address, state_override: Option, - ) -> RpcResult; + ) -> RpcResult; /// Returns the user operation with the given hash. #[method(name = "getUserOperationByHash")] - async fn get_user_operation_by_hash(&self, hash: H256) -> RpcResult>; + async fn get_user_operation_by_hash( + &self, + hash: H256, + ) -> RpcResult>; /// Returns the user operation receipt with the given hash. #[method(name = "getUserOperationReceipt")] async fn get_user_operation_receipt( &self, hash: H256, - ) -> RpcResult>; + ) -> RpcResult>; /// Returns the supported entry points addresses #[method(name = "supportedEntryPoints")] diff --git a/crates/rpc/src/eth/router.rs b/crates/rpc/src/eth/router.rs new file mode 100644 index 00000000..a52ecb59 --- /dev/null +++ b/crates/rpc/src/eth/router.rs @@ -0,0 +1,365 @@ +// This file is part of Rundler. 
+// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +use std::{fmt::Debug, marker::PhantomData, sync::Arc}; + +use ethers::types::{spoof, Address, H256, U256}; +use rundler_provider::{EntryPoint, SimulationProvider}; +use rundler_sim::{GasEstimationError, GasEstimator}; +use rundler_types::{ + EntryPointVersion, GasEstimate, UserOperation, UserOperationOptionalGas, UserOperationVariant, +}; + +use super::events::UserOperationEventProvider; +use crate::{ + eth::{error::EthResult, EthRpcError}, + types::{ + RpcGasEstimate, RpcGasEstimateV0_6, RpcGasEstimateV0_7, RpcUserOperationByHash, + RpcUserOperationReceipt, + }, +}; + +#[derive(Default)] +pub(crate) struct EntryPointRouterBuilder { + entry_points: Vec

, + v0_6: Option<(Address, Arc)>, + v0_7: Option<(Address, Arc)>, +} + +impl EntryPointRouterBuilder { + pub(crate) fn v0_6(mut self, route: R) -> Self + where + R: EntryPointRoute, + { + if route.version() != EntryPointVersion::V0_6 { + panic!( + "Invalid entry point version for route: {:?}", + route.version() + ); + } + + self.entry_points.push(route.address()); + self.v0_6 = Some((route.address(), Arc::new(route))); + self + } + + pub(crate) fn v0_7(mut self, route: R) -> Self + where + R: EntryPointRoute, + { + if route.version() != EntryPointVersion::V0_7 { + panic!( + "Invalid entry point version for route: {:?}", + route.version() + ); + } + + self.entry_points.push(route.address()); + self.v0_7 = Some((route.address(), Arc::new(route))); + self + } + + pub(crate) fn build(self) -> EntryPointRouter { + EntryPointRouter { + entry_points: self.entry_points, + v0_6: self.v0_6, + v0_7: self.v0_7, + } + } +} + +#[derive(Clone)] +pub(crate) struct EntryPointRouter { + entry_points: Vec
, + v0_6: Option<(Address, Arc)>, + v0_7: Option<(Address, Arc)>, +} + +impl EntryPointRouter { + pub(crate) fn entry_points(&self) -> impl Iterator { + self.entry_points.iter() + } + + pub(crate) fn check_and_get_route( + &self, + entry_point: &Address, + uo: &UserOperationVariant, + ) -> EthResult<&Arc> { + match self.get_ep_version(entry_point)? { + EntryPointVersion::V0_6 => { + if !matches!(uo, UserOperationVariant::V0_6(_)) { + return Err(EthRpcError::InvalidParams(format!( + "Invalid user operation for entry point: {:?}", + entry_point + ))); + } + Ok(&self.v0_6.as_ref().unwrap().1) + } + EntryPointVersion::V0_7 => { + if !matches!(uo, UserOperationVariant::V0_7(_)) { + return Err(EthRpcError::InvalidParams(format!( + "Invalid user operation for entry point: {:?}", + entry_point + ))); + } + Ok(&self.v0_7.as_ref().unwrap().1) + } + EntryPointVersion::Unspecified => unreachable!("unspecified entry point version"), + } + } + + pub(crate) async fn get_mined_by_hash( + &self, + entry_point: &Address, + hash: H256, + ) -> EthResult> { + self.get_route(entry_point)? + .get_mined_by_hash(hash) + .await + .map_err(Into::into) + } + + pub(crate) async fn get_receipt( + &self, + entry_point: &Address, + hash: H256, + ) -> EthResult> { + self.get_route(entry_point)? + .get_receipt(hash) + .await + .map_err(Into::into) + } + + pub(crate) async fn estimate_gas( + &self, + entry_point: &Address, + uo: UserOperationOptionalGas, + state_override: Option, + at_price: Option, + ) -> EthResult { + match self.get_ep_version(entry_point)? 
{ + EntryPointVersion::V0_6 => { + if !matches!(uo, UserOperationOptionalGas::V0_6(_)) { + return Err(EthRpcError::InvalidParams(format!( + "Invalid user operation for entry point: {:?}", + entry_point + ))); + } + + let e = self + .v0_6 + .as_ref() + .unwrap() + .1 + .estimate_gas(uo, state_override, at_price) + .await?; + + Ok(RpcGasEstimateV0_6::from(e).into()) + } + EntryPointVersion::V0_7 => { + if !matches!(uo, UserOperationOptionalGas::V0_7(_)) { + return Err(EthRpcError::InvalidParams(format!( + "Invalid user operation for entry point: {:?}", + entry_point + ))); + } + + let e = self + .v0_7 + .as_ref() + .unwrap() + .1 + .estimate_gas(uo, state_override, at_price) + .await?; + + Ok(RpcGasEstimateV0_7::from(e).into()) + } + EntryPointVersion::Unspecified => unreachable!("unspecified entry point version"), + } + } + + pub(crate) async fn get_nonce( + &self, + entry_point: &Address, + addr: Address, + key: U256, + ) -> EthResult { + + self.get_route(entry_point)? + .get_nonce(addr,key) + .await + .map_err(Into::into) + //Ok(U256::from(0)) + } + + pub(crate) async fn check_signature( + &self, + entry_point: &Address, + uo: UserOperationVariant, + max_verification_gas: u64, + ) -> EthResult { + self.check_and_get_route(entry_point, &uo)? 
+ .check_signature(uo, max_verification_gas) + .await + .map_err(Into::into) + } + + fn get_ep_version(&self, entry_point: &Address) -> EthResult { + if let Some((addr, _)) = self.v0_6 { + if addr == *entry_point { + return Ok(EntryPointVersion::V0_6); + } + } + if let Some((addr, _)) = self.v0_7 { + if addr == *entry_point { + return Ok(EntryPointVersion::V0_7); + } + } + + Err(EthRpcError::InvalidParams(format!( + "No entry point found for address: {:?}", + entry_point + ))) + } + + fn get_route(&self, entry_point: &Address) -> EthResult<&Arc> { + let ep = self.get_ep_version(entry_point)?; + + match ep { + EntryPointVersion::V0_6 => Ok(&self.v0_6.as_ref().unwrap().1), + EntryPointVersion::V0_7 => Ok(&self.v0_7.as_ref().unwrap().1), + EntryPointVersion::Unspecified => unreachable!("unspecified entry point version"), + } + } +} + +#[async_trait::async_trait] +pub(crate) trait EntryPointRoute: Send + Sync + 'static { + fn version(&self) -> EntryPointVersion; + + fn address(&self) -> Address; + + async fn get_mined_by_hash(&self, hash: H256) + -> anyhow::Result>; + + async fn get_receipt(&self, hash: H256) -> anyhow::Result>; + + async fn estimate_gas( + &self, + uo: UserOperationOptionalGas, + state_override: Option, + at_price: Option, + ) -> Result; + + async fn get_nonce( + &self, + addr: Address, + key: U256, + ) -> anyhow::Result; + + async fn check_signature( + &self, + uo: UserOperationVariant, + max_verification_gas: u64, + ) -> anyhow::Result; +} + +#[derive(Debug)] +pub(crate) struct EntryPointRouteImpl { + entry_point: E, + gas_estimator: G, + event_provider: EV, + _uo_type: PhantomData, +} + +#[async_trait::async_trait] +impl EntryPointRoute for EntryPointRouteImpl +where + UO: UserOperation + From, + E: EntryPoint + SimulationProvider, + G: GasEstimator, + G::UserOperationOptionalGas: From, + EV: UserOperationEventProvider, +{ + fn version(&self) -> EntryPointVersion { + UO::entry_point_version() + } + + fn address(&self) -> Address { + 
self.entry_point.address() + } + + async fn get_mined_by_hash( + &self, + hash: H256, + ) -> anyhow::Result> { + self.event_provider.get_mined_by_hash(hash).await + } + + async fn get_receipt(&self, hash: H256) -> anyhow::Result> { + self.event_provider.get_receipt(hash).await + } + + async fn estimate_gas( + &self, + uo: UserOperationOptionalGas, + state_override: Option, + at_price: Option, + ) -> Result { + println!("HC router estimate_gas op {:?} state {:?}", uo, state_override); + let ret = self.gas_estimator + .estimate_op_gas(uo.into(), state_override.unwrap_or_default(), at_price) + .await; + println!("HC router estimate_gas ret {:?}", ret); + ret + } + + async fn get_nonce( + &self, + addr: Address, + key: U256, + ) -> anyhow::Result { + let output = self + .entry_point + .get_nonce(addr, key) + .await; + if output.is_ok() { + return Ok(output.unwrap()); + } + Err(anyhow::anyhow!("get_nonce() failed")) + } + + async fn check_signature( + &self, + uo: UserOperationVariant, + max_verification_gas: u64, + ) -> anyhow::Result { + let output = self + .entry_point + .call_simulate_validation(uo.into(), max_verification_gas, None) + .await?; + + Ok(!output.return_info.account_sig_failed) + } +} + +impl EntryPointRouteImpl { + pub(crate) fn new(entry_point: E, gas_estimator: G, event_provider: EP) -> Self { + Self { + entry_point, + gas_estimator, + event_provider, + _uo_type: PhantomData, + } + } +} diff --git a/crates/rpc/src/eth/server.rs b/crates/rpc/src/eth/server.rs index bd5c5814..8e485a92 100644 --- a/crates/rpc/src/eth/server.rs +++ b/crates/rpc/src/eth/server.rs @@ -11,58 +11,84 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. 
-use async_trait::async_trait; use ethers::types::{spoof, Address, H256, U64}; use jsonrpsee::core::RpcResult; -use rundler_pool::PoolServer; -use rundler_provider::{EntryPoint, Provider}; -use rundler_sim::{GasEstimate, UserOperationOptionalGas}; +use rundler_types::{pool::Pool, UserOperationVariant}; use super::{api::EthApi, EthApiServer}; -use crate::types::{RichUserOperation, RpcUserOperation, UserOperationReceipt}; +use crate::{ + types::{ + FromRpc, RpcGasEstimate, RpcUserOperation, RpcUserOperationByHash, + RpcUserOperationOptionalGas, RpcUserOperationReceipt, + }, + utils, +}; -#[async_trait] -impl EthApiServer for EthApi +#[async_trait::async_trait] +impl

EthApiServer for EthApi

where - P: Provider, - E: EntryPoint, - PS: PoolServer, + P: Pool, { async fn send_user_operation( &self, op: RpcUserOperation, entry_point: Address, ) -> RpcResult { - Ok(EthApi::send_user_operation(self, op, entry_point).await?) + utils::safe_call_rpc_handler( + "eth_sendUserOperation", + EthApi::send_user_operation( + self, + UserOperationVariant::from_rpc(op, &self.chain_spec), + entry_point, + ), + ) + .await } async fn estimate_user_operation_gas( &self, - op: UserOperationOptionalGas, + op: RpcUserOperationOptionalGas, entry_point: Address, state_override: Option, - ) -> RpcResult { - //println!("HC server.rs est_userOp_gas state {:?}", state_override); - - Ok(EthApi::estimate_user_operation_gas(self, op, entry_point, state_override).await?) + ) -> RpcResult { + utils::safe_call_rpc_handler( + "eth_estimateUserOperationGas", + EthApi::estimate_user_operation_gas(self, op.into(), entry_point, state_override), + ) + .await } - async fn get_user_operation_by_hash(&self, hash: H256) -> RpcResult> { - Ok(EthApi::get_user_operation_by_hash(self, hash).await?) + async fn get_user_operation_by_hash( + &self, + hash: H256, + ) -> RpcResult> { + utils::safe_call_rpc_handler( + "eth_getUserOperationByHash", + EthApi::get_user_operation_by_hash(self, hash), + ) + .await } async fn get_user_operation_receipt( &self, hash: H256, - ) -> RpcResult> { - Ok(EthApi::get_user_operation_receipt(self, hash).await?) + ) -> RpcResult> { + utils::safe_call_rpc_handler( + "eth_getUserOperationReceipt", + EthApi::get_user_operation_receipt(self, hash), + ) + .await } async fn supported_entry_points(&self) -> RpcResult> { - Ok(EthApi::supported_entry_points(self).await?) + utils::safe_call_rpc_handler( + "eth_supportedEntryPoints", + EthApi::supported_entry_points(self), + ) + .await } async fn chain_id(&self) -> RpcResult { - Ok(EthApi::chain_id(self).await?) 
+ utils::safe_call_rpc_handler("eth_chainId", EthApi::chain_id(self)).await } } diff --git a/crates/rpc/src/lib.rs b/crates/rpc/src/lib.rs index 56593383..9945ff71 100644 --- a/crates/rpc/src/lib.rs +++ b/crates/rpc/src/lib.rs @@ -22,6 +22,9 @@ mod debug; pub use debug::DebugApiClient; +mod admin; +pub use admin::AdminApiClient; + mod error; mod eth; @@ -31,10 +34,10 @@ mod health; mod metrics; mod rundler; -pub use rundler::RundlerApiClient; +pub use rundler::{RundlerApiClient, Settings as RundlerApiSettings}; mod task; pub use task::{Args as RpcTaskArgs, RpcTask}; mod types; -pub use types::{RichUserOperation, RpcUserOperation, UserOperationReceipt}; +mod utils; diff --git a/crates/rpc/src/metrics.rs b/crates/rpc/src/metrics.rs index a85a38b9..0214daa7 100644 --- a/crates/rpc/src/metrics.rs +++ b/crates/rpc/src/metrics.rs @@ -82,22 +82,22 @@ pub(crate) struct RpcMetrics {} impl RpcMetrics { fn increment_num_requests(method_name: String) { - metrics::increment_counter!("rpc_num_requests", "method_name" => method_name) + metrics::counter!("rpc_num_requests", "method_name" => method_name).increment(1); } fn increment_open_requests(method_name: String) { - metrics::increment_gauge!("rpc_open_requests", 1_f64, "method_name" => method_name) + metrics::gauge!("rpc_open_requests", "method_name" => method_name).increment(1_f64); } fn decrement_open_requests(method_name: String) { - metrics::decrement_gauge!("rpc_open_requests", 1_f64, "method_name" => method_name) + metrics::gauge!("rpc_open_requests", "method_name" => method_name).decrement(1_f64); } fn increment_rpc_error_count(method_name: String) { - metrics::increment_counter!("rpc_error_count", "method_name" => method_name) + metrics::counter!("rpc_error_count", "method_name" => method_name).increment(1); } fn record_request_latency(method_name: String, latency: Duration) { - metrics::histogram!("rpc_request_latency", latency, "method_name" => method_name) + metrics::histogram!("rpc_request_latency", "method_name" 
 => method_name).record(latency); } } diff --git a/crates/rpc/src/rundler.rs b/crates/rpc/src/rundler.rs index 82c0c050..d61967f5 100644 --- a/crates/rpc/src/rundler.rs +++ b/crates/rpc/src/rundler.rs @@ -13,55 +13,163 @@ use std::sync::Arc; +use anyhow::Context; use async_trait::async_trait; -use ethers::types::U256; -use jsonrpsee::{core::RpcResult, proc_macros::rpc, types::error::INTERNAL_ERROR_CODE}; +use ethers::types::{Address, H256, U256}; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use rundler_provider::Provider; -use rundler_sim::{FeeEstimator, PrecheckSettings}; +use rundler_sim::{gas, FeeEstimator}; +use rundler_types::{chain::ChainSpec, pool::Pool, UserOperation, UserOperationVariant}; -use crate::error::rpc_err; +use crate::{ + eth::{EntryPointRouter, EthResult, EthRpcError}, + types::{FromRpc, RpcUserOperation}, + utils, +}; + +/// Settings for the `rundler_` API +#[derive(Copy, Clone, Debug)] +pub struct Settings { + /// The priority fee mode to use for calculating required user operation priority fee. + pub priority_fee_mode: gas::PriorityFeeMode, + /// If using a bundle priority fee, the percentage to add to the network/oracle + /// provided value as a safety margin for fast inclusion. + pub bundle_priority_fee_overhead_percent: u64, + /// Max verification gas + pub max_verification_gas: u64, +} #[rpc(client, server, namespace = "rundler")] pub trait RundlerApi { /// Returns the maximum priority fee per gas required by Rundler #[method(name = "maxPriorityFeePerGas")] async fn max_priority_fee_per_gas(&self) -> RpcResult; + + /// Drops a user operation from the local mempool. + /// + /// Requirements: + /// - The user operation must contain a sender/nonce pair that is present in the local mempool. + /// - The user operation must pass entrypoint.simulateValidation. I.e. 
it must have a valid signature and verificationGasLimit + /// - The user operation must have zero values for: preVerificationGas, callGasLimit, calldata, and maxFeePerGas + /// + /// Returns none if no user operation was found, otherwise returns the hash of the removed user operation. + #[method(name = "dropLocalUserOperation")] + async fn drop_local_user_operation( + &self, + uo: RpcUserOperation, + entry_point: Address, + ) -> RpcResult>; } -pub(crate) struct RundlerApi { +pub(crate) struct RundlerApi { + chain_spec: ChainSpec, + settings: Settings, fee_estimator: FeeEstimator

, + pool_server: PL, + entry_point_router: EntryPointRouter, +} + +#[async_trait] +impl RundlerApiServer for RundlerApi +where + P: Provider, + PL: Pool, +{ + async fn max_priority_fee_per_gas(&self) -> RpcResult { + utils::safe_call_rpc_handler( + "rundler_maxPriorityFeePerGas", + RundlerApi::max_priority_fee_per_gas(self), + ) + .await + } + + async fn drop_local_user_operation( + &self, + user_op: RpcUserOperation, + entry_point: Address, + ) -> RpcResult> { + utils::safe_call_rpc_handler( + "rundler_dropLocalUserOperation", + RundlerApi::drop_local_user_operation(self, user_op, entry_point), + ) + .await + } } -impl

RundlerApi

+impl RundlerApi where P: Provider, + PL: Pool, { - pub(crate) fn new(provider: Arc

, chain_id: u64, settings: PrecheckSettings) -> Self { + pub(crate) fn new( + chain_spec: &ChainSpec, + provider: Arc

, + entry_point_router: EntryPointRouter, + pool_server: PL, + settings: Settings, + ) -> Self { Self { + chain_spec: chain_spec.clone(), + settings, fee_estimator: FeeEstimator::new( + chain_spec, provider, - chain_id, settings.priority_fee_mode, settings.bundle_priority_fee_overhead_percent, ), + entry_point_router, + pool_server, } } -} -#[async_trait] -impl

RundlerApiServer for RundlerApi

-where - P: Provider, -{ - async fn max_priority_fee_per_gas(&self) -> RpcResult { + async fn max_priority_fee_per_gas(&self) -> EthResult { let (bundle_fees, _) = self .fee_estimator .required_bundle_fees(None) .await - .map_err(|e| rpc_err(INTERNAL_ERROR_CODE, e.to_string()))?; + .context("should get required fees")?; Ok(self .fee_estimator .required_op_fees(bundle_fees) .max_priority_fee_per_gas) } + + async fn drop_local_user_operation( + &self, + user_op: RpcUserOperation, + entry_point: Address, + ) -> EthResult> { + let uo = UserOperationVariant::from_rpc(user_op, &self.chain_spec); + let id = uo.id(); + + if uo.pre_verification_gas() != U256::zero() + || uo.call_gas_limit() != U256::zero() + || uo.call_data().len() != 0 + || uo.max_fee_per_gas() != U256::zero() + { + Err(EthRpcError::InvalidParams("Invalid user operation for drop: preVerificationGas, callGasLimit, callData, and maxFeePerGas must be zero".to_string()))?; + } + + let valid = self + .entry_point_router + .check_signature(&entry_point, uo, self.settings.max_verification_gas) + .await?; + if !valid { + Err(EthRpcError::InvalidParams( + "Invalid user operation for drop: invalid signature".to_string(), + ))?; + } + + // remove the op from the pool + let ret = self + .pool_server + .remove_op_by_id(entry_point, id) + .await + .map_err(|e| { + tracing::info!("Error dropping user operation: {}", e); + EthRpcError::from(e) + })?; + + Ok(ret) + } } diff --git a/crates/rpc/src/task.rs b/crates/rpc/src/task.rs index c67a68cc..446f6901 100644 --- a/crates/rpc/src/task.rs +++ b/crates/rpc/src/task.rs @@ -15,48 +15,49 @@ use std::{net::SocketAddr, sync::Arc, time::Duration}; use anyhow::bail; use async_trait::async_trait; -use ethers::{ - providers::{Http, Provider, RetryClient}, - types::Address, -}; +use ethers::providers::{JsonRpcClient, Provider}; use jsonrpsee::{ server::{middleware::ProxyGetRequestLayer, ServerBuilder}, RpcModule, }; use hyper::Method; use tower_http::cors::{Any, CorsLayer}; -use 
rundler_builder::BuilderServer; -use rundler_pool::PoolServer; -use rundler_provider::EntryPoint; -use rundler_sim::{EstimationSettings, PrecheckSettings}; +use rundler_provider::{EthersEntryPointV0_6, EthersEntryPointV0_7}; +use rundler_sim::{ + EstimationSettings, FeeEstimator, GasEstimatorV0_6, GasEstimatorV0_7, PrecheckSettings, +}; use rundler_task::{ server::{format_socket_addr, HealthCheck}, Task, }; -use rundler_types::contracts::i_entry_point::IEntryPoint; -use rundler_utils::eth; +use rundler_types::{builder::Builder, chain::ChainSpec, pool::Pool}; use tokio_util::sync::CancellationToken; +use tracing::info; use crate::{ + admin::{AdminApi, AdminApiServer}, debug::{DebugApi, DebugApiServer}, - eth::{EthApi, EthApiServer, EthApiSettings}, + eth::{ + EntryPointRouteImpl, EntryPointRouter, EntryPointRouterBuilder, EthApi, EthApiServer, + EthApiSettings, UserOperationEventProviderV0_6, UserOperationEventProviderV0_7, + }, health::{HealthChecker, SystemApiServer}, metrics::RpcMetricsLogger, - rundler::{RundlerApi, RundlerApiServer}, + rundler::{RundlerApi, RundlerApiServer, Settings as RundlerApiSettings}, types::ApiNamespace, }; /// RPC server arguments. #[derive(Debug)] pub struct Args { + /// Chain spec + pub chain_spec: ChainSpec, + /// True if using unsafe mode + pub unsafe_mode: bool, /// Port to listen on. pub port: u16, /// Host to listen on. pub host: String, - /// List of supported entry points. - pub entry_points: Vec

, - /// Chain ID. - pub chain_id: u64, /// List of API namespaces to enable. pub api_namespaces: Vec, /// Full node RPC URL to use. @@ -65,12 +66,18 @@ pub struct Args { pub precheck_settings: PrecheckSettings, /// eth_ API settings. pub eth_api_settings: EthApiSettings, + /// rundler_ API settings. + pub rundler_api_settings: RundlerApiSettings, /// Estimation settings. pub estimation_settings: EstimationSettings, /// RPC timeout. pub rpc_timeout: Duration, /// Max number of connections. pub max_connections: u32, + /// Whether to enable entry point v0.6. + pub entry_point_v0_6_enabled: bool, + /// Whether to enable entry point v0.7. + pub entry_point_v0_7_enabled: bool, } /// JSON-RPC server task. @@ -84,27 +91,87 @@ pub struct RpcTask { #[async_trait] impl Task for RpcTask where - P: PoolServer + HealthCheck + Clone, - B: BuilderServer + HealthCheck + Clone, + P: Pool + HealthCheck + Clone, + B: Builder + HealthCheck + Clone, { async fn run(mut self: Box, shutdown_token: CancellationToken) -> anyhow::Result<()> { let addr: SocketAddr = format_socket_addr(&self.args.host, self.args.port).parse()?; tracing::info!("Starting rpc server on {}", addr); - if self.args.entry_points.is_empty() { - bail!("No entry points provided"); + let provider = rundler_provider::new_provider(&self.args.rpc_url, None)?; + let ep_v0_6 = EthersEntryPointV0_6::new( + self.args.chain_spec.entry_point_address_v0_6, + &self.args.chain_spec, + self.args.estimation_settings.max_simulate_handle_ops_gas, + provider.clone(), + ); + let ep_v0_7 = EthersEntryPointV0_7::new( + self.args.chain_spec.entry_point_address_v0_7, + &self.args.chain_spec, + self.args.estimation_settings.max_simulate_handle_ops_gas, + provider.clone(), + ); + + let mut router_builder = EntryPointRouterBuilder::default(); + if self.args.entry_point_v0_6_enabled { + router_builder = router_builder.v0_6(EntryPointRouteImpl::new( + ep_v0_6.clone(), + GasEstimatorV0_6::new( + self.args.chain_spec.clone(), + provider.clone(), + 
ep_v0_6.clone(), + self.args.estimation_settings, + FeeEstimator::new( + &self.args.chain_spec, + Arc::clone(&provider), + self.args.precheck_settings.priority_fee_mode, + self.args + .precheck_settings + .bundle_priority_fee_overhead_percent, + ), + ), + UserOperationEventProviderV0_6::new( + self.args.chain_spec.clone(), + provider.clone(), + self.args + .eth_api_settings + .user_operation_event_block_distance, + ), + )); } - let provider = eth::new_provider(&self.args.rpc_url, None)?; - let entry_points = self - .args - .entry_points - .iter() - .map(|addr| IEntryPoint::new(*addr, provider.clone())) - .collect(); + if self.args.entry_point_v0_7_enabled { + router_builder = router_builder.v0_7(EntryPointRouteImpl::new( + ep_v0_7.clone(), + GasEstimatorV0_7::new( + self.args.chain_spec.clone(), + Arc::clone(&provider), + ep_v0_7.clone(), + self.args.estimation_settings, + FeeEstimator::new( + &self.args.chain_spec, + Arc::clone(&provider), + self.args.precheck_settings.priority_fee_mode, + self.args + .precheck_settings + .bundle_priority_fee_overhead_percent, + ), + ), + UserOperationEventProviderV0_7::new( + self.args.chain_spec.clone(), + provider.clone(), + self.args + .eth_api_settings + .user_operation_event_block_distance, + ), + )); + } + + // create the entry point router + let router = router_builder.build(); let mut module = RpcModule::new(()); - self.attach_namespaces(provider, entry_points, &mut module)?; + self.attach_namespaces(provider, router, &mut module)?; let servers: Vec> = vec![Box::new(self.pool.clone()), Box::new(self.builder.clone())]; @@ -133,11 +200,19 @@ where .set_logger(RpcMetricsLogger) .set_middleware(service_builder) .max_connections(self.args.max_connections) + // Set max request body size to 2x the max transaction size as none of our + // APIs should require more than that. 
+ .max_request_body_size( + (self.args.chain_spec.max_transaction_size_bytes * 2) + .try_into() + .expect("max_transaction_size_bytes * 2 overflowed u32"), + ) .http_only() .build(addr) .await?; let handle = server.start(module); + info!("Started RPC server"); println!("Started RPC server"); tokio::select! { @@ -155,8 +230,8 @@ where impl RpcTask where - P: PoolServer + HealthCheck + Clone, - B: BuilderServer + HealthCheck + Clone, + P: Pool + HealthCheck + Clone, + B: Builder + HealthCheck + Clone, { /// Creates a new RPC server task. pub fn new(args: Args, pool: P, builder: B) -> Self { @@ -172,39 +247,47 @@ where Box::new(self) } - fn attach_namespaces( + fn attach_namespaces( &self, - provider: Arc>>, - entry_points: Vec, + provider: Arc>, + entry_point_router: EntryPointRouter, module: &mut RpcModule<()>, - ) -> anyhow::Result<()> { - for api in &self.args.api_namespaces { - match api { - ApiNamespace::Eth => module.merge( - EthApi::new( - provider.clone(), - entry_points.clone(), - self.args.chain_id, - self.pool.clone(), - self.args.eth_api_settings.clone(), - self.args.estimation_settings, - self.args.precheck_settings, - ) - .into_rpc(), - )?, - ApiNamespace::Debug => module - .merge(DebugApi::new(self.pool.clone(), self.builder.clone()).into_rpc())?, - ApiNamespace::Rundler => module.merge( - RundlerApi::new( - provider.clone(), - self.args.chain_id, - self.args.precheck_settings, - ) - .into_rpc(), - )?, - } + ) -> anyhow::Result<()> + where + C: JsonRpcClient + 'static, + { + if self.args.api_namespaces.contains(&ApiNamespace::Eth) { + module.merge( + EthApi::new( + self.args.chain_spec.clone(), + entry_point_router.clone(), + self.pool.clone(), + ) + .into_rpc(), + )? 
+ } + + if self.args.api_namespaces.contains(&ApiNamespace::Debug) { + module.merge(DebugApi::new(self.pool.clone(), self.builder.clone()).into_rpc())?; + } + + if self.args.api_namespaces.contains(&ApiNamespace::Admin) { + module.merge(AdminApi::new(self.pool.clone()).into_rpc())?; + } + + if self.args.api_namespaces.contains(&ApiNamespace::Rundler) { + module.merge( + RundlerApi::new( + &self.args.chain_spec, + provider.clone(), + entry_point_router, + self.pool.clone(), + self.args.rundler_api_settings, + ) + .into_rpc(), + )?; } Ok(()) } -} \ No newline at end of file +} diff --git a/crates/rpc/src/types.rs b/crates/rpc/src/types/mod.rs similarity index 51% rename from crates/rpc/src/types.rs rename to crates/rpc/src/types/mod.rs index 28b2a7c9..29350380 100644 --- a/crates/rpc/src/types.rs +++ b/crates/rpc/src/types/mod.rs @@ -12,13 +12,29 @@ // If not, see https://www.gnu.org/licenses/. use ethers::{ - types::{Address, Bytes, Log, TransactionReceipt, H160, H256, U256}, + types::{Address, Log, TransactionReceipt, H160, H256, U256}, utils::to_checksum, }; -use rundler_pool::{Reputation, ReputationStatus}; -use rundler_types::UserOperation; +use rundler_types::{ + chain::ChainSpec, + pool::{Reputation, ReputationStatus}, + v0_6::UserOperation as UserOperationV0_6, + v0_7::UserOperation as UserOperationV0_7, + UserOperationOptionalGas, UserOperationVariant, +}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; +mod v0_6; +pub(crate) use v0_6::{ + RpcGasEstimate as RpcGasEstimateV0_6, RpcUserOperation as RpcUserOperationV0_6, + RpcUserOperationOptionalGas as RpcUserOperationOptionalGasV0_6, +}; +mod v0_7; +pub(crate) use v0_7::{ + RpcGasEstimate as RpcGasEstimateV0_7, RpcUserOperation as RpcUserOperationV0_7, + RpcUserOperationOptionalGas as RpcUserOperationOptionalGasV0_7, +}; + /// API namespace #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::EnumString)] #[strum(serialize_all = "lowercase", ascii_case_insensitive)] @@ -26,6 +42,12 @@ pub 
enum ApiNamespace { Eth, Debug, Rundler, + Admin, +} + +/// Conversion trait for RPC types adding the context of the entry point and chain id +pub(crate) trait FromRpc { + fn from_rpc(rpc: R, chain_spec: &ChainSpec) -> Self; } #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -65,92 +87,103 @@ impl From
for RpcAddress { /// Stake info definition for RPC #[derive(Debug, Clone, Deserialize, Serialize)] #[serde(rename_all = "camelCase")] -pub struct RpcStakeStatus { - pub is_staked: bool, - pub stake_info: RpcStakeInfo, +pub(crate) struct RpcStakeStatus { + pub(crate) is_staked: bool, + pub(crate) stake_info: RpcStakeInfo, } #[derive(Debug, Clone, Deserialize, Serialize)] #[serde(rename_all = "camelCase")] -pub struct RpcStakeInfo { - pub addr: Address, - pub stake: u128, - pub unstake_delay_sec: u32, +pub(crate) struct RpcStakeInfo { + pub(crate) addr: Address, + pub(crate) stake: u128, + pub(crate) unstake_delay_sec: u32, } -/// User operation definition for RPC -#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)] -#[serde(rename_all = "camelCase")] -pub struct RpcUserOperation { - sender: RpcAddress, - nonce: U256, - init_code: Bytes, - call_data: Bytes, - call_gas_limit: U256, - verification_gas_limit: U256, - pre_verification_gas: U256, - max_fee_per_gas: U256, - max_priority_fee_per_gas: U256, - paymaster_and_data: Bytes, - signature: Bytes, -} - -impl From for RpcUserOperation { - fn from(op: UserOperation) -> Self { - RpcUserOperation { - sender: op.sender.into(), - nonce: op.nonce, - init_code: op.init_code, - call_data: op.call_data, - call_gas_limit: op.call_gas_limit, - verification_gas_limit: op.verification_gas_limit, - pre_verification_gas: op.pre_verification_gas, - max_fee_per_gas: op.max_fee_per_gas, - max_priority_fee_per_gas: op.max_priority_fee_per_gas, - paymaster_and_data: op.paymaster_and_data, - signature: op.signature, +#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)] +#[serde(untagged)] +pub(crate) enum RpcUserOperation { + V0_6(RpcUserOperationV0_6), + V0_7(RpcUserOperationV0_7), +} + +impl From for RpcUserOperation { + fn from(op: UserOperationVariant) -> Self { + match op { + UserOperationVariant::V0_6(op) => RpcUserOperation::V0_6(op.into()), + UserOperationVariant::V0_7(op) => RpcUserOperation::V0_7(op.into()), } } 
} -impl From for UserOperation { - fn from(def: RpcUserOperation) -> Self { - UserOperation { - sender: def.sender.into(), - nonce: def.nonce, - init_code: def.init_code, - call_data: def.call_data, - call_gas_limit: def.call_gas_limit, - verification_gas_limit: def.verification_gas_limit, - pre_verification_gas: def.pre_verification_gas, - max_fee_per_gas: def.max_fee_per_gas, - max_priority_fee_per_gas: def.max_priority_fee_per_gas, - paymaster_and_data: def.paymaster_and_data, - signature: def.signature, +impl FromRpc for UserOperationVariant { + fn from_rpc(op: RpcUserOperation, chain_spec: &ChainSpec) -> Self { + match op { + RpcUserOperation::V0_6(op) => { + UserOperationVariant::V0_6(UserOperationV0_6::from_rpc(op, chain_spec)) + } + RpcUserOperation::V0_7(op) => { + UserOperationVariant::V0_7(UserOperationV0_7::from_rpc(op, chain_spec)) + } } } } /// User operation with additional metadata -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)] #[serde(rename_all = "camelCase")] -pub struct RichUserOperation { +pub(crate) struct RpcUserOperationByHash { /// The full user operation - pub user_operation: RpcUserOperation, + pub(crate) user_operation: RpcUserOperation, /// The entry point address this operation was sent to - pub entry_point: RpcAddress, + pub(crate) entry_point: RpcAddress, /// The number of the block this operation was included in - pub block_number: Option, + pub(crate) block_number: Option, /// The hash of the block this operation was included in - pub block_hash: Option, + pub(crate) block_hash: Option, /// The hash of the transaction this operation was included in - pub transaction_hash: Option, + pub(crate) transaction_hash: Option, +} + +#[derive(Debug, Clone, Deserialize, Serialize)] +#[serde(untagged)] +pub(crate) enum RpcUserOperationOptionalGas { + V0_6(RpcUserOperationOptionalGasV0_6), + V0_7(RpcUserOperationOptionalGasV0_7), +} + +impl From for 
 UserOperationOptionalGas { + fn from(op: RpcUserOperationOptionalGas) -> Self { + match op { + RpcUserOperationOptionalGas::V0_6(op) => UserOperationOptionalGas::V0_6(op.into()), + RpcUserOperationOptionalGas::V0_7(op) => UserOperationOptionalGas::V0_7(op.into()), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(untagged)] +pub(crate) enum RpcGasEstimate { + V0_6(RpcGasEstimateV0_6), + V0_7(RpcGasEstimateV0_7), +} + +impl From for RpcGasEstimate { + fn from(estimate: RpcGasEstimateV0_6) -> Self { + RpcGasEstimate::V0_6(estimate) + } +} + +impl From for RpcGasEstimate { + fn from(estimate: RpcGasEstimateV0_7) -> Self { + RpcGasEstimate::V0_7(estimate) + } } /// User operation receipt #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct UserOperationReceipt { +pub struct RpcUserOperationReceipt { /// The hash of the user operation pub user_op_hash: H256, /// The entry point address this operation was sent to @@ -222,3 +255,39 @@ impl TryFrom for RpcReputationInput { }) } } + +/// Admin settings to toggle tracking within pool modules +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcAdminSetTracking { + /// Field to set the status for tracking within the paymaster + /// module + pub paymaster_tracking: bool, + /// Field to set the status for tracking within the reputation + /// module + pub reputation_tracking: bool, +} + +/// Admin settings selecting which pool state to clear +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcAdminClearState { + /// Field to set whether to clear entire mempool + pub clear_mempool: Option, + /// Field to set whether to clear paymaster state + pub clear_paymaster: Option, + /// Field to set whether to clear reputation state + pub clear_reputation: Option, +} + +/// Paymaster balance +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub 
struct RpcDebugPaymasterBalance { + /// Paymaster address + pub address: Address, + /// Paymaster balance including pending UOs in pool + pub pending_balance: U256, + /// Paymaster confirmed balance onchain + pub confirmed_balance: U256, +} diff --git a/crates/rpc/src/types/v0_6.rs b/crates/rpc/src/types/v0_6.rs new file mode 100644 index 00000000..36428edf --- /dev/null +++ b/crates/rpc/src/types/v0_6.rs @@ -0,0 +1,127 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. 
+ +use ethers::types::{Address, Bytes, U256}; +use rundler_types::{ + chain::ChainSpec, + v0_6::{UserOperation, UserOperationOptionalGas}, + GasEstimate, +}; +use serde::{Deserialize, Serialize}; + +use super::{FromRpc, RpcAddress}; + +/// User operation definition for RPC +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub(crate) struct RpcUserOperation { + sender: RpcAddress, + nonce: U256, + init_code: Bytes, + call_data: Bytes, + call_gas_limit: U256, + verification_gas_limit: U256, + pre_verification_gas: U256, + max_fee_per_gas: U256, + max_priority_fee_per_gas: U256, + paymaster_and_data: Bytes, + signature: Bytes, +} + +impl From for RpcUserOperation { + fn from(op: UserOperation) -> Self { + RpcUserOperation { + sender: op.sender.into(), + nonce: op.nonce, + init_code: op.init_code, + call_data: op.call_data, + call_gas_limit: op.call_gas_limit, + verification_gas_limit: op.verification_gas_limit, + pre_verification_gas: op.pre_verification_gas, + max_fee_per_gas: op.max_fee_per_gas, + max_priority_fee_per_gas: op.max_priority_fee_per_gas, + paymaster_and_data: op.paymaster_and_data, + signature: op.signature, + } + } +} + +impl FromRpc for UserOperation { + fn from_rpc(def: RpcUserOperation, _chain_spec: &ChainSpec) -> Self { + UserOperation { + sender: def.sender.into(), + nonce: def.nonce, + init_code: def.init_code, + call_data: def.call_data, + call_gas_limit: def.call_gas_limit, + verification_gas_limit: def.verification_gas_limit, + pre_verification_gas: def.pre_verification_gas, + max_fee_per_gas: def.max_fee_per_gas, + max_priority_fee_per_gas: def.max_priority_fee_per_gas, + paymaster_and_data: def.paymaster_and_data, + signature: def.signature, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(rename_all = "camelCase")] +pub(crate) struct RpcUserOperationOptionalGas { + sender: Address, + nonce: U256, + init_code: Bytes, + call_data: Bytes, + call_gas_limit: 
Option, + verification_gas_limit: Option, + pre_verification_gas: Option, + max_fee_per_gas: Option, + max_priority_fee_per_gas: Option, + paymaster_and_data: Bytes, + signature: Bytes, +} + +impl From for UserOperationOptionalGas { + fn from(def: RpcUserOperationOptionalGas) -> Self { + UserOperationOptionalGas { + sender: def.sender, + nonce: def.nonce, + init_code: def.init_code, + call_data: def.call_data, + call_gas_limit: def.call_gas_limit, + verification_gas_limit: def.verification_gas_limit, + pre_verification_gas: def.pre_verification_gas, + max_fee_per_gas: def.max_fee_per_gas, + max_priority_fee_per_gas: def.max_priority_fee_per_gas, + paymaster_and_data: def.paymaster_and_data, + signature: def.signature, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(rename_all = "camelCase")] +pub(crate) struct RpcGasEstimate { + pub(crate) pre_verification_gas: U256, + pub(crate) call_gas_limit: U256, + pub(crate) verification_gas_limit: U256, +} + +impl From for RpcGasEstimate { + fn from(estimate: GasEstimate) -> Self { + RpcGasEstimate { + pre_verification_gas: estimate.pre_verification_gas, + call_gas_limit: estimate.call_gas_limit, + verification_gas_limit: estimate.verification_gas_limit, + } + } +} diff --git a/crates/rpc/src/types/v0_7.rs b/crates/rpc/src/types/v0_7.rs new file mode 100644 index 00000000..ccd9ced6 --- /dev/null +++ b/crates/rpc/src/types/v0_7.rs @@ -0,0 +1,194 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. 
+// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +use ethers::types::{Address, Bytes, H256, U128, U256}; +use rundler_types::{ + chain::ChainSpec, + v0_7::{ + UserOperation, UserOperationBuilder, UserOperationOptionalGas, UserOperationRequiredFields, + }, + GasEstimate, +}; +use serde::{Deserialize, Serialize}; + +use super::{FromRpc, RpcAddress}; + +/// User operation definition for RPC inputs +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +#[serde(rename_all = "camelCase")] +pub(crate) struct RpcUserOperation { + sender: Address, + nonce: U256, + call_data: Bytes, + call_gas_limit: U128, + verification_gas_limit: U128, + pre_verification_gas: U256, + max_priority_fee_per_gas: U128, + max_fee_per_gas: U128, + #[serde(skip_serializing_if = "Option::is_none")] + factory: Option
, + #[serde(skip_serializing_if = "Option::is_none")] + factory_data: Option, + #[serde(skip_serializing_if = "Option::is_none")] + paymaster: Option
, + #[serde(skip_serializing_if = "Option::is_none")] + paymaster_verification_gas_limit: Option, + #[serde(skip_serializing_if = "Option::is_none")] + paymaster_post_op_gas_limit: Option, + #[serde(skip_serializing_if = "Option::is_none")] + paymaster_data: Option, + signature: Bytes, +} + +impl From for RpcUserOperation { + fn from(op: UserOperation) -> Self { + let factory_data = if op.factory.is_some() { + Some(op.factory_data) + } else { + None + }; + let (paymaster_data, paymaster_verification_gas_limit, paymaster_post_op_gas_limit) = + if op.paymaster.is_some() { + ( + Some(op.paymaster_data), + Some(op.paymaster_verification_gas_limit), + Some(op.paymaster_post_op_gas_limit), + ) + } else { + (None, None, None) + }; + + RpcUserOperation { + sender: op.sender, + nonce: op.nonce, + call_data: op.call_data, + call_gas_limit: op.call_gas_limit, + verification_gas_limit: op.verification_gas_limit, + pre_verification_gas: op.pre_verification_gas, + max_priority_fee_per_gas: op.max_priority_fee_per_gas, + max_fee_per_gas: op.max_fee_per_gas, + factory: op.factory, + factory_data, + paymaster: op.paymaster, + paymaster_verification_gas_limit, + paymaster_post_op_gas_limit, + paymaster_data, + signature: op.signature, + } + } +} + +impl FromRpc for UserOperation { + fn from_rpc(def: RpcUserOperation, chain_spec: &ChainSpec) -> Self { + let mut builder = UserOperationBuilder::new( + chain_spec, + UserOperationRequiredFields { + sender: def.sender, + nonce: def.nonce, + call_data: def.call_data, + call_gas_limit: def.call_gas_limit, + verification_gas_limit: def.verification_gas_limit, + pre_verification_gas: def.pre_verification_gas, + max_priority_fee_per_gas: def.max_priority_fee_per_gas, + max_fee_per_gas: def.max_fee_per_gas, + signature: def.signature, + }, + ); + if def.paymaster.is_some() { + builder = builder.paymaster( + def.paymaster.unwrap(), + def.paymaster_verification_gas_limit.unwrap_or_default(), + def.paymaster_post_op_gas_limit.unwrap_or_default(), 
+ def.paymaster_data.unwrap_or_default(), + ); + } + if def.factory.is_some() { + builder = builder.factory(def.factory.unwrap(), def.factory_data.unwrap_or_default()); + } + + builder.build() + } +} + +/// User operation with additional metadata +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(rename_all = "camelCase")] +pub(crate) struct RpcUserOperationByHash { + user_operation: RpcUserOperation, + entry_point: RpcAddress, + block_number: Option, + block_hash: Option, + transaction_hash: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(rename_all = "camelCase")] +pub(crate) struct RpcUserOperationOptionalGas { + sender: Address, + nonce: U256, + call_data: Bytes, + call_gas_limit: Option, + verification_gas_limit: Option, + pre_verification_gas: Option, + max_priority_fee_per_gas: Option, + max_fee_per_gas: Option, + factory: Option
, + factory_data: Option, + paymaster: Option
, + paymaster_verification_gas_limit: Option, + paymaster_post_op_gas_limit: Option, + paymaster_data: Option, + signature: Bytes, +} + +impl From for UserOperationOptionalGas { + fn from(def: RpcUserOperationOptionalGas) -> Self { + UserOperationOptionalGas { + sender: def.sender, + nonce: def.nonce, + call_data: def.call_data, + call_gas_limit: def.call_gas_limit, + verification_gas_limit: def.verification_gas_limit, + pre_verification_gas: def.pre_verification_gas, + max_priority_fee_per_gas: def.max_priority_fee_per_gas, + max_fee_per_gas: def.max_fee_per_gas, + factory: def.factory, + factory_data: def.factory_data.unwrap_or_default(), + paymaster: def.paymaster, + paymaster_verification_gas_limit: def.paymaster_verification_gas_limit, + paymaster_post_op_gas_limit: def.paymaster_post_op_gas_limit, + paymaster_data: def.paymaster_data.unwrap_or_default(), + signature: def.signature, + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +#[serde(rename_all = "camelCase")] +pub(crate) struct RpcGasEstimate { + pre_verification_gas: U256, + call_gas_limit: U256, + verification_gas_limit: U256, + paymaster_verification_gas_limit: Option, +} + +impl From for RpcGasEstimate { + fn from(estimate: GasEstimate) -> Self { + RpcGasEstimate { + pre_verification_gas: estimate.pre_verification_gas, + call_gas_limit: estimate.call_gas_limit, + verification_gas_limit: estimate.verification_gas_limit, + paymaster_verification_gas_limit: estimate.paymaster_verification_gas_limit, + } + } +} diff --git a/crates/rpc/src/utils.rs b/crates/rpc/src/utils.rs new file mode 100644 index 00000000..92c2f9ed --- /dev/null +++ b/crates/rpc/src/utils.rs @@ -0,0 +1,58 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. 
+// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +use std::panic::AssertUnwindSafe; + +use futures_util::{Future, FutureExt}; +use jsonrpsee::{ + core::RpcResult, + types::{error::INTERNAL_ERROR_CODE, ErrorObjectOwned}, +}; + +use crate::{error::rpc_err, eth::EthRpcError}; + +pub(crate) async fn safe_call_rpc_handler(rpc_name: &'static str, f: F) -> RpcResult +where + F: Future> + Send, + E: Into, +{ + let f = AssertUnwindSafe(f); + match f.catch_unwind().await { + Ok(r) => r.map_err(Into::into), + Err(_) => { + metrics::counter!("rpc_panic_count", "rpc_name" => rpc_name).increment(1); + tracing::error!("PANIC in RPC handler: {}", rpc_name); + Err(EthRpcError::Internal(anyhow::anyhow!("internal error: panic, see logs")).into()) + } + } +} + +/// Internal RPC result type. +pub(crate) type InternalRpcResult = std::result::Result; + +/// Internal RPC error. +/// +/// Allowing easy use of anyhow in RPC handlers for internal errors. 
+pub(crate) struct InternalRpcError(anyhow::Error); + +impl From for InternalRpcError { + fn from(e: anyhow::Error) -> Self { + Self(e) + } +} + +impl From for ErrorObjectOwned { + fn from(e: InternalRpcError) -> Self { + rpc_err(INTERNAL_ERROR_CODE, e.0.to_string()) + } +} diff --git a/crates/sim/build.rs b/crates/sim/build.rs index 7447c13c..b33c0c01 100644 --- a/crates/sim/build.rs +++ b/crates/sim/build.rs @@ -15,7 +15,8 @@ use std::{error, io::ErrorKind, process::Command}; fn main() -> Result<(), Box> { println!("cargo:rerun-if-changed=tracer/package.json"); - println!("cargo:rerun-if-changed=tracer/src/validationTracer.ts"); + println!("cargo:rerun-if-changed=tracer/src/validationTracerV0_6.ts"); + println!("cargo:rerun-if-changed=tracer/src/validationTracerV0_7.ts"); compile_tracer()?; Ok(()) } diff --git a/crates/sim/src/estimation/estimate_call_gas.rs b/crates/sim/src/estimation/estimate_call_gas.rs new file mode 100644 index 00000000..b68fa0a9 --- /dev/null +++ b/crates/sim/src/estimation/estimate_call_gas.rs @@ -0,0 +1,249 @@ +use anyhow::{anyhow, Context}; +use async_trait::async_trait; +use ethers::{ + abi::AbiDecode, + types::{spoof, Address, Bytes, H256, U128, U256}, +}; +use rundler_provider::{EntryPoint, SimulationProvider}; +use rundler_types::{ + contracts::v0_7::call_gas_estimation_proxy::{ + // Errors are shared between v0.6 and v0.7 proxies + EstimateCallGasContinuation, + EstimateCallGasResult, + EstimateCallGasRevertAtMax, + TestCallGasResult, + }, + UserOperation, +}; +use rundler_utils::eth; + +use super::Settings; +use crate::GasEstimationError; + +/// Gas estimates will be rounded up to the next multiple of this. Increasing +/// this value reduces the number of rounds of `eth_call` needed in binary +/// search, e.g. a value of 1024 means ten fewer `eth_call`s needed for each of +/// verification gas and call gas. +const GAS_ROUNDING: u64 = 4096; + +/// Must match the constant in `CallGasEstimationProxyTypes.sol`. 
+#[allow(dead_code)] +pub(crate) const PROXY_IMPLEMENTATION_ADDRESS_MARKER: &str = + "A13dB4eCfbce0586E57D1AeE224FbE64706E8cd3"; + +/// Estimates the gas limit for a user operation +#[async_trait] +pub trait CallGasEstimator: Send + Sync + 'static { + /// The user operation type estimated by this estimator + type UO: UserOperation; + + /// Returns a gas estimate or a revert message, or an anyhow error on any + /// other error + async fn estimate_call_gas( + &self, + op: Self::UO, + block_hash: H256, + state_override: spoof::State, + ) -> Result; + + /// Calls simulate_handle_op, but captures the execution result. Returning an + /// error if the operation reverts or anyhow error on any other error + async fn simulate_handle_op_with_result( + &self, + op: Self::UO, + block_hash: H256, + state_override: spoof::State, + ) -> Result<(), GasEstimationError>; +} + +/// Implementation of a call gas estimator which performs a binary search with +/// the `target` and `targetData` arguments to `simulateHandleOp` +#[derive(Debug)] +pub struct CallGasEstimatorImpl { + entry_point: E, + settings: Settings, + specialization: S, +} + +/// Functions associated with a particular user operation version that +/// specialize the `CallGasEstimatorImpl` to be able to handle that version. +/// Each user operation version will need an implementation of this trait to be +/// able to be used with `CallGasEstimatorImpl` +pub trait CallGasEstimatorSpecialization: Send + Sync + 'static { + /// The user operation type estimated by this specialization + type UO: UserOperation; + + /// Add the required CallGasEstimation proxy to the overrides at the given entrypoint address + fn add_proxy_to_overrides(&self, ep_to_override: Address, state_override: &mut spoof::State); + + /// Returns the input user operation, modified to have limits but zero for the call gas limits. 
+ /// The intent is that the modified operation should run its validation but do nothing during execution + fn get_op_with_no_call_gas(&self, op: Self::UO) -> Self::UO; + + /// Returns the calldata for the `estimateCallGas` function of the proxy + fn get_estimate_call_gas_calldata( + &self, + callless_op: Self::UO, + min_gas: U256, + max_gas: U256, + rounding: U256, + is_continuation: bool, + ) -> Bytes; + + /// Returns the calldata for the `testCallGas` function of the proxy + fn get_test_call_gas_calldata(&self, callless_op: Self::UO, call_gas_limit: U256) -> Bytes; +} + +#[async_trait] +impl CallGasEstimator for CallGasEstimatorImpl +where + UO: UserOperation, + E: EntryPoint + SimulationProvider, + S: CallGasEstimatorSpecialization, +{ + type UO = UO; + + async fn estimate_call_gas( + &self, + op: Self::UO, + block_hash: H256, + mut state_override: spoof::State, + ) -> Result { + let timer = std::time::Instant::now(); + self.specialization + .add_proxy_to_overrides(self.entry_point.address(), &mut state_override); + + let callless_op = self.specialization.get_op_with_no_call_gas(op.clone()); + + let mut min_gas = U256::zero(); + let mut max_gas = U256::from(self.settings.max_call_gas); + let mut is_continuation = false; + let mut num_rounds = U256::zero(); + loop { + let target_call_data = self.specialization.get_estimate_call_gas_calldata( + callless_op.clone(), + min_gas, + max_gas, + GAS_ROUNDING.into(), + is_continuation, + ); + let target_revert_data = self + .entry_point + .call_spoofed_simulate_op( + callless_op.clone(), + self.entry_point.address(), + target_call_data, + block_hash, + self.settings.max_simulate_handle_ops_gas.into(), + &state_override, + ) + .await? + .map_err(GasEstimationError::RevertInValidation)? 
+ .target_result; + println!("HC estimate_call_gas revert data {:?}", target_revert_data); + if let Ok(result) = EstimateCallGasResult::decode(&target_revert_data) { + println!("HC estimation.rs Ok result"); + num_rounds += result.num_rounds; + tracing::debug!( + "binary search for call gas took {num_rounds} rounds, {}ms", + timer.elapsed().as_millis() + ); + return Ok(result + .gas_estimate + .try_into() + .ok() + .context("gas estimate should fit in a 128-bit int")?); + } else if let Ok(revert) = EstimateCallGasRevertAtMax::decode(&target_revert_data) { + println!("HC estimation.rs RevertAtMax"); + let error = if let Some(message) = eth::parse_revert_message(&revert.revert_data) { + GasEstimationError::RevertInCallWithMessage(message) + } else { + GasEstimationError::RevertInCallWithBytes(revert.revert_data) + }; + return Err(error); + } else if let Ok(continuation) = + EstimateCallGasContinuation::decode(&target_revert_data) + { + println!("HC estimation.rs Ok continuation"); + if is_continuation + && continuation.min_gas <= min_gas + && continuation.max_gas >= max_gas + { + // This should never happen, but if it does, bail so we + // don't end up in an infinite loop! 
+ Err(anyhow!( + "estimateCallGas should make progress each time it is called" + ))?; + } + is_continuation = true; + min_gas = min_gas.max(continuation.min_gas); + max_gas = max_gas.min(continuation.max_gas); + num_rounds += continuation.num_rounds; + } else { + Err(anyhow!( + "estimateCallGas revert should be a Result or a Continuation" + ))?; + } + } + } + + async fn simulate_handle_op_with_result( + &self, + op: Self::UO, + block_hash: H256, + mut state_override: spoof::State, + ) -> Result<(), GasEstimationError> { + self.specialization + .add_proxy_to_overrides(self.entry_point.address(), &mut state_override); + + let call_gas_limit = op.call_gas_limit(); + let callless_op = self.specialization.get_op_with_no_call_gas(op); + let target_call_data = self + .specialization + .get_test_call_gas_calldata(callless_op.clone(), call_gas_limit); + + let target_revert_data = self + .entry_point + .call_spoofed_simulate_op( + callless_op, + self.entry_point.address(), + target_call_data, + block_hash, + self.settings.max_simulate_handle_ops_gas.into(), + &state_override, + ) + .await? + .map_err(GasEstimationError::RevertInValidation)? + .target_result; + if let Ok(result) = TestCallGasResult::decode(&target_revert_data) { + if result.success { + Ok(()) + } else { + let error = if let Some(message) = eth::parse_revert_message(&result.revert_data) { + GasEstimationError::RevertInCallWithMessage(message) + } else { + GasEstimationError::RevertInCallWithBytes(result.revert_data) + }; + Err(error) + } + } else { + Err(anyhow!("testCallGas revert should be a TestCallGasResult"))? 
+ } + } +} + +impl CallGasEstimatorImpl +where + UO: UserOperation, + E: EntryPoint + SimulationProvider, + S: CallGasEstimatorSpecialization, +{ + /// Creates a new call gas estimator + pub fn new(entry_point: E, settings: Settings, specialization: S) -> Self { + Self { + entry_point, + settings, + specialization, + } + } +} diff --git a/crates/sim/src/estimation/estimate_verification_gas.rs b/crates/sim/src/estimation/estimate_verification_gas.rs new file mode 100644 index 00000000..8072a8b7 --- /dev/null +++ b/crates/sim/src/estimation/estimate_verification_gas.rs @@ -0,0 +1,235 @@ +use std::sync::Arc; + +use anyhow::{anyhow, Context}; +use async_trait::async_trait; +use ethers::types::{spoof, Address, Bytes, H256, U128, U256}; +use rundler_provider::{EntryPoint, Provider, SimulateOpCallData, SimulationProvider}; +use rundler_types::{chain::ChainSpec, UserOperation}; + +use super::Settings; +use crate::GasEstimationError; + +/// Gas estimation will stop when the binary search bounds are within +/// `GAS_ESTIMATION_ERROR_MARGIN` of each other. +const GAS_ESTIMATION_ERROR_MARGIN: f64 = 0.1; +/// Error codes returned by the entry point when validation runs out of gas. +/// These appear as the start of the "reason" string in the revert data. +const OUT_OF_GAS_ERROR_CODES: &[&str] = &[ + "AA13", "AA23", "AA26", "AA33", "AA36", "AA40", "AA41", "AA51", +]; + +/// Estimates a verification gas limit for a user operation. Can be used to +/// estimate both verification gas and, in the v0.7 case, paymaster verification +/// gas. +#[async_trait] +pub trait VerificationGasEstimator: Send + Sync + 'static { + /// The user operation type estimated by this estimator + type UO: UserOperation; + + /// Returns a gas estimate or a revert message, or an anyhow error on any + /// other error. 
+ /// + /// By passing different functions for the `get_op_with_limit` argument, + /// the same estimator instance can be used to separately estimate the + /// account and paymaster verification gas limits. + async fn estimate_verification_gas< + F: Send + Sync + Fn(Self::UO, GetOpWithLimitArgs) -> Self::UO, + >( + &self, + op: &Self::UO, + block_hash: H256, + state_override: &spoof::State, + max_guess: U128, + get_op_with_limit: F, + ) -> Result; +} + +#[derive(Debug, Clone, Copy)] +pub struct GetOpWithLimitArgs { + pub gas: U128, + pub fee: U128, +} + +/// Implementation of a verification gas estimator +#[derive(Debug)] +pub struct VerificationGasEstimatorImpl { + chain_spec: ChainSpec, + provider: Arc

, + entry_point: E, + settings: Settings, +} + +#[async_trait] +impl VerificationGasEstimator for VerificationGasEstimatorImpl +where + UO: UserOperation, + P: Provider, + E: EntryPoint + SimulationProvider, +{ + type UO = UO; + + async fn estimate_verification_gas UO>( + &self, + op: &UO, + block_hash: H256, + state_override: &spoof::State, + max_guess: U128, + get_op_with_limit: F, + ) -> Result { + let timer = std::time::Instant::now(); + let paymaster_gas_fee = U128::from(self.settings.verification_estimation_gas_fee); + + // Fee logic for gas estimation: + // + // If there is no paymaster, verification estimation is always performed + // with zero fees. The cost of the native transfer is added to the verification gas + // at the end of estimation. + // + // If using a paymaster, the total cost is kept constant, and the fee is adjusted + // based on the gas used in the simulation. The total cost is set by a configuration + // setting. + let get_op = |gas: U128| -> UO { + let fee = if op.paymaster().is_none() { + U128::zero() + } else { + U128::try_from( + U256::from(paymaster_gas_fee) + .checked_div(U256::from(gas) + op.pre_verification_gas()) + .unwrap_or(U256::MAX), + ) + .unwrap_or(U128::MAX) + }; + get_op_with_limit(op.clone(), GetOpWithLimitArgs { gas, fee }) + }; + + // Make one attempt at max gas, to see if success is possible. 
+ // Capture the gas usage of this attempt and use as the initial guess in the binary search + let initial_op = get_op(max_guess); + println!("HC estimate_verification initial_op {:?}", initial_op.clone()); + let SimulateOpCallData { + call_data, + spoofed_state, + } = self + .entry_point + .get_simulate_op_call_data(initial_op, state_override); + let gas_used = self + .provider + .get_gas_used( + self.entry_point.address(), + U256::zero(), + call_data, + spoofed_state.clone(), + ) + .await + .context("failed to run initial guess")?; + println!("HC estimate_verification SimulateHandleOp initial guess gas_used {:?}", gas_used); + + if gas_used.success { + if self.entry_point.simulation_should_revert() { + Err(anyhow!( + "simulateHandleOp succeeded but should always revert. Make sure the entry point contract is deployed and the address is correct" + ))?; + } + } else if let Some(revert) = self + .entry_point + .decode_simulate_handle_ops_revert(gas_used.result) + .err() + { + println!("HC estimate_verification GasEstimationError {}", revert); + return Err(GasEstimationError::RevertInValidation(revert)); + } + + let run_attempt_returning_error = |gas: u64| async move { + let op = get_op(gas.into()); + let revert = self + .entry_point + .call_spoofed_simulate_op( + op, + Address::zero(), + Bytes::new(), + block_hash, + self.settings.max_simulate_handle_ops_gas.into(), + state_override, + ) + .await? + .err(); + + if let Some(revert) = revert { + if let Some(error_code) = revert.entry_point_error_code() { + if OUT_OF_GAS_ERROR_CODES.contains(&error_code) { + // This error occurs when out of gas, return false. 
+ return Ok(false); + } + } + // This is a different error, return it + Err(GasEstimationError::RevertInValidation(revert)) + } else { + // This succeeded, return true + Ok(true) + } + }; + + let mut max_failure_gas = 1; + let mut min_success_gas = self.settings.max_verification_gas; + + if gas_used.gas_used.gt(&U256::from(u64::MAX)) { + return Err(GasEstimationError::GasUsedTooLarge); + } + let mut guess = gas_used.gas_used.as_u64().saturating_mul(2); + let mut num_rounds = 0; + while (min_success_gas as f64) / (max_failure_gas as f64) + > (1.0 + GAS_ESTIMATION_ERROR_MARGIN) + { + num_rounds += 1; + if run_attempt_returning_error(guess).await? { + min_success_gas = guess; + } else { + max_failure_gas = guess; + } + guess = max_failure_gas.saturating_add(min_success_gas) / 2; + } + println!("HC after verification gas estimation loop max_fail {:?} min_success {:?}", max_failure_gas, min_success_gas); + + tracing::debug!( + "binary search for verification gas took {num_rounds} rounds, {}ms", + timer.elapsed().as_millis() + ); + + let mut min_success_gas = U256::from(min_success_gas); + + // If not using a paymaster, always add the cost of a native transfer to the verification gas. + // This may cause an over estimation when the account does have enough deposit to pay for the + // max cost, but it is better to overestimate than underestimate. + if op.paymaster().is_none() { + min_success_gas += self.chain_spec.deposit_transfer_overhead; + } + println!("HC verification min_success_gas {:?}", min_success_gas); + + + Ok(U128::try_from(min_success_gas) + .ok() + .context("min success gas should fit in 128-bit int")?) + } +} + +impl VerificationGasEstimatorImpl +where + UO: UserOperation, + P: Provider, + E: EntryPoint + SimulationProvider, +{ + /// Create a new instance + pub fn new( + chain_spec: ChainSpec, + provider: Arc

, + entry_point: E, + settings: Settings, + ) -> Self { + Self { + chain_spec, + provider, + entry_point, + settings, + } + } +} diff --git a/crates/sim/src/estimation/estimation.rs b/crates/sim/src/estimation/estimation.rs deleted file mode 100644 index d89d01e7..00000000 --- a/crates/sim/src/estimation/estimation.rs +++ /dev/null @@ -1,1342 +0,0 @@ -// This file is part of Rundler. -// -// Rundler is free software: you can redistribute it and/or modify it under the -// terms of the GNU Lesser General Public License as published by the Free Software -// Foundation, either version 3 of the License, or (at your option) any later version. -// -// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; -// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License along with Rundler. -// If not, see https://www.gnu.org/licenses/. - -use std::{cmp, ops::Deref, sync::Arc}; - -use anyhow::{anyhow, Context}; -use ethers::{ - abi::AbiDecode, - contract::EthCall, - providers::spoof, - types::{Address, Bytes, H256, U256}, -}; -#[cfg(feature = "test-utils")] -use mockall::automock; -use rand::Rng; -use rundler_provider::{EntryPoint, Provider}; -use rundler_types::{ - contracts::{ - call_gas_estimation_proxy::{ - EstimateCallGasArgs, EstimateCallGasCall, EstimateCallGasContinuation, - EstimateCallGasResult, EstimateCallGasRevertAtMax, - CALLGASESTIMATIONPROXY_DEPLOYED_BYTECODE, - }, - i_entry_point, - }, - UserOperation, -}; -use rundler_utils::{eth, math}; -use tokio::join; - -use super::types::{GasEstimate, Settings, UserOperationOptionalGas}; -use crate::{gas, precheck::MIN_CALL_GAS_LIMIT, utils, FeeEstimator}; - -/// Gas estimates will be rounded up to the next multiple of this. Increasing -/// this value reduces the number of rounds of `eth_call` needed in binary -/// search, e.g. 
a value of 1024 means ten fewer `eth_call`s needed for each of -/// verification gas and call gas. -const GAS_ROUNDING: u64 = 4096; - -/// Gas estimation will stop when the binary search bounds are within -/// `GAS_ESTIMATION_ERROR_MARGIN` of each other. -const GAS_ESTIMATION_ERROR_MARGIN: f64 = 0.1; - -const VERIFICATION_GAS_BUFFER_PERCENT: u64 = 10; - -/// Offset at which the proxy target address appears in the proxy bytecode. Must -/// be updated whenever `CallGasEstimationProxy.sol` changes. -/// -/// The easiest way to get the updated value is to run this module's tests. The -/// failure will tell you the new value. -const PROXY_TARGET_OFFSET: usize = 137; - -/// Error type for gas estimation -#[derive(Debug, thiserror::Error)] -pub enum GasEstimationError { - /// Validation reverted - #[error("{0}")] - RevertInValidation(String), - /// Call reverted with a string message - #[error("user operation's call reverted: {0}")] - RevertInCallWithMessage(String), - /// Call reverted with bytes - #[error("user operation's call reverted: {0:#x}")] - RevertInCallWithBytes(Bytes), - /// Other error - #[error(transparent)] - Other(#[from] anyhow::Error), -} - -/// Gas estimator trait -#[cfg_attr(feature = "test-utils", automock)] -#[async_trait::async_trait] -pub trait GasEstimator: Send + Sync + 'static { - /// Returns a gas estimate or a revert message, or an anyhow error on any - /// other error. - async fn estimate_op_gas( - &self, - op: UserOperationOptionalGas, - state_override: spoof::State, - at_price: Option, - ) -> Result; -} - -/// Gas estimator implementation -#[derive(Debug)] -pub struct GasEstimatorImpl { - chain_id: u64, - /// FIXME - Used by HybridCompute - pub provider: Arc

, - /// FIXME - Used by HybridCompute - pub entry_point: E, - settings: Settings, - fee_estimator: FeeEstimator

, -} - -#[async_trait::async_trait] -impl GasEstimator for GasEstimatorImpl { - async fn estimate_op_gas( - &self, - op: UserOperationOptionalGas, - state_override: spoof::State, - at_price: Option, - ) -> Result { - //println!("HC entering estimate_op_gas, op {:?}", op); - let Self { - provider, settings, .. - } = self; - - let (block_hash, _) = provider - .get_latest_block_hash_and_number() - .await - .map_err(anyhow::Error::from)?; - - // Estimate pre verification gas at the current fees - // If the user provides fees, use them, otherwise use the current bundle fees - let (bundle_fees, base_fee) = self.fee_estimator.required_bundle_fees(None).await?; - println!("HC bundle_fees {:?} base_fee {:?} at_price {:?}", bundle_fees, base_fee, at_price); - let gas_price = if let Some(at_price) = at_price { - at_price - } else if let (Some(max_fee), Some(prio_fee)) = - (op.max_fee_per_gas, op.max_priority_fee_per_gas) - { - cmp::min(max_fee, base_fee + prio_fee) - } else { - base_fee + bundle_fees.max_priority_fee_per_gas - }; - assert!(gas_price > U256::zero()); - - let pre_verification_gas = self.estimate_pre_verification_gas(&op, gas_price).await?; - let op = UserOperation { - pre_verification_gas, - ..op.into_user_operation(settings) - }; - //println!("HC est3a new_op {:?}", op); - - let verification_future = - self.binary_search_verification_gas(&op, block_hash, &state_override); - - let call_future = self.estimate_call_gas(&op, block_hash, state_override.clone()); - - // Not try_join! because then the output is nondeterministic if both - // verification and call estimation fail. 
- let timer = std::time::Instant::now(); - let (verification_gas_limit, call_gas_limit) = join!(verification_future, call_future); - tracing::debug!("gas estimation took {}ms", timer.elapsed().as_millis()); - - let verification_gas_limit = verification_gas_limit?; - println!("HC verification_gas_limit {:?}", verification_gas_limit); - let call_gas_limit = call_gas_limit?; - - println!("HC call_gas_limit {:?}", call_gas_limit); - - if let Some(err) = settings.validate() { - return Err(GasEstimationError::RevertInValidation(err)); - } - - Ok(GasEstimate { - pre_verification_gas, - verification_gas_limit: math::increase_by_percent( - verification_gas_limit, - VERIFICATION_GAS_BUFFER_PERCENT, - ) - .min(settings.max_verification_gas.into()), - call_gas_limit: call_gas_limit.clamp(MIN_CALL_GAS_LIMIT, settings.max_call_gas.into()), - }) - } -} - -impl GasEstimatorImpl { - /// Create a new gas estimator - pub fn new( - chain_id: u64, - provider: Arc

, - entry_point: E, - settings: Settings, - fee_estimator: FeeEstimator

, - ) -> Self { - Self { - chain_id, - provider, - entry_point, - settings, - fee_estimator, - } - } - - async fn binary_search_verification_gas( - &self, - op: &UserOperation, - block_hash: H256, - state_override: &spoof::State, - ) -> Result { - let timer = std::time::Instant::now(); - let simulation_gas = U256::from(self.settings.max_simulate_handle_ops_gas); - let gas_fee = U256::from(self.settings.validation_estimation_gas_fee); - - // Make one attempt at max gas, to see if success is possible. - // Capture the gas usage of this attempt and use as the initial guess in the binary search - - let initial_op = UserOperation { - verification_gas_limit: simulation_gas, - max_fee_per_gas: gas_fee - .checked_div(simulation_gas + op.pre_verification_gas) - .unwrap_or(U256::MAX), - call_gas_limit: 0.into(), - ..op.clone() - }; - - println!("HC estimation.rs initial_op {:?}", initial_op.clone()); - - let gas_used = utils::get_gas_used( - self.provider.deref(), - self.entry_point.address(), - U256::zero(), - utils::call_data_of( - i_entry_point::SimulateHandleOpCall::selector(), - (initial_op, Address::zero(), Bytes::new()), - ), - state_override, - ) - .await - .context("failed to run initial guess")?; - println!("HC estimation.rs SimulateHandleOp initial guess gas_used {}", gas_used); - - if gas_used.success { - Err(anyhow!( - "simulateHandleOp succeeded but should always revert, make sure the entry point contract is deployed and the address is correct" - ))?; - } - if let Some(message) = self - .entry_point - .decode_simulate_handle_ops_revert(gas_used.result) - .err() - { - println!("HC GasEstimationError {}", message); - return Err(GasEstimationError::RevertInValidation(message)); - } - - let run_attempt_returning_error = |gas: u64| async move { - let max_fee_per_gas = gas_fee - .checked_div(U256::from(gas) + op.pre_verification_gas) - .unwrap_or(U256::MAX); - let op = UserOperation { - max_fee_per_gas, - verification_gas_limit: gas.into(), - call_gas_limit: 
0.into(), - ..op.clone() - }; - let error_message = self - .entry_point - .call_spoofed_simulate_op( - op.clone(), - Address::zero(), - Bytes::new(), - block_hash, - simulation_gas, - state_override, - ) - .await? - .err(); - //println!("HC binary search call_spoofed_simulate_op {:?} err={:?}", op.verification_gas_limit, error_message); - - if let Some(error_message) = error_message { - if error_message.contains("AA13") - || error_message.contains("AA23") - || error_message.contains("AA33") - || error_message.contains("AA40") - || error_message.contains("AA41") - || error_message.contains("AA51") - { - // This error occurs when out of gas, return false. - Ok(false) - } else { - // This is a different error, return it - Err(GasEstimationError::RevertInValidation(error_message)) - } - } else { - // This succeeded, return true - Ok(true) - } - }; - - let mut max_failure_gas = 1; - let mut min_success_gas = self.settings.max_verification_gas; - - if gas_used.gas_used.cmp(&U256::from(u64::MAX)).is_gt() { - return Err(GasEstimationError::RevertInValidation( - "gas_used cannot be larger than a u64 integer".to_string(), - )); - } - let mut guess = gas_used.gas_used.as_u64() * 2; - let mut num_rounds = 0; - - //println!("HC ----- before gas estimation loop"); - while (min_success_gas as f64) / (max_failure_gas as f64) - > (1.0 + GAS_ESTIMATION_ERROR_MARGIN) - { - num_rounds += 1; - if run_attempt_returning_error(guess).await? 
{ - min_success_gas = guess; - } else { - max_failure_gas = guess; - } - guess = (max_failure_gas + min_success_gas) / 2; - } - println!("HC after gas estimation loop max_fail {:?} min_success {:?}", max_failure_gas, min_success_gas); - - tracing::debug!( - "binary search for verification gas took {num_rounds} rounds, {}ms", - timer.elapsed().as_millis() - ); - - Ok(min_success_gas.into()) - } - - async fn estimate_call_gas( - &self, - op: &UserOperation, - block_hash: H256, - mut state_override: spoof::State, - ) -> Result { - let timer = std::time::Instant::now(); - // For an explanation of what's going on here, see the comment at the - // top of `CallGasEstimationProxy.sol`. - let entry_point_code = self - .provider - .get_code(self.entry_point.address(), Some(block_hash)) - .await - .map_err(anyhow::Error::from)?; - // Use a random address for the moved entry point so that users can't - // intentionally get bad estimates by interacting with the hardcoded - // address. - let moved_entry_point_address: Address = rand::thread_rng().gen(); - let estimation_proxy_bytecode = - estimation_proxy_bytecode_with_target(moved_entry_point_address); - //println!("HC ecg 2a rand_addr {:?} state_override was {:?}", moved_entry_point_address, state_override); - state_override - .account(moved_entry_point_address) - .code(entry_point_code); - state_override - .account(self.entry_point.address()) - .code(estimation_proxy_bytecode); - - //println!("HC ecg 2b state_override now ..."); - - let callless_op = UserOperation { - call_gas_limit: 0.into(), - max_fee_per_gas: 0.into(), - verification_gas_limit: self.settings.max_verification_gas.into(), - ..op.clone() - }; - - let mut min_gas = U256::zero(); - let mut max_gas = U256::from(self.settings.max_call_gas); - let mut is_continuation = false; - let mut num_rounds = U256::zero(); - loop { - let target_call_data = utils::call_data_of( - EstimateCallGasCall::selector(), - (EstimateCallGasArgs { - sender: op.sender, - call_data: 
Bytes::clone(&op.call_data), - min_gas, - max_gas, - rounding: GAS_ROUNDING.into(), - is_continuation, - },), - ); - //println!("HC pre ecg 4 {:?}...", callless_op.clone()); - let target_revert_data = self - .entry_point - .call_spoofed_simulate_op( - callless_op.clone(), - self.entry_point.address(), - target_call_data, - block_hash, - self.settings.max_simulate_handle_ops_gas.into(), - &state_override, - ) - .await? - .map_err(GasEstimationError::RevertInCallWithMessage)? - .target_result; - println!("HC estimate_call_gas revert data {:?}", target_revert_data); - if let Ok(result) = EstimateCallGasResult::decode(&target_revert_data) { - println!("HC estimation.rs Ok result"); - num_rounds += result.num_rounds; - tracing::debug!( - "binary search for call gas took {num_rounds} rounds, {}ms", - timer.elapsed().as_millis() - ); - return Ok(result.gas_estimate); - } else if let Ok(revert) = EstimateCallGasRevertAtMax::decode(&target_revert_data) { - println!("HC estimation.rs RevertAtMax"); - let error = if let Some(message) = eth::parse_revert_message(&revert.revert_data) { - GasEstimationError::RevertInCallWithMessage(message) - } else { - GasEstimationError::RevertInCallWithBytes(revert.revert_data) - }; - return Err(error); - } else if let Ok(continuation) = - EstimateCallGasContinuation::decode(&target_revert_data) - { - println!("HC estimation.rs Ok continuation"); - if is_continuation - && continuation.min_gas <= min_gas - && continuation.max_gas >= max_gas - { - // This should never happen, but if it does, bail so we - // don't end up in an infinite loop! 
- Err(anyhow!( - "estimateCallGas should make progress each time it is called" - ))?; - } - is_continuation = true; - min_gas = min_gas.max(continuation.min_gas); - max_gas = max_gas.min(continuation.max_gas); - num_rounds += continuation.num_rounds; - } else { - Err(anyhow!( - "estimateCallGas revert should be a Result or a Continuation" - ))?; - } - } - } - - async fn estimate_pre_verification_gas( - &self, - op: &UserOperationOptionalGas, - gas_price: U256, - ) -> Result { - //println!("HC in estimate_pre_verification_gas gas_price {:?}", gas_price); - Ok(gas::estimate_pre_verification_gas( - &op.max_fill(&self.settings), - &op.random_fill(&self.settings), - self.entry_point.address(), - self.provider.clone(), - self.chain_id, - gas_price, - ) - .await?) - } -} - -/// Replaces the address of the proxy target where it appears in the proxy -/// bytecode so we don't need the same fixed address every time. -fn estimation_proxy_bytecode_with_target(target: Address) -> Bytes { - let mut vec = CALLGASESTIMATIONPROXY_DEPLOYED_BYTECODE.to_vec(); - vec[PROXY_TARGET_OFFSET..PROXY_TARGET_OFFSET + 20].copy_from_slice(target.as_bytes()); - vec.into() -} - -#[cfg(test)] -mod tests { - use ethers::{ - abi::{AbiEncode, Address}, - providers::JsonRpcError, - types::{Chain, U64}, - utils::hex, - }; - use rundler_provider::{MockEntryPoint, MockProvider, ProviderError}; - use rundler_types::contracts::{get_gas_used::GasUsedResult, i_entry_point::ExecutionResult}; - - use super::*; - use crate::PriorityFeeMode; - - // Gas overhead defaults - const FIXED: u32 = 21000; - const PER_USER_OP: u32 = 18300; - const PER_USER_OP_WORD: u32 = 4; - const BUNDLE_SIZE: u32 = 1; - - /// Must match the constant in `CallGasEstimationProxy.sol`. 
- const PROXY_TARGET_CONSTANT: &str = "A13dB4eCfbce0586E57D1AeE224FbE64706E8cd3"; - - fn create_base_config() -> (MockEntryPoint, MockProvider) { - let entry = MockEntryPoint::new(); - let provider = MockProvider::new(); - - (entry, provider) - } - - fn create_fee_estimator(provider: Arc) -> FeeEstimator { - FeeEstimator::new(provider, 0, PriorityFeeMode::BaseFeePercent(0), 0) - } - - fn create_estimator( - entry: MockEntryPoint, - provider: MockProvider, - ) -> (GasEstimatorImpl, Settings) { - let settings = Settings { - max_verification_gas: 10000000000, - max_call_gas: 10000000000, - max_simulate_handle_ops_gas: 100000000, - validation_estimation_gas_fee: 1_000_000_000_000, - }; - let provider = Arc::new(provider); - let estimator: GasEstimatorImpl = GasEstimatorImpl::new( - 0, - provider.clone(), - entry, - settings, - create_fee_estimator(provider), - ); - - (estimator, settings) - } - - fn demo_user_op_optional_gas() -> UserOperationOptionalGas { - UserOperationOptionalGas { - sender: Address::zero(), - nonce: U256::zero(), - init_code: Bytes::new(), - call_data: Bytes::new(), - call_gas_limit: Some(U256::from(1000)), - verification_gas_limit: Some(U256::from(1000)), - pre_verification_gas: Some(U256::from(1000)), - max_fee_per_gas: Some(U256::from(1000)), - max_priority_fee_per_gas: Some(U256::from(1000)), - paymaster_and_data: Bytes::new(), - signature: Bytes::new(), - } - } - - fn demo_user_op() -> UserOperation { - UserOperation { - sender: Address::zero(), - nonce: U256::zero(), - init_code: Bytes::new(), - call_data: Bytes::new(), - call_gas_limit: U256::from(1000), - verification_gas_limit: U256::from(1000), - pre_verification_gas: U256::from(1000), - max_fee_per_gas: U256::from(1000), - max_priority_fee_per_gas: U256::from(1000), - paymaster_and_data: Bytes::new(), - signature: Bytes::new(), - } - } - - #[test] - fn test_proxy_target_offset() { - let proxy_target_bytes = hex::decode(PROXY_TARGET_CONSTANT).unwrap(); - let mut offsets = Vec::::new(); - 
for i in 0..CALLGASESTIMATIONPROXY_DEPLOYED_BYTECODE.len() - 20 { - if CALLGASESTIMATIONPROXY_DEPLOYED_BYTECODE[i..i + 20] == proxy_target_bytes { - offsets.push(i); - } - } - assert_eq!(vec![PROXY_TARGET_OFFSET], offsets); - } - - #[tokio::test] - async fn test_calc_pre_verification_input() { - let (mut entry, provider) = create_base_config(); - entry.expect_address().return_const(Address::zero()); - - let (estimator, settings) = create_estimator(entry, provider); - let user_op = demo_user_op_optional_gas(); - let estimation = estimator - .estimate_pre_verification_gas(&user_op, U256::zero()) - .await - .unwrap(); - - let u_o = user_op.max_fill(&settings); - - let u_o_encoded = u_o.encode(); - let length_in_words = (u_o_encoded.len() + 31) / 32; - - //computed by mapping through the calldata bytes - //and adding to the value either 4 or 16 depending - //if the byte is non-zero - let call_data_cost = 3936; - - let result = U256::from(FIXED) / U256::from(BUNDLE_SIZE) - + call_data_cost - + U256::from(PER_USER_OP) - + U256::from(PER_USER_OP_WORD) * length_in_words; - - let dynamic_gas = 0; - - assert_eq!(result + dynamic_gas, estimation); - } - - #[tokio::test] - async fn test_calc_pre_verification_input_arbitrum() { - let (mut entry, mut provider) = create_base_config(); - entry.expect_address().return_const(Address::zero()); - provider - .expect_calc_arbitrum_l1_gas() - .returning(|_a, _b| Ok(U256::from(1000))); - - let settings = Settings { - max_verification_gas: 10000000000, - max_call_gas: 10000000000, - max_simulate_handle_ops_gas: 100000000, - validation_estimation_gas_fee: 1_000_000_000_000, - }; - - // Chose arbitrum - let provider = Arc::new(provider); - let estimator: GasEstimatorImpl = GasEstimatorImpl::new( - Chain::Arbitrum as u64, - provider.clone(), - entry, - settings, - create_fee_estimator(provider), - ); - - let user_op = demo_user_op_optional_gas(); - let estimation = estimator - .estimate_pre_verification_gas(&user_op, U256::zero()) - .await - 
.unwrap(); - - let u_o = user_op.max_fill(&settings); - - let u_o_encoded = u_o.encode(); - let length_in_words = (u_o_encoded.len() + 31) / 32; - - //computed by mapping through the calldata bytes - //and adding to the value either 4 or 16 depending - //if the byte is non-zero - let call_data_cost = 3936; - - let result = U256::from(FIXED) / U256::from(BUNDLE_SIZE) - + call_data_cost - + U256::from(PER_USER_OP) - + U256::from(PER_USER_OP_WORD) * length_in_words; - - //Arbitrum dynamic gas - let dynamic_gas = 1000; - - assert_eq!(result + dynamic_gas, estimation); - } - - #[tokio::test] - async fn test_calc_pre_verification_input_op() { - let (mut entry, mut provider) = create_base_config(); - - entry.expect_address().return_const(Address::zero()); - provider - .expect_calc_optimism_l1_gas() - .returning(|_a, _b, _c| Ok(U256::from(1000))); - - let settings = Settings { - max_verification_gas: 10000000000, - max_call_gas: 10000000000, - max_simulate_handle_ops_gas: 100000000, - validation_estimation_gas_fee: 1_000_000_000_000, - }; - - // Chose OP - let provider = Arc::new(provider); - let estimator: GasEstimatorImpl = GasEstimatorImpl::new( - Chain::Optimism as u64, - provider.clone(), - entry, - settings, - create_fee_estimator(provider), - ); - - let user_op = demo_user_op_optional_gas(); - let estimation = estimator - .estimate_pre_verification_gas(&user_op, U256::zero()) - .await - .unwrap(); - - let u_o = user_op.max_fill(&settings); - - let u_o_encoded: Bytes = u_o.encode().into(); - let length_in_words = (u_o_encoded.len() + 31) / 32; - - //computed by mapping through the calldata bytes - //and adding to the value either 4 or 16 depending - //if the byte is non-zero - let call_data_cost = 3936; - - let result = U256::from(FIXED) / U256::from(BUNDLE_SIZE) - + call_data_cost - + U256::from(PER_USER_OP) - + U256::from(PER_USER_OP_WORD) * length_in_words; - - //OP dynamic gas - let dynamic_gas = 1000; - - assert_eq!(result + dynamic_gas, estimation); - } - - 
#[tokio::test] - async fn test_binary_search_verification_gas() { - let (mut entry, mut provider) = create_base_config(); - - let gas_usage = 10_000.into(); - - entry.expect_address().return_const(Address::zero()); - entry - .expect_decode_simulate_handle_ops_revert() - .returning(|_a| { - Ok(ExecutionResult { - pre_op_gas: U256::from(10000), - paid: U256::from(100000), - valid_after: 100000000000, - valid_until: 100000000001, - target_success: true, - target_result: Bytes::new(), - }) - }); - entry - .expect_call_spoofed_simulate_op() - .returning(move |op, _b, _c, _d, _e, _f| { - if op.verification_gas_limit < gas_usage { - return Ok(Err("AA23".to_string())); - } - - Ok(Ok(ExecutionResult { - target_result: EstimateCallGasResult { - gas_estimate: gas_usage, - num_rounds: 10.into(), - } - .encode() - .into(), - target_success: true, - ..Default::default() - })) - }); - - provider.expect_call().returning(move |_a, _b, _c| { - let result_data: Bytes = GasUsedResult { - gas_used: gas_usage * 2, - success: false, - result: Bytes::new(), - } - .encode() - .into(); - - let json_rpc_error = JsonRpcError { - code: -32000, - message: "execution reverted".to_string(), - data: Some(serde_json::Value::String(result_data.to_string())), - }; - Err(ProviderError::JsonRpcError(json_rpc_error)) - }); - - let (estimator, _) = create_estimator(entry, provider); - let user_op = demo_user_op(); - let estimation = estimator - .binary_search_verification_gas(&user_op, H256::zero(), &spoof::state()) - .await - .unwrap(); - - // the estimation should be the same as the gas usage - assert_eq!(gas_usage, estimation); - } - - #[tokio::test] - async fn test_binary_search_verification_gas_should_not_overflow() { - let (mut entry, mut provider) = create_base_config(); - - entry.expect_address().return_const(Address::zero()); - entry - .expect_decode_simulate_handle_ops_revert() - .returning(|_a| { - Ok(ExecutionResult { - pre_op_gas: U256::from(10000), - paid: U256::from(100000), - valid_after: 
100000000000, - valid_until: 100000000001, - target_success: true, - target_result: Bytes::new(), - }) - }); - entry - .expect_call_spoofed_simulate_op() - .returning(|_a, _b, _c, _d, _e, _f| { - Ok(Ok(ExecutionResult { - target_result: EstimateCallGasResult { - gas_estimate: U256::from(10000), - num_rounds: U256::from(10), - } - .encode() - .into(), - target_success: true, - ..Default::default() - })) - }); - - // this gas used number is larger than a u64 max number so we need to - // check for this overflow - provider.expect_call().returning(|_a, _b, _c| { - let result_data: Bytes = GasUsedResult { - gas_used: U256::from(18446744073709551616_u128), - success: false, - result: Bytes::new(), - } - .encode() - .into(); - - let json_rpc_error = JsonRpcError { - code: -32000, - message: "execution reverted".to_string(), - data: Some(serde_json::Value::String(result_data.to_string())), - }; - Err(ProviderError::JsonRpcError(json_rpc_error)) - }); - - let (estimator, _) = create_estimator(entry, provider); - let user_op = demo_user_op(); - let estimation = estimator - .binary_search_verification_gas(&user_op, H256::zero(), &spoof::state()) - .await - .err(); - - assert!(matches!( - estimation, - Some(GasEstimationError::RevertInValidation(..)) - )); - } - - #[tokio::test] - async fn test_binary_search_verification_gas_success_field() { - let (mut entry, mut provider) = create_base_config(); - - entry.expect_address().return_const(Address::zero()); - entry - .expect_decode_simulate_handle_ops_revert() - .returning(|_a| { - Ok(ExecutionResult { - pre_op_gas: U256::from(10000), - paid: U256::from(100000), - valid_after: 100000000000, - valid_until: 100000000001, - target_success: true, - target_result: Bytes::new(), - }) - }); - entry - .expect_call_spoofed_simulate_op() - .returning(|_a, _b, _c, _d, _e, _f| { - Ok(Ok(ExecutionResult { - target_result: EstimateCallGasResult { - gas_estimate: U256::from(10000), - num_rounds: U256::from(10), - } - .encode() - .into(), - 
target_success: true, - ..Default::default() - })) - }); - - // the success field should not be true as the - // call should always revert - provider.expect_call().returning(|_a, _b, _c| { - let result_data: Bytes = GasUsedResult { - gas_used: U256::from(20000), - success: true, - result: Bytes::new(), - } - .encode() - .into(); - - let json_rpc_error = JsonRpcError { - code: -32000, - message: "execution reverted".to_string(), - data: Some(serde_json::Value::String(result_data.to_string())), - }; - Err(ProviderError::JsonRpcError(json_rpc_error)) - }); - - let (estimator, _) = create_estimator(entry, provider); - let user_op = demo_user_op(); - let estimation = estimator - .binary_search_verification_gas(&user_op, H256::zero(), &spoof::state()) - .await; - - assert!(estimation.is_err()); - } - - #[tokio::test] - async fn test_binary_search_verification_gas_invalid_message() { - let (mut entry, mut provider) = create_base_config(); - - entry.expect_address().return_const(Address::zero()); - // checking for this simulated revert - entry - .expect_decode_simulate_handle_ops_revert() - .returning(|_a| Err(String::from("Error with reverted message"))); - entry - .expect_call_spoofed_simulate_op() - .returning(|_a, _b, _c, _d, _e, _f| { - Ok(Ok(ExecutionResult { - target_result: EstimateCallGasResult { - gas_estimate: U256::from(100), - num_rounds: U256::from(10), - } - .encode() - .into(), - target_success: true, - ..Default::default() - })) - }); - - provider.expect_call().returning(|_a, _b, _c| { - let result_data: Bytes = GasUsedResult { - gas_used: U256::from(20000), - success: false, - result: Bytes::new(), - } - .encode() - .into(); - - let json_rpc_error = JsonRpcError { - code: -32000, - message: "execution reverted".to_string(), - data: Some(serde_json::Value::String(result_data.to_string())), - }; - Err(ProviderError::JsonRpcError(json_rpc_error)) - }); - - let (estimator, _) = create_estimator(entry, provider); - let user_op = demo_user_op(); - let 
estimation = estimator - .binary_search_verification_gas(&user_op, H256::zero(), &spoof::state()) - .await; - - assert!(estimation.is_err()); - } - - #[tokio::test] - async fn test_binary_search_verification_gas_invalid_spoof() { - let (mut entry, mut provider) = create_base_config(); - - entry.expect_address().return_const(Address::zero()); - entry - .expect_decode_simulate_handle_ops_revert() - .returning(|_a| { - Ok(ExecutionResult { - pre_op_gas: U256::from(10000), - paid: U256::from(100000), - valid_after: 100000000000, - valid_until: 100000000001, - target_success: true, - target_result: Bytes::new(), - }) - }); - - //this mocked response causes error - entry - .expect_call_spoofed_simulate_op() - .returning(|_a, _b, _c, _d, _e, _f| Err(anyhow!("Invalid spoof error"))); - - provider.expect_call().returning(|_a, _b, _c| { - let result_data: Bytes = GasUsedResult { - gas_used: U256::from(20000), - success: false, - result: Bytes::new(), - } - .encode() - .into(); - - let json_rpc_error = JsonRpcError { - code: -32000, - message: "execution reverted".to_string(), - data: Some(serde_json::Value::String(result_data.to_string())), - }; - Err(ProviderError::JsonRpcError(json_rpc_error)) - }); - - let (estimator, _) = create_estimator(entry, provider); - let user_op = demo_user_op(); - let estimation = estimator - .binary_search_verification_gas(&user_op, H256::zero(), &spoof::state()) - .await; - - assert!(estimation.is_err()); - } - - #[tokio::test] - async fn test_binary_search_verification_gas_success_response() { - let (mut entry, mut provider) = create_base_config(); - - entry.expect_address().return_const(Address::zero()); - entry - .expect_decode_simulate_handle_ops_revert() - .returning(|_a| { - Ok(ExecutionResult { - pre_op_gas: U256::from(10000), - paid: U256::from(100000), - valid_after: 100000000000, - valid_until: 100000000001, - target_success: true, - target_result: Bytes::new(), - }) - }); - - // this should always revert instead of return success - 
entry - .expect_call_spoofed_simulate_op() - .returning(|_a, _b, _c, _d, _e, _f| { - Ok(Ok(ExecutionResult { - target_result: EstimateCallGasResult { - gas_estimate: U256::from(10000), - num_rounds: U256::from(10), - } - .encode() - .into(), - target_success: true, - ..Default::default() - })) - }); - - provider - .expect_call() - .returning(|_a, _b, _c| Ok(Bytes::new())); - - let (estimator, _) = create_estimator(entry, provider); - let user_op = demo_user_op(); - let estimation = estimator - .binary_search_verification_gas(&user_op, H256::zero(), &spoof::state()) - .await; - - assert!(estimation.is_err()); - } - - #[tokio::test] - async fn test_estimate_call_gas() { - let (mut entry, mut provider) = create_base_config(); - - entry.expect_address().return_const(Address::zero()); - entry - .expect_call_spoofed_simulate_op() - .returning(|_a, _b, _c, _d, _e, _f| { - Ok(Ok(ExecutionResult { - target_result: EstimateCallGasResult { - gas_estimate: U256::from(100), - num_rounds: U256::from(10), - } - .encode() - .into(), - target_success: true, - ..Default::default() - })) - }); - - provider - .expect_get_code() - .returning(|_a, _b| Ok(Bytes::new())); - - let (estimator, _) = create_estimator(entry, provider); - let user_op = demo_user_op(); - let estimation = estimator - .estimate_call_gas(&user_op, H256::zero(), spoof::state()) - .await - .unwrap(); - - // result is derived from the spoofed gas_estimate field - - assert_eq!(estimation, U256::from(100)); - } - - #[tokio::test] - async fn test_estimate_call_gas_error() { - let (mut entry, mut provider) = create_base_config(); - - entry.expect_address().return_const(Address::zero()); - - // return an invalid response for the ExecutionResult - // for a successful gas estimation - entry - .expect_call_spoofed_simulate_op() - .returning(|_a, _b, _c, _d, _e, _f| { - Ok(Ok(ExecutionResult { - target_result: EstimateCallGasRevertAtMax { - revert_data: Bytes::new(), - } - .encode() - .into(), - target_success: false, - 
..Default::default() - })) - }); - - provider - .expect_get_code() - .returning(|_a, _b| Ok(Bytes::new())); - - let (estimator, _) = create_estimator(entry, provider); - let user_op = demo_user_op(); - let estimation = estimator - .estimate_call_gas(&user_op, H256::zero(), spoof::state()) - .await - .err() - .unwrap(); - - assert!(matches!( - estimation, - GasEstimationError::RevertInCallWithBytes(_) - )); - } - - #[tokio::test] - async fn test_estimate_call_gas_continuation() { - let (mut entry, mut provider) = create_base_config(); - - entry.expect_address().return_const(Address::zero()); - entry - .expect_call_spoofed_simulate_op() - .returning(|_a, _b, _c, _d, _e, _f| { - Ok(Ok(ExecutionResult { - target_result: EstimateCallGasContinuation { - min_gas: U256::from(100), - max_gas: U256::from(100000), - num_rounds: U256::from(10), - } - .encode() - .into(), - target_success: false, - ..Default::default() - })) - }) - .times(1); - entry - .expect_call_spoofed_simulate_op() - .returning(|_a, _b, _c, _d, _e, _f| { - Ok(Ok(ExecutionResult { - target_result: EstimateCallGasResult { - gas_estimate: U256::from(200), - num_rounds: U256::from(10), - } - .encode() - .into(), - target_success: true, - ..Default::default() - })) - }) - .times(1); - - provider - .expect_get_code() - .returning(|_a, _b| Ok(Bytes::new())); - - let (estimator, _) = create_estimator(entry, provider); - let user_op = demo_user_op(); - let estimation = estimator - .estimate_call_gas(&user_op, H256::zero(), spoof::state()) - .await - .unwrap(); - - // on the second loop of the estimate gas continuation - // I update the spoofed value to 200 - - assert_eq!(estimation, U256::from(200)); - } - - #[tokio::test] - async fn test_estimation_optional_gas_used() { - let (mut entry, mut provider) = create_base_config(); - let gas_usage = 10_000.into(); - - entry.expect_address().return_const(Address::zero()); - entry - .expect_call_spoofed_simulate_op() - .returning(move |op, _b, _c, _d, _e, _f| { - if 
op.verification_gas_limit < gas_usage { - return Ok(Err("AA23".to_string())); - } - - Ok(Ok(ExecutionResult { - target_result: EstimateCallGasResult { - gas_estimate: U256::from(10000), - num_rounds: U256::from(10), - } - .encode() - .into(), - target_success: true, - ..Default::default() - })) - }); - entry - .expect_decode_simulate_handle_ops_revert() - .returning(|_a| { - Ok(ExecutionResult { - pre_op_gas: U256::from(10000), - paid: U256::from(100000), - valid_after: 100000000000, - valid_until: 100000000001, - target_success: true, - target_result: Bytes::new(), - }) - }); - - provider - .expect_get_code() - .returning(|_a, _b| Ok(Bytes::new())); - provider - .expect_get_latest_block_hash_and_number() - .returning(|| Ok((H256::zero(), U64::zero()))); - provider.expect_call().returning(move |_a, _b, _c| { - let result_data: Bytes = GasUsedResult { - gas_used: gas_usage, - success: false, - result: Bytes::new(), - } - .encode() - .into(); - - let json_rpc_error = JsonRpcError { - code: -32000, - message: "execution reverted".to_string(), - data: Some(serde_json::Value::String(result_data.to_string())), - }; - Err(ProviderError::JsonRpcError(json_rpc_error)) - }); - provider - .expect_get_base_fee() - .returning(|| Ok(U256::from(1000))); - provider - .expect_get_max_priority_fee() - .returning(|| Ok(U256::from(1000))); - - let (estimator, _) = create_estimator(entry, provider); - - let user_op = demo_user_op_optional_gas(); - - let estimation = estimator - .estimate_op_gas(user_op, spoof::state(), None) - .await - .unwrap(); - - // this number uses the same logic as the pre_verification tests - assert_eq!(estimation.pre_verification_gas, U256::from(43296)); - - // gas used increased by 10% - assert_eq!( - estimation.verification_gas_limit, - math::increase_by_percent(gas_usage, 10) - ); - - // input gas limit clamped with the set limit in settings and constant MIN - assert_eq!(estimation.call_gas_limit, U256::from(10000)); - } - - #[tokio::test] - async fn 
test_estimation_optional_gas_invalid_settings() { - let (mut entry, mut provider) = create_base_config(); - - entry.expect_address().return_const(Address::zero()); - entry - .expect_call_spoofed_simulate_op() - .returning(|_a, _b, _c, _d, _e, _f| { - Ok(Ok(ExecutionResult { - target_result: EstimateCallGasResult { - gas_estimate: U256::from(10000), - num_rounds: U256::from(10), - } - .encode() - .into(), - target_success: true, - ..Default::default() - })) - }); - entry - .expect_decode_simulate_handle_ops_revert() - .returning(|_a| { - Ok(ExecutionResult { - pre_op_gas: U256::from(10000), - paid: U256::from(100000), - valid_after: 100000000000, - valid_until: 100000000001, - target_success: true, - target_result: Bytes::new(), - }) - }); - - provider - .expect_get_code() - .returning(|_a, _b| Ok(Bytes::new())); - provider - .expect_get_latest_block_hash_and_number() - .returning(|| Ok((H256::zero(), U64::zero()))); - provider.expect_call().returning(|_a, _b, _c| { - let result_data: Bytes = GasUsedResult { - gas_used: U256::from(100000), - success: false, - result: Bytes::new(), - } - .encode() - .into(); - - let json_rpc_error = JsonRpcError { - code: -32000, - message: "execution reverted".to_string(), - data: Some(serde_json::Value::String(result_data.to_string())), - }; - Err(ProviderError::JsonRpcError(json_rpc_error)) - }); - provider - .expect_get_base_fee() - .returning(|| Ok(U256::from(1000))); - provider - .expect_get_max_priority_fee() - .returning(|| Ok(U256::from(1000))); - - //max_call_gas is less than MIN_CALL_GAS_LIMIT - - let settings = Settings { - max_verification_gas: 10, - max_call_gas: 10, - max_simulate_handle_ops_gas: 10, - validation_estimation_gas_fee: 1_000_000_000_000, - }; - - let provider = Arc::new(provider); - let estimator: GasEstimatorImpl = GasEstimatorImpl::new( - 0, - provider.clone(), - entry, - settings, - create_fee_estimator(provider), - ); - let user_op = demo_user_op_optional_gas(); - let estimation = estimator - 
.estimate_op_gas(user_op, spoof::state(), None) - .await - .err(); - - assert!(matches!( - estimation, - Some(GasEstimationError::RevertInValidation(..)) - )); - } -} diff --git a/crates/sim/src/estimation/mod.rs b/crates/sim/src/estimation/mod.rs index ce3a96c4..c5163e2e 100644 --- a/crates/sim/src/estimation/mod.rs +++ b/crates/sim/src/estimation/mod.rs @@ -11,9 +11,107 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. -#[allow(clippy::module_inception)] -mod estimation; -pub use estimation::*; +use ethers::types::{Bytes, U128}; +#[cfg(feature = "test-utils")] +use mockall::automock; +use rundler_types::{GasEstimate, ValidationRevert}; -mod types; -pub use types::{GasEstimate, Settings, UserOperationOptionalGas}; +use crate::precheck::MIN_CALL_GAS_LIMIT; +use ethers::types::U256; + +mod estimate_verification_gas; +pub use estimate_verification_gas::{VerificationGasEstimator, VerificationGasEstimatorImpl}; +mod estimate_call_gas; +pub use estimate_call_gas::{ + CallGasEstimator, CallGasEstimatorImpl, CallGasEstimatorSpecialization, +}; + +/// Gas estimation module for Entry Point v0.6 +mod v0_6; +pub use v0_6::GasEstimator as GasEstimatorV0_6; +mod v0_7; +pub use v0_7::GasEstimator as GasEstimatorV0_7; + +/// Percentage by which to increase the verification gas limit after binary search +const VERIFICATION_GAS_BUFFER_PERCENT: u64 = 10; +/// Absolute value by which to increase the call gas limit after binary search +const CALL_GAS_BUFFER_VALUE: U128 = U128([3000, 0]); + +/// Error type for gas estimation +#[derive(Debug, thiserror::Error)] +pub enum GasEstimationError { + /// Validation reverted + #[error("{0}")] + RevertInValidation(ValidationRevert), + /// Call reverted with a string message + #[error("user operation's call reverted: {0}")] + RevertInCallWithMessage(String), + /// Call reverted with bytes + #[error("user operation's call reverted: {0:#x}")] + 
RevertInCallWithBytes(Bytes), + /// Call used too much gas + #[error("gas_used cannot be larger than a u64 integer")] + GasUsedTooLarge, + /// Supplied gas was too large + #[error("{0} cannot be larger than {1}")] + GasFieldTooLarge(&'static str, u64), + /// The total amount of gas used by the UO is greater than allowed + #[error("total gas used by the user operation {0} is greater than the allowed limit: {1}")] + GasTotalTooLarge(u64, u64), + /// Other error + #[error(transparent)] + Other(#[from] anyhow::Error), +} + +/// Gas estimator trait +#[cfg_attr(feature = "test-utils", automock(type UserOperationOptionalGas = rundler_types::v0_6::UserOperationOptionalGas;))] +#[async_trait::async_trait] +pub trait GasEstimator: Send + Sync + 'static { + /// The user operation type estimated by this gas estimator + type UserOperationOptionalGas; + + /// Returns a gas estimate or a revert message, or an anyhow error on any + /// other error. + async fn estimate_op_gas( + &self, + op: Self::UserOperationOptionalGas, + state_override: ethers::types::spoof::State, + at_price: Option, + ) -> Result; +} + +/// Settings for gas estimation +#[derive(Clone, Copy, Debug)] +pub struct Settings { + /// The maximum amount of gas that can be used for the verification step of a user operation + pub max_verification_gas: u64, + /// The maximum amount of gas that can be used for the call step of a user operation + pub max_call_gas: u64, + /// The maximum amount of gas that can be used for the paymaster verification step of a user operation + pub max_paymaster_verification_gas: u64, + /// The maximum amount of gas that can be used for the paymaster post op step of a user operation + pub max_paymaster_post_op_gas: u64, + /// The maximum amount of total execution gas to check after estimation + pub max_total_execution_gas: u64, + /// The maximum amount of gas that can be used in a call to `simulateHandleOps` + pub max_simulate_handle_ops_gas: u64, + /// The gas fee to use during verification 
gas estimation, required to be held by the fee-payer + /// during estimation. If using a paymaster, the fee-payer must have 3x this value. + /// As the gas limit is varied during estimation, the fee is held constant by varying the + /// gas price. + /// Clients can use state overrides to set the balance of the fee-payer to at least this value. + pub verification_estimation_gas_fee: u64, +} + +impl Settings { + /// Check if the settings are valid + pub fn validate(&self) -> Option { + if U128::from(self.max_call_gas) + .cmp(&MIN_CALL_GAS_LIMIT) + .is_lt() + { + return Some("max_call_gas field cannot be lower than MIN_CALL_GAS_LIMIT".to_string()); + } + None + } +} diff --git a/crates/sim/src/estimation/types.rs b/crates/sim/src/estimation/types.rs index 664d55bf..b9c87067 100644 --- a/crates/sim/src/estimation/types.rs +++ b/crates/sim/src/estimation/types.rs @@ -27,12 +27,12 @@ pub struct Settings { pub max_call_gas: u64, /// The maximum amount of gas that can be used in a call to `simulateHandleOps` pub max_simulate_handle_ops_gas: u64, - /// The gas fee to use during validation gas estimation, required to be held by the fee-payer + /// The gas fee to use during verification gas estimation, required to be held by the fee-payer /// during estimation. If using a paymaster, the fee-payer must have 3x this value. /// As the gas limit is varied during estimation, the fee is held constant by varied the /// gas price. /// Clients can use state overrides to set the balance of the fee-payer to at least this value. - pub validation_estimation_gas_fee: u64, + pub verification_estimation_gas_fee: u64, } impl Settings { diff --git a/crates/sim/src/estimation/v0_6.rs b/crates/sim/src/estimation/v0_6.rs new file mode 100644 index 00000000..9d0251c8 --- /dev/null +++ b/crates/sim/src/estimation/v0_6.rs @@ -0,0 +1,1508 @@ +// This file is part of Rundler. 
+// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +use std::{cmp, ops::Add, sync::Arc}; + +use ethers::{ + contract::EthCall, + providers::spoof, + types::{Address, Bytes, H256, U256}, +}; +use rand::Rng; +use rundler_provider::{EntryPoint, L1GasProvider, Provider, SimulationProvider}; +use rundler_types::{ + chain::ChainSpec, + contracts::{ + v0_6::call_gas_estimation_proxy::{ + EstimateCallGasArgs, EstimateCallGasCall, TestCallGasCall, + CALLGASESTIMATIONPROXY_DEPLOYED_BYTECODE, + }, + ENTRY_POINT_V0_6_DEPLOYED_BYTECODE, + }, + v0_6::{UserOperation, UserOperationOptionalGas}, + GasEstimate, +}; +use rundler_utils::{eth, math}; +use tokio::join; + +use super::{ + CallGasEstimator, CallGasEstimatorImpl, CallGasEstimatorSpecialization, GasEstimationError, + Settings, VerificationGasEstimator, +}; +use crate::{ + estimation::estimate_verification_gas::GetOpWithLimitArgs, gas, precheck::MIN_CALL_GAS_LIMIT, + simulation, FeeEstimator, GasEstimator as GasEstimatorTrait, VerificationGasEstimatorImpl, +}; + +/// Gas estimator implementation +#[derive(Debug)] +pub struct GasEstimator { + chain_spec: ChainSpec, + provider: Arc

, + entry_point: E, + settings: Settings, + fee_estimator: FeeEstimator

, + verification_gas_estimator: VGE, + call_gas_estimator: CGE, +} + +#[async_trait::async_trait] +impl GasEstimatorTrait for GasEstimator +where + P: Provider, + E: EntryPoint + SimulationProvider + L1GasProvider, + VGE: VerificationGasEstimator, + CGE: CallGasEstimator, +{ + type UserOperationOptionalGas = UserOperationOptionalGas; + + async fn estimate_op_gas( + &self, + op: UserOperationOptionalGas, + state_override: spoof::State, + at_price: Option, + ) -> Result { + self.check_provided_limits(&op)?; + + let (block_hash, _) = self + .provider + .get_latest_block_hash_and_number() + .await + .map_err(anyhow::Error::from)?; + + let pre_verification_gas = self.estimate_pre_verification_gas(&op, at_price).await?; + + let full_op = UserOperation { + pre_verification_gas, + ..op.clone().into_user_operation( + self.settings.max_call_gas.into(), + self.settings.max_verification_gas.into(), + ) + }; + + let verification_future = + self.estimate_verification_gas(&op, &full_op, block_hash, &state_override); + let call_future = + self.estimate_call_gas(&op, full_op.clone(), block_hash, state_override.clone()); + + // Not try_join! because then the output is nondeterministic if both + // verification and call estimation fail. 
+ let timer = std::time::Instant::now(); + let (verification_gas_limit, call_gas_limit) = join!(verification_future, call_future); + tracing::debug!("gas estimation took {}ms", timer.elapsed().as_millis()); + + let verification_gas_limit = verification_gas_limit?; + let call_gas_limit = call_gas_limit?; + + // Verify total gas limit + let mut op_with_gas = full_op; + op_with_gas.verification_gas_limit = verification_gas_limit; + op_with_gas.call_gas_limit = call_gas_limit; + let gas_limit = + gas::user_operation_execution_gas_limit(&self.chain_spec, &op_with_gas, true); + if gas_limit > self.settings.max_total_execution_gas.into() { + return Err(GasEstimationError::GasTotalTooLarge( + gas_limit.as_u64(), + self.settings.max_total_execution_gas, + )); + } + + Ok(GasEstimate { + pre_verification_gas, + verification_gas_limit, + call_gas_limit, + paymaster_verification_gas_limit: None, + }) + } +} + +impl + GasEstimator< + P, + E, + VerificationGasEstimatorImpl, + CallGasEstimatorImpl, + > +where + P: Provider, + E: EntryPoint + + SimulationProvider + + L1GasProvider + + Clone, +{ + /// Create a new gas estimator + pub fn new( + chain_spec: ChainSpec, + provider: Arc

, + entry_point: E, + settings: Settings, + fee_estimator: FeeEstimator

, + ) -> Self { + if let Some(err) = settings.validate() { + panic!("Invalid gas estimator settings: {}", err); + } + + let verification_gas_estimator = VerificationGasEstimatorImpl::new( + chain_spec.clone(), + Arc::clone(&provider), + entry_point.clone(), + settings, + ); + let call_gas_estimator = CallGasEstimatorImpl::new( + entry_point.clone(), + settings, + CallGasEstimatorSpecializationV06, + ); + Self { + chain_spec, + provider, + entry_point, + settings, + fee_estimator, + verification_gas_estimator, + call_gas_estimator, + } + } +} + +impl GasEstimator +where + P: Provider, + E: EntryPoint + SimulationProvider + L1GasProvider, + VGE: VerificationGasEstimator, + CGE: CallGasEstimator, +{ + fn check_provided_limits( + &self, + optional_op: &UserOperationOptionalGas, + ) -> Result<(), GasEstimationError> { + if let Some(pvg) = optional_op.pre_verification_gas { + if pvg > self.settings.max_verification_gas.into() { + return Err(GasEstimationError::GasFieldTooLarge( + "preVerificationGas", + self.settings.max_verification_gas, + )); + } + } + if let Some(vl) = optional_op.verification_gas_limit { + if vl > self.settings.max_verification_gas.into() { + return Err(GasEstimationError::GasFieldTooLarge( + "verificationGasLimit", + self.settings.max_verification_gas, + )); + } + } + if let Some(cl) = optional_op.call_gas_limit { + if cl > self.settings.max_call_gas.into() { + return Err(GasEstimationError::GasFieldTooLarge( + "callGasLimit", + self.settings.max_call_gas, + )); + } + } + + Ok(()) + } + + async fn estimate_verification_gas( + &self, + optional_op: &UserOperationOptionalGas, + full_op: &UserOperation, + block_hash: H256, + state_override: &spoof::State, + ) -> Result { + // if set and non-zero, don't estimate + if let Some(vl) = optional_op.verification_gas_limit { + if vl != U256::zero() { + // No need to do an extra simulation here, if the user provides a value that is + // insufficient it will cause a revert during call gas estimation (or 
simulation). + return Ok(vl); + } + } + + fn get_op_with_limit(op: UserOperation, args: GetOpWithLimitArgs) -> UserOperation { + let GetOpWithLimitArgs { gas, fee } = args; + UserOperation { + verification_gas_limit: gas.into(), + max_fee_per_gas: fee.into(), + max_priority_fee_per_gas: fee.into(), + call_gas_limit: U256::zero(), + ..op + } + } + + let verification_gas_limit: U256 = self + .verification_gas_estimator + .estimate_verification_gas( + full_op, + block_hash, + state_override, + self.settings.max_verification_gas.into(), + get_op_with_limit, + ) + .await + .map(|gas_u128| gas_u128.into())?; + + // Add a buffer to the verification gas limit. Add 10% or 2000 gas, whichever is larger + // to ensure we get at least a 2000 gas buffer. Cap at the max verification gas. + let verification_gas_limit = cmp::max( + math::increase_by_percent( + verification_gas_limit, + super::VERIFICATION_GAS_BUFFER_PERCENT, + ), + verification_gas_limit + simulation::v0_6::REQUIRED_VERIFICATION_GAS_LIMIT_BUFFER, + ) + .min(self.settings.max_verification_gas.into()); + + Ok(verification_gas_limit) + } + + async fn estimate_pre_verification_gas( + &self, + optional_op: &UserOperationOptionalGas, + at_gas_price: Option, + ) -> Result { + if let Some(pvg) = optional_op.pre_verification_gas { + if pvg != U256::zero() { + return Ok(pvg); + } + } + + // If not using calldata pre-verification gas, return 0 + let mut gas_price = if !self.chain_spec.calldata_pre_verification_gas { + U256::zero() + } else { + // If the user provides fees, use them, otherwise use the current bundle fees + let (bundle_fees, base_fee) = self.fee_estimator.required_bundle_fees(None).await?; + if let (Some(max_fee), Some(prio_fee)) = ( + optional_op.max_fee_per_gas.filter(|fee| !fee.is_zero()), + optional_op + .max_priority_fee_per_gas + .filter(|fee| !fee.is_zero()), + ) { + cmp::min(max_fee, base_fee.saturating_add(prio_fee)) + } else { + base_fee.saturating_add(bundle_fees.max_priority_fee_per_gas) + } + }; + 
+ if let Some(at_gas_price) = at_gas_price { + println!("HC Override estimate_pvg gas price from {:?} to {:?}", gas_price, at_gas_price); + gas_price = at_gas_price; + } + + Ok(gas::estimate_pre_verification_gas( + &self.chain_spec, + &self.entry_point, + &optional_op.max_fill( + self.settings.max_call_gas.into(), + self.settings.max_verification_gas.into(), + ), + &optional_op.random_fill( + self.settings.max_call_gas.into(), + self.settings.max_verification_gas.into(), + ), + gas_price, + ) + .await?) + } + + async fn estimate_call_gas( + &self, + optional_op: &UserOperationOptionalGas, + full_op: UserOperation, + block_hash: H256, + state_override: spoof::State, + ) -> Result { + // if set and non-zero, don't estimate + if let Some(cl) = optional_op.call_gas_limit { + if cl != U256::zero() { + // The user provided a non-zero value, simulate once + self.call_gas_estimator + .simulate_handle_op_with_result(full_op, block_hash, state_override) + .await?; + return Ok(cl); + } + } + + let call_gas_limit: U256 = self + .call_gas_estimator + .estimate_call_gas(full_op, block_hash, state_override) + .await? + .into(); + + // Add a buffer to the call gas limit and clamp + let call_gas_limit = call_gas_limit + .add(super::CALL_GAS_BUFFER_VALUE) + .clamp(MIN_CALL_GAS_LIMIT.into(), self.settings.max_call_gas.into()); + + Ok(call_gas_limit) + } +} + +/// Implementation of functions that specialize the call gas estimator to the +/// v0.6 entry point. +#[derive(Debug)] +pub struct CallGasEstimatorSpecializationV06; + +impl CallGasEstimatorSpecialization for CallGasEstimatorSpecializationV06 { + type UO = UserOperation; + + fn add_proxy_to_overrides(&self, ep_to_override: Address, state_override: &mut spoof::State) { + // For an explanation of what's going on here, see the comment at the + // top of `CallGasEstimationProxy.sol`. 
+ // Use a random address for the moved entry point so that users can't + // intentionally get bad estimates by interacting with the hardcoded + // address. + let moved_entry_point_address: Address = rand::thread_rng().gen(); + let estimation_proxy_bytecode = + estimation_proxy_bytecode_with_target(moved_entry_point_address); + state_override + .account(moved_entry_point_address) + .code(ENTRY_POINT_V0_6_DEPLOYED_BYTECODE.clone()); + state_override + .account(ep_to_override) + .code(estimation_proxy_bytecode); + } + + fn get_op_with_no_call_gas(&self, op: Self::UO) -> Self::UO { + UserOperation { + call_gas_limit: 0.into(), + max_fee_per_gas: 0.into(), + ..op + } + } + + fn get_estimate_call_gas_calldata( + &self, + callless_op: Self::UO, + min_gas: U256, + max_gas: U256, + rounding: U256, + is_continuation: bool, + ) -> Bytes { + eth::call_data_of( + EstimateCallGasCall::selector(), + (EstimateCallGasArgs { + call_data: callless_op.call_data, + sender: callless_op.sender, + min_gas, + max_gas, + rounding, + is_continuation, + },), + ) + } + + fn get_test_call_gas_calldata(&self, callless_op: Self::UO, call_gas_limit: U256) -> Bytes { + eth::call_data_of( + TestCallGasCall::selector(), + (callless_op.sender, callless_op.call_data, call_gas_limit), + ) + } +} + +/// Offset at which the proxy target address appears in the proxy bytecode. Must +/// be updated whenever `CallGasEstimationProxy.sol` changes. +/// +/// The easiest way to get the updated value is to run this module's tests. The +/// failure will tell you the new value. +const PROXY_TARGET_OFFSET: usize = 163; + +// Replaces the address of the proxy target where it appears in the proxy +// bytecode so we don't need the same fixed address every time. 
+fn estimation_proxy_bytecode_with_target(target: Address) -> Bytes { + let mut vec = CALLGASESTIMATIONPROXY_DEPLOYED_BYTECODE.to_vec(); + vec[PROXY_TARGET_OFFSET..PROXY_TARGET_OFFSET + 20].copy_from_slice(target.as_bytes()); + vec.into() +} + +#[cfg(test)] +mod tests { + use anyhow::anyhow; + use ethers::{ + abi::{AbiEncode, Address}, + contract::EthCall, + types::{U128, U64}, + utils::hex, + }; + use rundler_provider::{ExecutionResult, MockEntryPointV0_6, MockProvider, SimulateOpCallData}; + use rundler_types::{ + chain::L1GasOracleContractType, + contracts::{ + utils::get_gas_used::GasUsedResult, + v0_6::{ + call_gas_estimation_proxy::{ + EstimateCallGasContinuation, EstimateCallGasResult, EstimateCallGasRevertAtMax, + TestCallGasResult, + }, + i_entry_point, + }, + }, + v0_6::{UserOperation, UserOperationOptionalGas}, + UserOperation as UserOperationTrait, ValidationRevert, + }; + use rundler_utils::eth::{self, ContractRevertError}; + + use super::*; + use crate::{ + estimation::{ + estimate_call_gas::PROXY_IMPLEMENTATION_ADDRESS_MARKER, CALL_GAS_BUFFER_VALUE, + VERIFICATION_GAS_BUFFER_PERCENT, + }, + simulation::v0_6::REQUIRED_VERIFICATION_GAS_LIMIT_BUFFER, + PriorityFeeMode, VerificationGasEstimatorImpl, + }; + + // Gas overhead defaults + const FIXED: u32 = 21000; + const PER_USER_OP: u32 = 18300; + const PER_USER_OP_WORD: u32 = 4; + const BUNDLE_SIZE: u32 = 1; + + // Alises for complex types (which also satisfy Clippy) + type VerificationGasEstimatorWithMocks = + VerificationGasEstimatorImpl>; + type CallGasEstimatorWithMocks = + CallGasEstimatorImpl, CallGasEstimatorSpecializationV06>; + type GasEstimatorWithMocks = GasEstimator< + MockProvider, + Arc, + VerificationGasEstimatorWithMocks, + CallGasEstimatorWithMocks, + >; + + fn create_base_config() -> (MockEntryPointV0_6, MockProvider) { + let mut entry = MockEntryPointV0_6::new(); + let provider = MockProvider::new(); + + // Fill in concrete implementations of call data and + // 
`simulation_should_revert` + entry + .expect_get_simulate_op_call_data() + .returning(|op, spoofed_state| { + let call_data = eth::call_data_of( + i_entry_point::SimulateHandleOpCall::selector(), + (op.clone(), Address::zero(), Bytes::new()), + ); + SimulateOpCallData { + call_data, + spoofed_state: spoofed_state.clone(), + } + }); + entry.expect_simulation_should_revert().return_const(true); + + entry.expect_address().return_const(Address::zero()); + + (entry, provider) + } + + fn create_fee_estimator(provider: Arc) -> FeeEstimator { + FeeEstimator::new( + &ChainSpec::default(), + provider, + PriorityFeeMode::BaseFeePercent(0), + 0, + ) + } + + fn create_custom_estimator( + chain_spec: ChainSpec, + provider: MockProvider, + entry: MockEntryPointV0_6, + settings: Settings, + ) -> GasEstimatorWithMocks { + let provider = Arc::new(provider); + GasEstimator::new( + chain_spec.clone(), + Arc::clone(&provider), + Arc::new(entry), + settings, + create_fee_estimator(provider), + ) + } + + const TEST_MAX_GAS_LIMITS: u64 = 10000000000; + const TEST_FEE: U256 = U256([1000, 0, 0, 0]); + + fn create_estimator( + entry: MockEntryPointV0_6, + provider: MockProvider, + ) -> (GasEstimatorWithMocks, Settings) { + let settings = Settings { + max_verification_gas: TEST_MAX_GAS_LIMITS, + max_call_gas: TEST_MAX_GAS_LIMITS, + max_paymaster_verification_gas: TEST_MAX_GAS_LIMITS, + max_paymaster_post_op_gas: TEST_MAX_GAS_LIMITS, + max_total_execution_gas: TEST_MAX_GAS_LIMITS, + max_simulate_handle_ops_gas: TEST_MAX_GAS_LIMITS, + verification_estimation_gas_fee: 1_000_000_000_000, + }; + let estimator = create_custom_estimator(ChainSpec::default(), provider, entry, settings); + (estimator, settings) + } + + fn demo_user_op_optional_gas(pvg: Option) -> UserOperationOptionalGas { + UserOperationOptionalGas { + sender: Address::zero(), + nonce: U256::zero(), + init_code: Bytes::new(), + call_data: Bytes::new(), + call_gas_limit: None, + verification_gas_limit: None, + pre_verification_gas: 
pvg, + max_fee_per_gas: None, + max_priority_fee_per_gas: None, + paymaster_and_data: Bytes::new(), + signature: Bytes::new(), + } + } + + fn demo_user_op() -> UserOperation { + UserOperation { + sender: Address::zero(), + nonce: U256::zero(), + init_code: Bytes::new(), + call_data: Bytes::new(), + call_gas_limit: U256::from(1000), + verification_gas_limit: U256::from(1000), + pre_verification_gas: U256::from(1000), + max_fee_per_gas: U256::from(1000), + max_priority_fee_per_gas: U256::from(1000), + paymaster_and_data: Bytes::new(), + signature: Bytes::new(), + } + } + + #[tokio::test] + async fn test_calc_pre_verification_input() { + let (entry, mut provider) = create_base_config(); + provider.expect_get_base_fee().returning(|| Ok(TEST_FEE)); + provider + .expect_get_max_priority_fee() + .returning(|| Ok(TEST_FEE)); + + let (estimator, settings) = create_estimator(entry, provider); + let user_op = demo_user_op_optional_gas(None); + let estimation = estimator + .estimate_pre_verification_gas(&user_op, None) + .await + .unwrap(); + + let u_o = user_op.max_fill( + settings.max_call_gas.into(), + settings.max_verification_gas.into(), + ); + + let u_o_encoded = u_o.encode(); + let length_in_words = (u_o_encoded.len() + 31) / 32; + + //computed by mapping through the calldata bytes + //and adding to the value either 4 or 16 depending + //if the byte is non-zero + let call_data_cost = 3936; + + let result = U256::from(FIXED) / U256::from(BUNDLE_SIZE) + + call_data_cost + + U256::from(PER_USER_OP) + + U256::from(PER_USER_OP_WORD) * length_in_words; + + let dynamic_gas = 0; + + assert_eq!(result + dynamic_gas, estimation); + } + + #[tokio::test] + async fn test_calc_pre_verification_input_arbitrum() { + let (mut entry, mut provider) = create_base_config(); + entry + .expect_calc_l1_gas() + .returning(|_a, _b, _c| Ok(TEST_FEE)); + provider.expect_get_base_fee().returning(|| Ok(TEST_FEE)); + provider + .expect_get_max_priority_fee() + .returning(|| Ok(TEST_FEE)); + + let 
settings = Settings { + max_verification_gas: 10000000000, + max_call_gas: 10000000000, + max_paymaster_verification_gas: 10000000000, + max_paymaster_post_op_gas: 10000000000, + max_total_execution_gas: 10000000000, + max_simulate_handle_ops_gas: 100000000, + verification_estimation_gas_fee: 1_000_000_000_000, + }; + + // Chose arbitrum + let cs = ChainSpec { + id: 42161, + calldata_pre_verification_gas: true, + l1_gas_oracle_contract_type: L1GasOracleContractType::ArbitrumNitro, + ..Default::default() + }; + let provider = Arc::new(provider); + let estimator = GasEstimator::new( + cs.clone(), + Arc::clone(&provider), + Arc::new(entry), + settings, + create_fee_estimator(provider), + ); + + let user_op = demo_user_op_optional_gas(None); + let estimation = estimator + .estimate_pre_verification_gas(&user_op, None) + .await + .unwrap(); + + let u_o = user_op.max_fill( + settings.max_call_gas.into(), + settings.max_verification_gas.into(), + ); + + let u_o_encoded = u_o.encode(); + let length_in_words = (u_o_encoded.len() + 31) / 32; + + //computed by mapping through the calldata bytes + //and adding to the value either 4 or 16 depending + //if the byte is non-zero + let call_data_cost = 3936; + + let result = U256::from(FIXED) / U256::from(BUNDLE_SIZE) + + call_data_cost + + U256::from(PER_USER_OP) + + U256::from(PER_USER_OP_WORD) * length_in_words; + + //Arbitrum dynamic gas + let dynamic_gas = 1000; + + assert_eq!(result + dynamic_gas, estimation); + } + + #[tokio::test] + async fn test_calc_pre_verification_input_op() { + let (mut entry, mut provider) = create_base_config(); + + entry + .expect_calc_l1_gas() + .returning(|_a, _b, _c| Ok(TEST_FEE)); + provider.expect_get_base_fee().returning(|| Ok(TEST_FEE)); + provider + .expect_get_max_priority_fee() + .returning(|| Ok(TEST_FEE)); + + let settings = Settings { + max_verification_gas: 10000000000, + max_call_gas: 10000000000, + max_paymaster_verification_gas: 10000000000, + max_paymaster_post_op_gas: 10000000000, 
+ max_total_execution_gas: 10000000000, + max_simulate_handle_ops_gas: 100000000, + verification_estimation_gas_fee: 1_000_000_000_000, + }; + + // Chose OP + let cs = ChainSpec { + id: 10, + calldata_pre_verification_gas: true, + l1_gas_oracle_contract_type: L1GasOracleContractType::OptimismBedrock, + ..Default::default() + }; + let estimator = create_custom_estimator(cs, provider, entry, settings); + + let user_op = demo_user_op_optional_gas(None); + let estimation = estimator + .estimate_pre_verification_gas(&user_op, None) + .await + .unwrap(); + + let u_o = user_op.max_fill( + settings.max_call_gas.into(), + settings.max_verification_gas.into(), + ); + + let u_o_encoded: Bytes = u_o.encode().into(); + let length_in_words = (u_o_encoded.len() + 31) / 32; + + //computed by mapping through the calldata bytes + //and adding to the value either 4 or 16 depending + //if the byte is non-zero + let call_data_cost = 3936; + + let result = U256::from(FIXED) / U256::from(BUNDLE_SIZE) + + call_data_cost + + U256::from(PER_USER_OP) + + U256::from(PER_USER_OP_WORD) * length_in_words; + + //OP dynamic gas + let dynamic_gas = 1000; + + assert_eq!(result + dynamic_gas, estimation); + } + + #[tokio::test] + async fn test_binary_search_verification_gas() { + let (mut entry, mut provider) = create_base_config(); + + let gas_usage = 10_000.into(); + + entry + .expect_decode_simulate_handle_ops_revert() + .returning(|_a| { + Ok(ExecutionResult { + pre_op_gas: U256::from(10000), + paid: U256::from(100000), + valid_after: 100000000000.into(), + valid_until: 100000000001.into(), + target_success: true, + target_result: Bytes::new(), + }) + }); + entry + .expect_call_spoofed_simulate_op() + .returning(move |op, _b, _c, _d, _e, _f| { + if op.total_verification_gas_limit() < gas_usage { + return Ok(Err(ValidationRevert::EntryPoint("AA23".to_string()))); + } + + Ok(Ok(ExecutionResult { + target_result: EstimateCallGasResult { + gas_estimate: gas_usage, + num_rounds: 10.into(), + } + 
.encode() + .into(), + target_success: true, + ..Default::default() + })) + }); + + provider + .expect_get_gas_used() + .returning(move |_a, _b, _c, _d| { + Ok(GasUsedResult { + gas_used: gas_usage * 2, + success: false, + result: Bytes::new(), + }) + }); + + let (estimator, _) = create_estimator(entry, provider); + let optional_op = demo_user_op_optional_gas(Some(U256::from(10000))); + let user_op = demo_user_op(); + let estimation = estimator + .estimate_verification_gas(&optional_op, &user_op, H256::zero(), &spoof::state()) + .await + .unwrap(); + + // the estimation should be the same as the gas usage plus the buffer + let expected = gas_usage + ChainSpec::default().deposit_transfer_overhead; + let expected_with_buffer = + math::increase_by_percent(expected, VERIFICATION_GAS_BUFFER_PERCENT); + + assert_eq!(expected_with_buffer, estimation); + } + + #[tokio::test] + async fn test_binary_search_verification_gas_should_not_overflow() { + let (mut entry, mut provider) = create_base_config(); + + entry + .expect_decode_simulate_handle_ops_revert() + .returning(|_a| { + Ok(ExecutionResult { + pre_op_gas: U256::from(10000), + paid: U256::from(100000), + valid_after: 100000000000.into(), + valid_until: 100000000001.into(), + target_success: true, + target_result: Bytes::new(), + }) + }); + entry + .expect_call_spoofed_simulate_op() + .returning(|_a, _b, _c, _d, _e, _f| { + Ok(Ok(ExecutionResult { + target_result: EstimateCallGasResult { + gas_estimate: U256::from(10000), + num_rounds: U256::from(10), + } + .encode() + .into(), + target_success: true, + ..Default::default() + })) + }); + + // this gas used number is larger than a u64 max number so we need to + // check for this overflow + provider + .expect_get_gas_used() + .returning(move |_a, _b, _c, _d| { + Ok(GasUsedResult { + gas_used: U256::from(18446744073709551616_u128), + success: false, + result: Bytes::new(), + }) + }); + + let (estimator, _) = create_estimator(entry, provider); + let optional_op = 
demo_user_op_optional_gas(Some(U256::from(10000))); + let user_op = demo_user_op(); + let estimation = estimator + .estimate_verification_gas(&optional_op, &user_op, H256::zero(), &spoof::state()) + .await + .err(); + + assert!(matches!( + estimation, + Some(GasEstimationError::GasUsedTooLarge) + )); + } + + #[tokio::test] + async fn test_binary_search_verification_gas_success_field() { + let (mut entry, mut provider) = create_base_config(); + + entry + .expect_decode_simulate_handle_ops_revert() + .returning(|_a| { + Ok(ExecutionResult { + pre_op_gas: U256::from(10000), + paid: U256::from(100000), + valid_after: 100000000000.into(), + valid_until: 100000000001.into(), + target_success: true, + target_result: Bytes::new(), + }) + }); + entry + .expect_call_spoofed_simulate_op() + .returning(|_a, _b, _c, _d, _e, _f| { + Ok(Ok(ExecutionResult { + target_result: EstimateCallGasResult { + gas_estimate: U256::from(10000), + num_rounds: U256::from(10), + } + .encode() + .into(), + target_success: true, + ..Default::default() + })) + }); + + // the success field should not be true as the + // call should always revert + provider + .expect_get_gas_used() + .returning(move |_a, _b, _c, _d| { + Ok(GasUsedResult { + gas_used: U256::from(20000), + success: true, + result: Bytes::new(), + }) + }); + + let (estimator, _) = create_estimator(entry, provider); + let optional_op = demo_user_op_optional_gas(Some(U256::from(10000))); + let user_op = demo_user_op(); + let estimation = estimator + .estimate_verification_gas(&optional_op, &user_op, H256::zero(), &spoof::state()) + .await; + + assert!(estimation.is_err()); + } + + #[tokio::test] + async fn test_binary_search_verification_gas_invalid_message() { + let (mut entry, mut provider) = create_base_config(); + + // checking for this simulated revert + entry + .expect_decode_simulate_handle_ops_revert() + .returning(|_a| { + Err(ValidationRevert::EntryPoint( + "Error with reverted message".to_string(), + )) + }); + entry + 
.expect_call_spoofed_simulate_op() + .returning(|_a, _b, _c, _d, _e, _f| { + Ok(Ok(ExecutionResult { + target_result: EstimateCallGasResult { + gas_estimate: U256::from(100), + num_rounds: U256::from(10), + } + .encode() + .into(), + target_success: true, + ..Default::default() + })) + }); + + provider + .expect_get_gas_used() + .returning(move |_a, _b, _c, _d| { + Ok(GasUsedResult { + gas_used: U256::from(20000), + success: false, + result: Bytes::new(), + }) + }); + + let (estimator, _) = create_estimator(entry, provider); + let optional_op = demo_user_op_optional_gas(Some(U256::from(10000))); + let user_op = demo_user_op(); + let estimation = estimator + .estimate_verification_gas(&optional_op, &user_op, H256::zero(), &spoof::state()) + .await; + + assert!(estimation.is_err()); + } + + #[tokio::test] + async fn test_binary_search_verification_gas_invalid_spoof() { + let (mut entry, mut provider) = create_base_config(); + + entry + .expect_decode_simulate_handle_ops_revert() + .returning(|_a| { + Ok(ExecutionResult { + pre_op_gas: U256::from(10000), + paid: U256::from(100000), + valid_after: 100000000000.into(), + valid_until: 100000000001.into(), + target_success: true, + target_result: Bytes::new(), + }) + }); + + //this mocked response causes error + entry + .expect_call_spoofed_simulate_op() + .returning(|_a, _b, _c, _d, _e, _f| Err(anyhow!("Invalid spoof error"))); + + provider + .expect_get_gas_used() + .returning(move |_a, _b, _c, _d| { + Ok(GasUsedResult { + gas_used: U256::from(20000), + success: false, + result: Bytes::new(), + }) + }); + + let (estimator, _) = create_estimator(entry, provider); + let optional_op = demo_user_op_optional_gas(Some(U256::from(10000))); + let user_op = demo_user_op(); + let estimation = estimator + .estimate_verification_gas(&optional_op, &user_op, H256::zero(), &spoof::state()) + .await; + + assert!(estimation.is_err()); + } + + #[tokio::test] + async fn test_binary_search_verification_gas_success_response() { + let (mut 
entry, mut provider) = create_base_config(); + + entry + .expect_decode_simulate_handle_ops_revert() + .returning(|_a| { + Ok(ExecutionResult { + pre_op_gas: U256::from(10000), + paid: U256::from(100000), + valid_after: 100000000000.into(), + valid_until: 100000000001.into(), + target_success: true, + target_result: Bytes::new(), + }) + }); + + // this should always revert instead of return success + entry + .expect_call_spoofed_simulate_op() + .returning(|_a, _b, _c, _d, _e, _f| { + Ok(Ok(ExecutionResult { + target_result: EstimateCallGasResult { + gas_estimate: U256::from(10000), + num_rounds: U256::from(10), + } + .encode() + .into(), + target_success: true, + ..Default::default() + })) + }); + + provider + .expect_get_gas_used() + .returning(move |_a, _b, _c, _d| { + Err(anyhow::anyhow!("This should always revert").into()) + }); + + let (estimator, _) = create_estimator(entry, provider); + let optional_op = demo_user_op_optional_gas(Some(U256::from(10000))); + let user_op = demo_user_op(); + let estimation = estimator + .estimate_verification_gas(&optional_op, &user_op, H256::zero(), &spoof::state()) + .await; + + assert!(estimation.is_err()); + } + + #[tokio::test] + async fn test_estimate_call_gas() { + let (mut entry, mut provider) = create_base_config(); + + let gas_estimate = U256::from(100_000); + entry + .expect_call_spoofed_simulate_op() + .returning(move |_a, _b, _c, _d, _e, _f| { + Ok(Ok(ExecutionResult { + target_result: EstimateCallGasResult { + gas_estimate, + num_rounds: U256::from(10), + } + .encode() + .into(), + target_success: true, + ..Default::default() + })) + }); + + provider + .expect_get_code() + .returning(|_a, _b| Ok(Bytes::new())); + + let (estimator, _) = create_estimator(entry, provider); + let optional_op = demo_user_op_optional_gas(None); + let user_op = demo_user_op(); + let estimation = estimator + .estimate_call_gas(&optional_op, user_op, H256::zero(), spoof::state()) + .await + .unwrap(); + + // result is derived from the 
spoofed gas_estimate field + let expected = gas_estimate + CALL_GAS_BUFFER_VALUE; + assert_eq!(estimation, expected); + } + + #[tokio::test] + async fn test_estimate_call_gas_error() { + let (mut entry, mut provider) = create_base_config(); + + // return an invalid response for the ExecutionResult + // for a successful gas estimation + entry + .expect_call_spoofed_simulate_op() + .returning(|_a, _b, _c, _d, _e, _f| { + Ok(Ok(ExecutionResult { + target_result: EstimateCallGasRevertAtMax { + revert_data: Bytes::new(), + } + .encode() + .into(), + target_success: false, + ..Default::default() + })) + }); + + provider + .expect_get_code() + .returning(|_a, _b| Ok(Bytes::new())); + + let (estimator, _) = create_estimator(entry, provider); + let user_op = demo_user_op(); + let estimation = estimator + .call_gas_estimator + .estimate_call_gas(user_op, H256::zero(), spoof::state()) + .await + .err() + .unwrap(); + + assert!(matches!( + estimation, + GasEstimationError::RevertInCallWithBytes(_) + )); + } + + #[tokio::test] + async fn test_estimate_call_gas_continuation() { + let (mut entry, mut provider) = create_base_config(); + + entry + .expect_call_spoofed_simulate_op() + .returning(|_a, _b, _c, _d, _e, _f| { + Ok(Ok(ExecutionResult { + target_result: EstimateCallGasContinuation { + min_gas: U256::from(100), + max_gas: U256::from(100000), + num_rounds: U256::from(10), + } + .encode() + .into(), + target_success: false, + ..Default::default() + })) + }) + .times(1); + entry + .expect_call_spoofed_simulate_op() + .returning(|_a, _b, _c, _d, _e, _f| { + Ok(Ok(ExecutionResult { + target_result: EstimateCallGasResult { + gas_estimate: U256::from(200), + num_rounds: U256::from(10), + } + .encode() + .into(), + target_success: true, + ..Default::default() + })) + }) + .times(1); + + provider + .expect_get_code() + .returning(|_a, _b| Ok(Bytes::new())); + + let (estimator, _) = create_estimator(entry, provider); + let user_op = demo_user_op(); + let estimation = estimator + 
.call_gas_estimator + .estimate_call_gas(user_op, H256::zero(), spoof::state()) + .await + .unwrap(); + + // on the second loop of the estimate gas continuation + // I update the spoofed value to 200 + + assert_eq!(estimation, U128::from(200)); + } + + #[tokio::test] + async fn test_estimation_optional_gas_used() { + let (mut entry, mut provider) = create_base_config(); + let gas_usage = 10_000.into(); + + entry + .expect_call_spoofed_simulate_op() + .returning(move |op, _b, _c, _d, _e, _f| { + if op.total_verification_gas_limit() < gas_usage { + return Ok(Err(ValidationRevert::EntryPoint("AA23".to_string()))); + } + + Ok(Ok(ExecutionResult { + target_result: EstimateCallGasResult { + gas_estimate: U256::from(10000), + num_rounds: U256::from(10), + } + .encode() + .into(), + target_success: true, + ..Default::default() + })) + }); + entry + .expect_decode_simulate_handle_ops_revert() + .returning(|_a| { + Ok(ExecutionResult { + pre_op_gas: U256::from(10000), + paid: U256::from(100000), + valid_after: 100000000000.into(), + valid_until: 100000000001.into(), + target_success: true, + target_result: Bytes::new(), + }) + }); + + provider + .expect_get_code() + .returning(|_a, _b| Ok(Bytes::new())); + provider + .expect_get_latest_block_hash_and_number() + .returning(|| Ok((H256::zero(), U64::zero()))); + provider + .expect_get_gas_used() + .returning(move |_a, _b, _c, _d| { + Ok(GasUsedResult { + gas_used: gas_usage, + success: false, + result: Bytes::new(), + }) + }); + + provider.expect_get_base_fee().returning(|| Ok(TEST_FEE)); + provider + .expect_get_max_priority_fee() + .returning(|| Ok(TEST_FEE)); + + let (estimator, _) = create_estimator(entry, provider); + + let optional_op = demo_user_op_optional_gas(Some(U256::from(10000))); + + let estimation = estimator + .estimate_op_gas(optional_op, spoof::state(), None) + .await + .unwrap(); + + // this should be a pass through + assert_eq!(estimation.pre_verification_gas, U256::from(10000)); + + // gas used increased 
by 10% + let expected = gas_usage + ChainSpec::default().deposit_transfer_overhead; + assert_eq!( + estimation.verification_gas_limit, + cmp::max( + math::increase_by_percent(expected, 10), + expected + REQUIRED_VERIFICATION_GAS_LIMIT_BUFFER + ) + ); + + // input gas limit clamped with the set limit in settings and constant MIN + assert_eq!( + estimation.call_gas_limit, + U256::from(10000) + CALL_GAS_BUFFER_VALUE + ); + } + + #[test] + #[should_panic] + fn test_estimation_optional_gas_invalid_settings() { + let (entry, provider) = create_base_config(); + + //max_call_gas is less than MIN_CALL_GAS_LIMIT + + let settings = Settings { + max_verification_gas: 10, + max_call_gas: 10, + max_paymaster_post_op_gas: 10, + max_paymaster_verification_gas: 10, + max_total_execution_gas: 10, + max_simulate_handle_ops_gas: 10, + verification_estimation_gas_fee: 1_000_000_000_000, + }; + + create_custom_estimator(ChainSpec::default(), provider, entry, settings); + } + + #[tokio::test] + async fn test_pvg_over_max() { + let (entry, provider) = create_base_config(); + let (estimator, _) = create_estimator(entry, provider); + + let optional_op = demo_user_op_optional_gas(Some(U256::from(TEST_MAX_GAS_LIMITS + 1))); + + let estimation = estimator + .estimate_op_gas(optional_op, spoof::state(), None) + .await + .err() + .unwrap(); + + assert!(matches!( + estimation, + GasEstimationError::GasFieldTooLarge("preVerificationGas", TEST_MAX_GAS_LIMITS) + )); + } + + #[tokio::test] + async fn test_vgl_over_max() { + let (entry, provider) = create_base_config(); + let (estimator, _) = create_estimator(entry, provider); + + let mut optional_op = demo_user_op_optional_gas(Some(U256::from(10000))); + optional_op.verification_gas_limit = Some(U256::from(TEST_MAX_GAS_LIMITS + 1)); + + let estimation = estimator + .estimate_op_gas(optional_op, spoof::state(), None) + .await + .err() + .unwrap(); + + assert!(matches!( + estimation, + GasEstimationError::GasFieldTooLarge("verificationGasLimit", 
TEST_MAX_GAS_LIMITS) + )); + } + + #[tokio::test] + async fn test_cgl_over_max() { + let (entry, provider) = create_base_config(); + let (estimator, _) = create_estimator(entry, provider); + + let mut optional_op = demo_user_op_optional_gas(Some(U256::from(10000))); + optional_op.call_gas_limit = Some(U256::from(TEST_MAX_GAS_LIMITS + 1)); + + let estimation = estimator + .estimate_op_gas(optional_op, spoof::state(), None) + .await + .err() + .unwrap(); + + assert!(matches!( + estimation, + GasEstimationError::GasFieldTooLarge("callGasLimit", TEST_MAX_GAS_LIMITS) + )); + } + + #[tokio::test] + async fn test_return_provided_limits() { + let (mut entry, mut provider) = create_base_config(); + + provider + .expect_get_latest_block_hash_and_number() + .returning(|| Ok((H256::zero(), U64::zero()))); + + entry + .expect_call_spoofed_simulate_op() + .returning(move |_a, _b, _c, _d, _e, _f| { + Ok(Ok(ExecutionResult { + target_result: TestCallGasResult { + success: true, + gas_used: 0.into(), + revert_data: Bytes::new(), + } + .encode() + .into(), + target_success: true, + ..Default::default() + })) + }); + + let (estimator, _) = create_estimator(entry, provider); + + let mut optional_op = demo_user_op_optional_gas(Some(U256::from(10000))); + optional_op.call_gas_limit = Some(U256::from(10000)); + optional_op.verification_gas_limit = Some(U256::from(10000)); + + let estimation = estimator + .estimate_op_gas(optional_op.clone(), spoof::state(), None) + .await + .unwrap(); + + assert_eq!( + estimation.pre_verification_gas, + optional_op.pre_verification_gas.unwrap() + ); + assert_eq!( + estimation.verification_gas_limit, + optional_op.verification_gas_limit.unwrap() + ); + assert_eq!( + estimation.call_gas_limit, + optional_op.call_gas_limit.unwrap() + ); + } + + #[tokio::test] + async fn test_provided_reverts() { + let (mut entry, mut provider) = create_base_config(); + + provider + .expect_get_latest_block_hash_and_number() + .returning(|| Ok((H256::zero(), U64::zero()))); 
+ + let revert_msg = "test revert".to_string(); + let err = ContractRevertError { + reason: revert_msg.clone(), + }; + + entry + .expect_call_spoofed_simulate_op() + .returning(move |_a, _b, _c, _d, _e, _f| { + Ok(Ok(ExecutionResult { + target_result: TestCallGasResult { + success: false, + gas_used: 0.into(), + revert_data: err.clone().encode().into(), + } + .encode() + .into(), + target_success: true, + ..Default::default() + })) + }); + + let (estimator, _) = create_estimator(entry, provider); + + let mut optional_op = demo_user_op_optional_gas(Some(U256::from(10000))); + optional_op.call_gas_limit = Some(U256::from(10000)); + optional_op.verification_gas_limit = Some(U256::from(10000)); + + let estimation_error = estimator + .estimate_op_gas(optional_op.clone(), spoof::state(), None) + .await + .err() + .unwrap(); + + assert!(matches!( + estimation_error, + GasEstimationError::RevertInCallWithMessage(msg) if msg == revert_msg + )); + } + + #[tokio::test] + async fn test_total_limit() { + let (mut entry, mut provider) = create_base_config(); + + provider + .expect_get_latest_block_hash_and_number() + .returning(|| Ok((H256::zero(), U64::zero()))); + + entry + .expect_call_spoofed_simulate_op() + .returning(move |_a, _b, _c, _d, _e, _f| { + Ok(Ok(ExecutionResult { + target_result: TestCallGasResult { + success: true, + gas_used: 0.into(), + revert_data: Bytes::new(), + } + .encode() + .into(), + target_success: true, + ..Default::default() + })) + }); + + let (estimator, _) = create_estimator(entry, provider); + + let mut optional_op = demo_user_op_optional_gas(Some(U256::from(10000))); + optional_op.call_gas_limit = Some(TEST_MAX_GAS_LIMITS.into()); + optional_op.verification_gas_limit = Some(TEST_MAX_GAS_LIMITS.into()); + + let err = estimator + .estimate_op_gas(optional_op.clone(), spoof::state(), None) + .await + .err() + .unwrap(); + + assert!(matches!( + err, + GasEstimationError::GasTotalTooLarge(_, TEST_MAX_GAS_LIMITS) + )) + } + + #[test] + fn 
test_proxy_target_offset() { + let proxy_target_bytes = hex::decode(PROXY_IMPLEMENTATION_ADDRESS_MARKER).unwrap(); + let mut offsets = Vec::::new(); + for i in 0..CALLGASESTIMATIONPROXY_DEPLOYED_BYTECODE.len() - 20 { + if CALLGASESTIMATIONPROXY_DEPLOYED_BYTECODE[i..i + 20] == proxy_target_bytes { + offsets.push(i); + } + } + assert_eq!(vec![PROXY_TARGET_OFFSET], offsets); + } +} diff --git a/crates/sim/src/estimation/v0_7.rs b/crates/sim/src/estimation/v0_7.rs new file mode 100644 index 00000000..dfea977a --- /dev/null +++ b/crates/sim/src/estimation/v0_7.rs @@ -0,0 +1,890 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. 
+ +use std::{cmp, ops::Add, sync::Arc}; + +use ethers::{ + contract::EthCall, + types::{spoof, Address, Bytes, H256, U128, U256}, +}; +use rand::Rng; +use rundler_provider::{EntryPoint, L1GasProvider, Provider, SimulationProvider}; +use rundler_types::{ + chain::ChainSpec, + contracts::v0_7::{ + call_gas_estimation_proxy::{ + EstimateCallGasArgs, EstimateCallGasCall, TestCallGasCall, + CALLGASESTIMATIONPROXY_DEPLOYED_BYTECODE, + }, + entry_point_simulations::ENTRYPOINTSIMULATIONS_DEPLOYED_BYTECODE, + }, + v0_7::{UserOperation, UserOperationBuilder, UserOperationOptionalGas}, + GasEstimate, +}; +use rundler_utils::{eth, math}; +use tokio::join; + +use super::{estimate_verification_gas::GetOpWithLimitArgs, GasEstimationError, Settings}; +use crate::{ + gas, CallGasEstimator, CallGasEstimatorImpl, CallGasEstimatorSpecialization, FeeEstimator, + VerificationGasEstimator, VerificationGasEstimatorImpl, MIN_CALL_GAS_LIMIT, +}; + +/// Gas estimator for entry point v0.7 +#[derive(Debug)] +pub struct GasEstimator { + chain_spec: ChainSpec, + provider: Arc

, + entry_point: E, + settings: Settings, + fee_estimator: FeeEstimator

, + verification_gas_estimator: VGE, + call_gas_estimator: CGE, +} + +#[async_trait::async_trait] +impl super::GasEstimator for GasEstimator +where + P: Provider, + E: EntryPoint + SimulationProvider + L1GasProvider, + VGE: VerificationGasEstimator, + CGE: CallGasEstimator, +{ + type UserOperationOptionalGas = UserOperationOptionalGas; + + /// Returns a gas estimate or a revert message, or an anyhow error on any + /// other error. + async fn estimate_op_gas( + &self, + op: UserOperationOptionalGas, + state_override: spoof::State, + _at_price: Option, + ) -> Result { + self.check_provided_limits(&op)?; + + let Self { + provider, settings, .. + } = self; + + let (block_hash, _) = provider + .get_latest_block_hash_and_number() + .await + .map_err(anyhow::Error::from)?; + + let pre_verification_gas = self.estimate_pre_verification_gas(&op).await?; + + let full_op = op + .clone() + .into_user_operation_builder( + &self.chain_spec, + settings.max_call_gas.into(), + settings.max_verification_gas.into(), + settings.max_paymaster_verification_gas.into(), + ) + .pre_verification_gas(pre_verification_gas) + .build(); + + let verification_gas_future = + self.estimate_verification_gas(&op, &full_op, block_hash, &state_override); + let paymaster_verification_gas_future = + self.estimate_paymaster_verification_gas(&op, &full_op, block_hash, &state_override); + let call_gas_future = + self.estimate_call_gas(&op, full_op.clone(), block_hash, state_override.clone()); + + // Not try_join! because then the output is nondeterministic if multiple calls fail. 
+ let timer = std::time::Instant::now(); + let (verification_gas_limit, paymaster_verification_gas_limit, call_gas_limit) = join!( + verification_gas_future, + paymaster_verification_gas_future, + call_gas_future + ); + tracing::debug!("gas estimation took {}ms", timer.elapsed().as_millis()); + + let verification_gas_limit = verification_gas_limit?; + let paymaster_verification_gas_limit = paymaster_verification_gas_limit?; + let call_gas_limit = call_gas_limit?; + + // check the total gas limit + let mut op_with_gas = full_op; + op_with_gas.pre_verification_gas = pre_verification_gas; + op_with_gas.call_gas_limit = call_gas_limit; + op_with_gas.verification_gas_limit = verification_gas_limit; + op_with_gas.paymaster_verification_gas_limit = paymaster_verification_gas_limit; + let gas_limit = + gas::user_operation_execution_gas_limit(&self.chain_spec, &op_with_gas, true); + if gas_limit > self.settings.max_total_execution_gas.into() { + return Err(GasEstimationError::GasTotalTooLarge( + gas_limit.as_u64(), + self.settings.max_total_execution_gas, + )); + } + + Ok(GasEstimate { + pre_verification_gas, + call_gas_limit: call_gas_limit.into(), + verification_gas_limit: verification_gas_limit.into(), + paymaster_verification_gas_limit: op + .paymaster + .map(|_| paymaster_verification_gas_limit.into()), + }) + } +} + +impl + GasEstimator< + P, + E, + VerificationGasEstimatorImpl, + CallGasEstimatorImpl, + > +where + P: Provider, + E: EntryPoint + + SimulationProvider + + L1GasProvider + + Clone, +{ + /// Create a new gas estimator + pub fn new( + chain_spec: ChainSpec, + provider: Arc

, + entry_point: E, + settings: Settings, + fee_estimator: FeeEstimator

, + ) -> Self { + if let Some(err) = settings.validate() { + panic!("Invalid gas estimator settings: {}", err); + } + + let verification_gas_estimator = VerificationGasEstimatorImpl::new( + chain_spec.clone(), + Arc::clone(&provider), + entry_point.clone(), + settings, + ); + let call_gas_estimator = CallGasEstimatorImpl::new( + entry_point.clone(), + settings, + CallGasEstimatorSpecializationV07 { + chain_spec: chain_spec.clone(), + }, + ); + Self { + chain_spec, + provider, + entry_point, + settings, + fee_estimator, + verification_gas_estimator, + call_gas_estimator, + } + } +} + +impl GasEstimator +where + P: Provider, + E: EntryPoint + SimulationProvider + L1GasProvider, + VGE: VerificationGasEstimator, + CGE: CallGasEstimator, +{ + fn check_provided_limits( + &self, + optional_op: &UserOperationOptionalGas, + ) -> Result<(), GasEstimationError> { + if let Some(pvg) = optional_op.pre_verification_gas { + if pvg > self.settings.max_verification_gas.into() { + return Err(GasEstimationError::GasFieldTooLarge( + "preVerificationGas", + self.settings.max_verification_gas, + )); + } + } + if let Some(vl) = optional_op.verification_gas_limit { + if vl > self.settings.max_verification_gas.into() { + return Err(GasEstimationError::GasFieldTooLarge( + "verificationGasLimit", + self.settings.max_verification_gas, + )); + } + } + if let Some(vl) = optional_op.paymaster_verification_gas_limit { + if vl > self.settings.max_verification_gas.into() { + return Err(GasEstimationError::GasFieldTooLarge( + "paymasterVerificationGasLimit", + self.settings.max_verification_gas, + )); + } + } + if let Some(cl) = optional_op.call_gas_limit { + if cl > self.settings.max_call_gas.into() { + return Err(GasEstimationError::GasFieldTooLarge( + "callGasLimit", + self.settings.max_call_gas, + )); + } + } + if let Some(cl) = optional_op.paymaster_post_op_gas_limit { + if cl > self.settings.max_call_gas.into() { + return Err(GasEstimationError::GasFieldTooLarge( + "paymasterPostOpGasLimit", + 
self.settings.max_call_gas, + )); + } + } + + Ok(()) + } + + async fn estimate_verification_gas( + &self, + optional_op: &UserOperationOptionalGas, + full_op: &UserOperation, + block_hash: H256, + state_override: &spoof::State, + ) -> Result { + // if set and non-zero, don't estimate + if let Some(vl) = optional_op.verification_gas_limit { + if vl != U128::zero() { + // No need to do an extra simulation here, if the user provides a value that is + // insufficient it will cause a revert during call gas estimation (or simulation). + return Ok(vl); + } + } + + let get_op_with_limit = |op: UserOperation, args: GetOpWithLimitArgs| { + let GetOpWithLimitArgs { gas, fee } = args; + UserOperationBuilder::from_uo(op, &self.chain_spec) + .verification_gas_limit(gas) + .max_fee_per_gas(fee) + .max_priority_fee_per_gas(fee) + .paymaster_post_op_gas_limit(U128::zero()) + .call_gas_limit(U128::zero()) + .build() + }; + + let verification_gas_limit = self + .verification_gas_estimator + .estimate_verification_gas( + full_op, + block_hash, + state_override, + self.settings.max_verification_gas.into(), + get_op_with_limit, + ) + .await?; + + let verification_gas_limit = math::increase_by_percent( + verification_gas_limit, + super::VERIFICATION_GAS_BUFFER_PERCENT, + ) + .min(self.settings.max_verification_gas.into()); + + Ok(verification_gas_limit) + } + + async fn estimate_paymaster_verification_gas( + &self, + optional_op: &UserOperationOptionalGas, + full_op: &UserOperation, + block_hash: H256, + state_override: &spoof::State, + ) -> Result { + // If not using paymaster, return zero, else if set and non-zero, don't estimate and return value + if let Some(pvl) = optional_op.verification_gas_limit { + if pvl != U128::zero() { + return Ok(pvl); + } + } + + let get_op_with_limit = |op: UserOperation, args: GetOpWithLimitArgs| { + let GetOpWithLimitArgs { gas, fee } = args; + UserOperationBuilder::from_uo(op, &self.chain_spec) + .max_fee_per_gas(fee) + .max_priority_fee_per_gas(fee) + 
.paymaster_verification_gas_limit(gas) + .paymaster_post_op_gas_limit(U128::zero()) + .call_gas_limit(U128::zero()) + .build() + }; + + let paymaster_verification_gas_limit = self + .verification_gas_estimator + .estimate_verification_gas( + full_op, + block_hash, + state_override, + self.settings.max_paymaster_verification_gas.into(), + get_op_with_limit, + ) + .await?; + + let paymaster_verification_gas_limit = math::increase_by_percent( + paymaster_verification_gas_limit, + super::VERIFICATION_GAS_BUFFER_PERCENT, + ) + .min(self.settings.max_verification_gas.into()); + + Ok(paymaster_verification_gas_limit) + } + + async fn estimate_pre_verification_gas( + &self, + optional_op: &UserOperationOptionalGas, + ) -> Result { + if let Some(pvg) = optional_op.pre_verification_gas { + if pvg != U256::zero() { + return Ok(pvg); + } + } + + // If not using calldata pre-verification gas, return 0 + let gas_price = if !self.chain_spec.calldata_pre_verification_gas { + U256::zero() + } else { + // If the user provides fees, use them, otherwise use the current bundle fees + let (bundle_fees, base_fee) = self.fee_estimator.required_bundle_fees(None).await?; + if let (Some(max_fee), Some(prio_fee)) = ( + optional_op.max_fee_per_gas.filter(|fee| !fee.is_zero()), + optional_op + .max_priority_fee_per_gas + .filter(|fee| !fee.is_zero()), + ) { + cmp::min(max_fee.into(), base_fee.saturating_add(prio_fee.into())) + } else { + base_fee.saturating_add(bundle_fees.max_priority_fee_per_gas) + } + }; + + Ok(gas::estimate_pre_verification_gas( + &self.chain_spec, + &self.entry_point, + &optional_op.max_fill(&self.chain_spec), + &optional_op.random_fill(&self.chain_spec), + gas_price, + ) + .await?) 
+ } + + async fn estimate_call_gas( + &self, + optional_op: &UserOperationOptionalGas, + full_op: UserOperation, + block_hash: H256, + state_override: spoof::State, + ) -> Result { + // if set and non-zero, don't estimate + if let Some(cl) = optional_op.call_gas_limit { + if cl != U128::zero() { + // The user provided a non-zero value, simulate once + self.call_gas_estimator + .simulate_handle_op_with_result(full_op, block_hash, state_override) + .await?; + return Ok(cl); + } + } + + let call_gas_limit = self + .call_gas_estimator + .estimate_call_gas(full_op, block_hash, state_override) + .await?; + + // Add a buffer to the call gas limit and clamp + let call_gas_limit = call_gas_limit + .add(super::CALL_GAS_BUFFER_VALUE) + .clamp(MIN_CALL_GAS_LIMIT, self.settings.max_call_gas.into()); + + Ok(call_gas_limit) + } +} + +/// Implementation of functions that specialize the call gas estimator to the +/// v0.7 entry point. +#[derive(Debug)] +pub struct CallGasEstimatorSpecializationV07 { + chain_spec: ChainSpec, +} + +impl CallGasEstimatorSpecialization for CallGasEstimatorSpecializationV07 { + type UO = UserOperation; + + fn add_proxy_to_overrides(&self, ep_to_override: Address, state_override: &mut spoof::State) { + // For an explanation of what's going on here, see the comment at the + // top of `CallGasEstimationProxy.sol`. + // Use a random address for the moved entry point so that users can't + // intentionally get bad estimates by interacting with the hardcoded + // address. 
+ let moved_entry_point_address: Address = rand::thread_rng().gen(); + let estimation_proxy_bytecode = + estimation_proxy_bytecode_with_target(moved_entry_point_address); + state_override + .account(moved_entry_point_address) + .code(ENTRYPOINTSIMULATIONS_DEPLOYED_BYTECODE.clone()); + state_override + .account(ep_to_override) + .code(estimation_proxy_bytecode); + } + + fn get_op_with_no_call_gas(&self, op: Self::UO) -> Self::UO { + UserOperationBuilder::from_uo(op, &self.chain_spec) + .call_gas_limit(U128::zero()) + .max_fee_per_gas(U128::zero()) + .build() + } + + fn get_estimate_call_gas_calldata( + &self, + callless_op: Self::UO, + min_gas: U256, + max_gas: U256, + rounding: U256, + is_continuation: bool, + ) -> Bytes { + eth::call_data_of( + EstimateCallGasCall::selector(), + (EstimateCallGasArgs { + user_op: callless_op.pack(), + min_gas, + max_gas, + rounding, + is_continuation, + },), + ) + } + + fn get_test_call_gas_calldata(&self, callless_op: Self::UO, call_gas_limit: U256) -> Bytes { + eth::call_data_of( + TestCallGasCall::selector(), + (callless_op.pack(), call_gas_limit), + ) + } +} + +/// Offset at which the proxy target address appears in the proxy bytecode. Must +/// be updated whenever `CallGasEstimationProxy.sol` changes. +/// +/// The easiest way to get the updated value is to run this module's tests. The +/// failure will tell you the new value. +const PROXY_TARGET_OFFSET: usize = 163; + +// Replaces the address of the proxy target where it appears in the proxy +// bytecode so we don't need the same fixed address every time. 
+fn estimation_proxy_bytecode_with_target(target: Address) -> Bytes { + let mut vec = CALLGASESTIMATIONPROXY_DEPLOYED_BYTECODE.to_vec(); + vec[PROXY_TARGET_OFFSET..PROXY_TARGET_OFFSET + 20].copy_from_slice(target.as_bytes()); + vec.into() +} + +#[cfg(test)] +mod tests { + use ethers::{ + abi::AbiEncode, + contract::EthCall, + types::{Address, U64}, + utils::hex, + }; + use rundler_provider::{ExecutionResult, MockEntryPointV0_7, MockProvider, SimulateOpCallData}; + use rundler_types::{ + contracts::v0_7::{ + call_gas_estimation_proxy::TestCallGasResult, + entry_point_simulations::SimulateHandleOpCall, + }, + v0_7::UserOperationOptionalGas, + }; + use rundler_utils::eth::{self, ContractRevertError}; + + use super::*; + use crate::{ + estimation::estimate_call_gas::PROXY_IMPLEMENTATION_ADDRESS_MARKER, GasEstimator as _, + PriorityFeeMode, + }; + + // Alises for complex types (which also satisfy Clippy) + type VerificationGasEstimatorWithMocks = + VerificationGasEstimatorImpl>; + type CallGasEstimatorWithMocks = + CallGasEstimatorImpl, CallGasEstimatorSpecializationV07>; + type GasEstimatorWithMocks = GasEstimator< + MockProvider, + Arc, + VerificationGasEstimatorWithMocks, + CallGasEstimatorWithMocks, + >; + + fn create_base_config() -> (MockEntryPointV0_7, MockProvider) { + let mut entry = MockEntryPointV0_7::new(); + let provider = MockProvider::new(); + + // Fill in concrete implementations of call data and + // `simulation_should_revert` + entry + .expect_get_simulate_op_call_data() + .returning(|op, spoofed_state| { + let call_data = eth::call_data_of( + SimulateHandleOpCall::selector(), + (op.packed().clone(), Address::zero(), Bytes::new()), + ); + SimulateOpCallData { + call_data, + spoofed_state: spoofed_state.clone(), + } + }); + entry.expect_simulation_should_revert().return_const(true); + + entry.expect_address().return_const(Address::zero()); + + (entry, provider) + } + + fn create_fee_estimator(provider: Arc) -> FeeEstimator { + FeeEstimator::new( + 
&ChainSpec::default(), + provider, + PriorityFeeMode::BaseFeePercent(0), + 0, + ) + } + + fn create_custom_estimator( + chain_spec: ChainSpec, + provider: MockProvider, + entry: MockEntryPointV0_7, + settings: Settings, + ) -> GasEstimatorWithMocks { + let provider = Arc::new(provider); + GasEstimator::new( + chain_spec.clone(), + Arc::clone(&provider), + Arc::new(entry), + settings, + create_fee_estimator(provider), + ) + } + + const TEST_MAX_GAS_LIMITS: u64 = 10000000000; + + fn create_estimator( + entry: MockEntryPointV0_7, + provider: MockProvider, + ) -> (GasEstimatorWithMocks, Settings) { + let settings = Settings { + max_verification_gas: TEST_MAX_GAS_LIMITS, + max_call_gas: TEST_MAX_GAS_LIMITS, + max_paymaster_verification_gas: TEST_MAX_GAS_LIMITS, + max_paymaster_post_op_gas: TEST_MAX_GAS_LIMITS, + max_total_execution_gas: TEST_MAX_GAS_LIMITS, + max_simulate_handle_ops_gas: TEST_MAX_GAS_LIMITS, + verification_estimation_gas_fee: 1_000_000_000_000, + }; + let estimator = create_custom_estimator(ChainSpec::default(), provider, entry, settings); + (estimator, settings) + } + + fn demo_user_op_optional_gas(pvg: Option) -> UserOperationOptionalGas { + UserOperationOptionalGas { + sender: Address::zero(), + nonce: U256::zero(), + call_data: Bytes::new(), + call_gas_limit: None, + verification_gas_limit: None, + pre_verification_gas: pvg, + max_fee_per_gas: None, + max_priority_fee_per_gas: None, + signature: Bytes::new(), + + paymaster: None, + paymaster_data: Bytes::new(), + paymaster_verification_gas_limit: None, + paymaster_post_op_gas_limit: None, + + factory: None, + factory_data: Bytes::new(), + } + } + + #[tokio::test] + async fn test_pvg_over_max() { + let (entry, provider) = create_base_config(); + let (estimator, _) = create_estimator(entry, provider); + + let optional_op = demo_user_op_optional_gas(Some(U256::from(TEST_MAX_GAS_LIMITS + 1))); + + let estimation = estimator + .estimate_op_gas(optional_op, spoof::state(), None) + .await + .err() + 
.unwrap(); + + assert!(matches!( + estimation, + GasEstimationError::GasFieldTooLarge("preVerificationGas", TEST_MAX_GAS_LIMITS) + )); + } + + #[tokio::test] + async fn test_vgl_over_max() { + let (entry, provider) = create_base_config(); + let (estimator, _) = create_estimator(entry, provider); + + let mut optional_op = demo_user_op_optional_gas(Some(U256::from(10000))); + optional_op.verification_gas_limit = Some(U128::from(TEST_MAX_GAS_LIMITS + 1)); + + let estimation = estimator + .estimate_op_gas(optional_op, spoof::state(), None) + .await + .err() + .unwrap(); + + assert!(matches!( + estimation, + GasEstimationError::GasFieldTooLarge("verificationGasLimit", TEST_MAX_GAS_LIMITS) + )); + } + + #[tokio::test] + async fn test_pgl_over_max() { + let (entry, provider) = create_base_config(); + let (estimator, _) = create_estimator(entry, provider); + + let mut optional_op = demo_user_op_optional_gas(Some(U256::from(10000))); + optional_op.paymaster_verification_gas_limit = Some(U128::from(TEST_MAX_GAS_LIMITS + 1)); + + let estimation = estimator + .estimate_op_gas(optional_op, spoof::state(), None) + .await + .err() + .unwrap(); + + assert!(matches!( + estimation, + GasEstimationError::GasFieldTooLarge( + "paymasterVerificationGasLimit", + TEST_MAX_GAS_LIMITS + ) + )); + } + + #[tokio::test] + async fn test_cgl_over_max() { + let (entry, provider) = create_base_config(); + let (estimator, _) = create_estimator(entry, provider); + + let mut optional_op = demo_user_op_optional_gas(Some(U256::from(10000))); + optional_op.call_gas_limit = Some(U128::from(TEST_MAX_GAS_LIMITS + 1)); + + let estimation = estimator + .estimate_op_gas(optional_op, spoof::state(), None) + .await + .err() + .unwrap(); + + assert!(matches!( + estimation, + GasEstimationError::GasFieldTooLarge("callGasLimit", TEST_MAX_GAS_LIMITS) + )); + } + + #[tokio::test] + async fn test_postop_over_max() { + let (entry, provider) = create_base_config(); + let (estimator, _) = create_estimator(entry, 
provider); + + let mut optional_op = demo_user_op_optional_gas(Some(U256::from(10000))); + optional_op.paymaster_post_op_gas_limit = Some(U128::from(TEST_MAX_GAS_LIMITS + 1)); + + let estimation = estimator + .estimate_op_gas(optional_op, spoof::state(), None) + .await + .err() + .unwrap(); + + assert!(matches!( + estimation, + GasEstimationError::GasFieldTooLarge("paymasterPostOpGasLimit", TEST_MAX_GAS_LIMITS) + )); + } + + #[tokio::test] + async fn test_return_provided_limits() { + let (mut entry, mut provider) = create_base_config(); + + provider + .expect_get_latest_block_hash_and_number() + .returning(|| Ok((H256::zero(), U64::zero()))); + + entry + .expect_call_spoofed_simulate_op() + .returning(move |_a, _b, _c, _d, _e, _f| { + Ok(Ok(ExecutionResult { + target_result: TestCallGasResult { + success: true, + gas_used: 0.into(), + revert_data: Bytes::new(), + } + .encode() + .into(), + target_success: true, + ..Default::default() + })) + }); + + let (estimator, _) = create_estimator(entry, provider); + + let mut optional_op = demo_user_op_optional_gas(Some(U256::from(10000))); + optional_op.call_gas_limit = Some(U128::from(10000)); + optional_op.verification_gas_limit = Some(U128::from(10000)); + optional_op.paymaster = Some(Address::random()); + optional_op.paymaster_verification_gas_limit = Some(U128::from(10000)); + optional_op.paymaster_post_op_gas_limit = Some(U128::from(10000)); + + let estimation = estimator + .estimate_op_gas(optional_op.clone(), spoof::state(), None) + .await + .unwrap(); + + assert_eq!( + estimation.pre_verification_gas, + optional_op.pre_verification_gas.unwrap() + ); + assert_eq!( + estimation.verification_gas_limit, + optional_op.verification_gas_limit.unwrap().into() + ); + assert_eq!( + estimation.paymaster_verification_gas_limit, + optional_op + .paymaster_verification_gas_limit + .map(|v| v.into()) + ); + assert_eq!( + estimation.call_gas_limit, + optional_op.call_gas_limit.unwrap().into() + ); + } + + #[tokio::test] + async fn 
test_provided_reverts() { + let (mut entry, mut provider) = create_base_config(); + + provider + .expect_get_latest_block_hash_and_number() + .returning(|| Ok((H256::zero(), U64::zero()))); + + let revert_msg = "test revert".to_string(); + let err = ContractRevertError { + reason: revert_msg.clone(), + }; + + entry + .expect_call_spoofed_simulate_op() + .returning(move |_a, _b, _c, _d, _e, _f| { + Ok(Ok(ExecutionResult { + target_result: TestCallGasResult { + success: false, + gas_used: 0.into(), + revert_data: err.clone().encode().into(), + } + .encode() + .into(), + target_success: true, + ..Default::default() + })) + }); + + let (estimator, _) = create_estimator(entry, provider); + + let mut optional_op = demo_user_op_optional_gas(Some(U256::from(10000))); + optional_op.call_gas_limit = Some(U128::from(10000)); + optional_op.verification_gas_limit = Some(U128::from(10000)); + + let estimation_error = estimator + .estimate_op_gas(optional_op.clone(), spoof::state(), None) + .await + .err() + .unwrap(); + + assert!(matches!( + estimation_error, + GasEstimationError::RevertInCallWithMessage(msg) if msg == revert_msg + )); + } + + #[tokio::test] + async fn test_total_limit() { + let (mut entry, mut provider) = create_base_config(); + + entry + .expect_call_spoofed_simulate_op() + .returning(move |_a, _b, _c, _d, _e, _f| { + Ok(Ok(ExecutionResult { + target_result: TestCallGasResult { + success: true, + gas_used: TEST_MAX_GAS_LIMITS.into(), + revert_data: Bytes::new(), + } + .encode() + .into(), + target_success: true, + ..Default::default() + })) + }); + provider + .expect_get_latest_block_hash_and_number() + .returning(|| Ok((H256::zero(), U64::zero()))); + + let (estimator, _) = create_estimator(entry, provider); + + let optional_op = UserOperationOptionalGas { + sender: Address::zero(), + nonce: U256::zero(), + call_data: Bytes::new(), + call_gas_limit: Some(TEST_MAX_GAS_LIMITS.into()), + verification_gas_limit: Some(TEST_MAX_GAS_LIMITS.into()), + 
pre_verification_gas: Some(TEST_MAX_GAS_LIMITS.into()), + max_fee_per_gas: None, + max_priority_fee_per_gas: None, + signature: Bytes::new(), + + paymaster: None, + paymaster_data: Bytes::new(), + paymaster_verification_gas_limit: Some(TEST_MAX_GAS_LIMITS.into()), + paymaster_post_op_gas_limit: Some(TEST_MAX_GAS_LIMITS.into()), + + factory: None, + factory_data: Bytes::new(), + }; + + let estimation = estimator + .estimate_op_gas(optional_op, spoof::state(), None) + .await + .err() + .unwrap(); + + assert!(matches!( + estimation, + GasEstimationError::GasTotalTooLarge(_, TEST_MAX_GAS_LIMITS) + )); + } + + #[test] + fn test_proxy_target_offset() { + let proxy_target_bytes = hex::decode(PROXY_IMPLEMENTATION_ADDRESS_MARKER).unwrap(); + let mut offsets = Vec::::new(); + for i in 0..CALLGASESTIMATIONPROXY_DEPLOYED_BYTECODE.len() - 20 { + if CALLGASESTIMATIONPROXY_DEPLOYED_BYTECODE[i..i + 20] == proxy_target_bytes { + offsets.push(i); + } + } + assert_eq!(vec![PROXY_TARGET_OFFSET], offsets); + } +} diff --git a/crates/sim/src/gas/gas.rs b/crates/sim/src/gas/gas.rs index 513c0fc4..a0328ca8 100644 --- a/crates/sim/src/gas/gas.rs +++ b/crates/sim/src/gas/gas.rs @@ -14,13 +14,10 @@ use std::{cmp, fmt::Debug, sync::Arc}; use anyhow::Context; -use ethers::{ - abi::AbiEncode, - types::{Address, Chain, U256}, -}; -use rundler_provider::Provider; +use ethers::types::U256; +use rundler_provider::{EntryPoint, L1GasProvider, Provider}; use rundler_types::{ - chain::{ARBITRUM_CHAIN_IDS, OP_BEDROCK_CHAIN_IDS, POLYGON_CHAIN_IDS}, + chain::{self, ChainSpec}, GasFees, UserOperation, }; use rundler_utils::math; @@ -30,32 +27,6 @@ use super::oracle::{ ConstantOracle, FeeOracle, ProviderOracle, UsageBasedFeeOracle, UsageBasedFeeOracleConfig, }; -/// Gas overheads for user operations used in calculating the pre-verification gas. 
See: https://github.com/eth-infinitism/bundler/blob/main/packages/sdk/src/calcPreVerificationGas.ts -#[derive(Clone, Copy, Debug)] -pub struct GasOverheads { - /// The Entrypoint requires a gas buffer for the bundle to account for the gas spent outside of the major steps in the processing of UOs - pub bundle_transaction_gas_buffer: U256, - /// The fixed gas overhead for any EVM transaction - pub transaction_gas_overhead: U256, - per_user_op: U256, - per_user_op_word: U256, - zero_byte: U256, - non_zero_byte: U256, -} - -impl Default for GasOverheads { - fn default() -> Self { - Self { - bundle_transaction_gas_buffer: 5_000.into(), - transaction_gas_overhead: 21_000.into(), - per_user_op: 18_300.into(), - per_user_op_word: 4.into(), - zero_byte: 4.into(), - non_zero_byte: 16.into(), - } - } -} - /// Returns the required pre_verification_gas for the given user operation /// /// `full_op` is either the user operation submitted via `sendUserOperation` @@ -69,65 +40,56 @@ impl Default for GasOverheads { /// /// Networks that require dynamic pre_verification_gas are typically those that charge extra calldata fees /// that can scale based on dynamic gas prices. -pub async fn estimate_pre_verification_gas( - full_op: &UserOperation, - random_op: &UserOperation, - entry_point: Address, - provider: Arc

, - chain_id: u64, +pub async fn estimate_pre_verification_gas< + UO: UserOperation, + E: EntryPoint + L1GasProvider, +>( + chain_spec: &ChainSpec, + entry_point: &E, + full_op: &UO, + random_op: &UO, gas_price: U256, ) -> anyhow::Result { - let static_gas = calc_static_pre_verification_gas(full_op, true); - let dynamic_gas = match chain_id { - _ if ARBITRUM_CHAIN_IDS.contains(&chain_id) => { - provider - .clone() - .calc_arbitrum_l1_gas(entry_point, random_op.clone()) - .await? - } - _ if OP_BEDROCK_CHAIN_IDS.contains(&chain_id) => { - provider - .clone() - .calc_optimism_l1_gas(entry_point, random_op.clone(), gas_price) - .await? - } - _ => U256::zero(), - }; + let static_gas = full_op.calc_static_pre_verification_gas(chain_spec, true); + if !chain_spec.calldata_pre_verification_gas { + return Ok(static_gas); + } + + let dynamic_gas = entry_point + .calc_l1_gas(entry_point.address(), random_op.clone(), gas_price) + .await?; println!("HC estimate_pre_verification_gas {} = {} + {} price {}", static_gas + dynamic_gas, static_gas, dynamic_gas, gas_price); - Ok(static_gas + dynamic_gas) + Ok(static_gas.saturating_add(dynamic_gas)) } /// Calculate the required pre_verification_gas for the given user operation and the provided base fee. /// /// The effective gas price is calculated as min(base_fee + max_priority_fee_per_gas, max_fee_per_gas) -pub async fn calc_required_pre_verification_gas( - op: &UserOperation, - entry_point: Address, - provider: Arc

, - chain_id: u64, +pub async fn calc_required_pre_verification_gas< + UO: UserOperation, + E: EntryPoint + L1GasProvider, +>( + chain_spec: &ChainSpec, + entry_point: &E, + op: &UO, base_fee: U256, ) -> anyhow::Result { - println!("HC entering calc_pre_verification_gas, base_fee {} op_fees {} {}", base_fee, op.max_priority_fee_per_gas, op.max_fee_per_gas); - let static_gas = calc_static_pre_verification_gas(op, true); - let dynamic_gas = match chain_id { - _ if ARBITRUM_CHAIN_IDS.contains(&chain_id) => { - provider - .clone() - .calc_arbitrum_l1_gas(entry_point, op.clone()) - .await? - } - _ if OP_BEDROCK_CHAIN_IDS.contains(&chain_id) => { - let gas_price = cmp::min(base_fee + op.max_priority_fee_per_gas, op.max_fee_per_gas); + println!("HC entering calc_pre_verification_gas, base_fee {} op_fees {} {}", base_fee, op.max_priority_fee_per_gas(), op.max_fee_per_gas()); + let static_gas = op.calc_static_pre_verification_gas(chain_spec, true); + if !chain_spec.calldata_pre_verification_gas { + return Ok(static_gas); + } - provider - .clone() - .calc_optimism_l1_gas(entry_point, op.clone(), gas_price) - .await? - } - _ => U256::zero(), - }; + let gas_price = cmp::min( + base_fee + op.max_priority_fee_per_gas(), + op.max_fee_per_gas(), + ); + + let dynamic_gas = entry_point + .calc_l1_gas(entry_point.address(), op.clone(), gas_price) + .await?; println!("HC calc_required_pre_verification_gas {} = {} + {}", static_gas + dynamic_gas, static_gas, dynamic_gas); Ok(static_gas + dynamic_gas) @@ -150,102 +112,71 @@ pub async fn calc_required_pre_verification_gas( /// If limiting the size of a bundle transaction to adhere to block gas limit, use the execution gas limit functions. /// Returns the gas limit for the user operation that applies to bundle transaction's limit -pub fn user_operation_gas_limit( - uo: &UserOperation, - chain_id: u64, +/// +/// On an L2 this is the total gas limit for the bundle transaction ~including~ any potential L1 costs +/// if the chain requires it. 
+/// +/// This is needed to set the gas limit for the bundle transaction. +pub fn user_operation_gas_limit( + chain_spec: &ChainSpec, + uo: &UO, assume_single_op_bundle: bool, - paymaster_post_op: bool, ) -> U256 { - user_operation_pre_verification_gas_limit(uo, chain_id, assume_single_op_bundle) - + uo.call_gas_limit - + uo.verification_gas_limit - * verification_gas_limit_multiplier(assume_single_op_bundle, paymaster_post_op) + user_operation_pre_verification_gas_limit(chain_spec, uo, assume_single_op_bundle) + + uo.total_verification_gas_limit() + + uo.required_pre_execution_buffer() + + uo.call_gas_limit() } /// Returns the gas limit for the user operation that applies to bundle transaction's execution limit -pub fn user_operation_execution_gas_limit( - uo: &UserOperation, - chain_id: u64, +/// +/// On an L2 this is the total gas limit for the bundle transaction ~excluding~ any potential L1 costs. +/// +/// This is needed to limit the size of the bundle transaction to adhere to the block gas limit. 
+pub fn user_operation_execution_gas_limit( + chain_spec: &ChainSpec, + uo: &UO, assume_single_op_bundle: bool, - paymaster_post_op: bool, ) -> U256 { - user_operation_pre_verification_execution_gas_limit(uo, chain_id, assume_single_op_bundle) - + uo.call_gas_limit - + uo.verification_gas_limit - * verification_gas_limit_multiplier(assume_single_op_bundle, paymaster_post_op) + user_operation_pre_verification_execution_gas_limit(chain_spec, uo, assume_single_op_bundle) + + uo.total_verification_gas_limit() + + uo.required_pre_execution_buffer() + + uo.call_gas_limit() } /// Returns the static pre-verification gas cost of a user operation -pub fn user_operation_pre_verification_execution_gas_limit( - uo: &UserOperation, - chain_id: u64, +/// +/// On an L2 this is the total gas limit for the bundle transaction ~excluding~ any potential L1 costs +pub fn user_operation_pre_verification_execution_gas_limit( + chain_spec: &ChainSpec, + uo: &UO, include_fixed_gas_overhead: bool, ) -> U256 { // On some chains (OP bedrock, Arbitrum) the L1 gas fee is charged via pre_verification_gas // but this not part of the EXECUTION gas limit of the transaction. // In such cases we only consider the static portion of the pre_verification_gas in the gas limit. 
- if OP_BEDROCK_CHAIN_IDS.contains(&chain_id) | ARBITRUM_CHAIN_IDS.contains(&chain_id) { - calc_static_pre_verification_gas(uo, include_fixed_gas_overhead) + if chain_spec.calldata_pre_verification_gas { + uo.calc_static_pre_verification_gas(chain_spec, include_fixed_gas_overhead) } else { - uo.pre_verification_gas + uo.pre_verification_gas() } } /// Returns the gas limit for the user operation that applies to bundle transaction's limit -pub fn user_operation_pre_verification_gas_limit( - uo: &UserOperation, - chain_id: u64, +/// +/// On an L2 this is the total gas limit for the bundle transaction ~including~ any potential L1 costs +pub fn user_operation_pre_verification_gas_limit( + chain_spec: &ChainSpec, + uo: &UO, include_fixed_gas_overhead: bool, ) -> U256 { // On some chains (OP bedrock) the L1 gas fee is charged via pre_verification_gas // but this not part of the execution TOTAL limit of the transaction. // In such cases we only consider the static portion of the pre_verification_gas in the gas limit. 
- if OP_BEDROCK_CHAIN_IDS.contains(&chain_id) { - calc_static_pre_verification_gas(uo, include_fixed_gas_overhead) - } else { - uo.pre_verification_gas - } -} - -fn calc_static_pre_verification_gas(op: &UserOperation, include_fixed_gas_overhead: bool) -> U256 { - let ov = GasOverheads::default(); - let encoded_op = op.clone().encode(); - let length_in_words = encoded_op.len() / 32; // size of packed user op is always a multiple of 32 bytes - let call_data_cost: U256 = encoded_op - .iter() - .map(|&x| { - if x == 0 { - ov.zero_byte - } else { - ov.non_zero_byte - } - }) - .reduce(|a, b| a + b) - .unwrap_or_default(); - - call_data_cost - + ov.per_user_op - + ov.per_user_op_word * length_in_words - + (if include_fixed_gas_overhead { - ov.transaction_gas_overhead - } else { - 0.into() - }) -} - -fn verification_gas_limit_multiplier( - assume_single_op_bundle: bool, - paymaster_post_op: bool, -) -> u64 { - // If using a paymaster that has a postOp, we need to account for potentially 2 postOp calls which can each use up to verification_gas_limit gas. - // otherwise the entrypoint expects the gas for 1 postOp call that uses verification_gas_limit plus the actual verification call - // we only add the additional verification_gas_limit only if we know for sure that this is a single op bundle, which what we do to get a worst-case upper bound - if paymaster_post_op { - 3 - } else if assume_single_op_bundle { - 2 + if chain_spec.calldata_pre_verification_gas && !chain_spec.include_l1_gas_in_gas_limit { + uo.calc_static_pre_verification_gas(chain_spec, include_fixed_gas_overhead) } else { - 1 + uo.pre_verification_gas() } } @@ -313,7 +244,7 @@ pub struct FeeEstimator

{ provider: Arc

, priority_fee_mode: PriorityFeeMode, bundle_priority_fee_overhead_percent: u64, - fee_oracle: Arc>, + fee_oracle: Arc, } impl FeeEstimator

{ @@ -324,8 +255,8 @@ impl FeeEstimator

{ /// `bundle_priority_fee_overhead_percent` is used to determine the overhead percentage to add /// to the network returned priority fee to ensure the bundle priority fee is high enough. pub fn new( + chain_spec: &ChainSpec, provider: Arc

, - chain_id: u64, priority_fee_mode: PriorityFeeMode, bundle_priority_fee_overhead_percent: u64, ) -> Self { @@ -333,7 +264,7 @@ impl FeeEstimator

{ provider: provider.clone(), priority_fee_mode, bundle_priority_fee_overhead_percent, - fee_oracle: get_fee_oracle(chain_id, provider), + fee_oracle: get_fee_oracle(chain_spec, provider), } } @@ -389,47 +320,28 @@ impl FeeEstimator

{ } } -// TODO move all of this to ChainSpec -/// ETHEREUM_MAINNET_MAX_PRIORITY_FEE_MIN -pub const ETHEREUM_MAINNET_MAX_PRIORITY_FEE_MIN: u64 = 100_000_000; -/// Polygon Mumbai max priority fee min -pub const POLYGON_MUMBAI_MAX_PRIORITY_FEE_MIN: u64 = 1_500_000_000; -/// Polygon Mainnet max priority fee min -pub const POLYGON_MAINNET_MAX_PRIORITY_FEE_MIN: u64 = 30_000_000_000; -/// Optimism Bedrock chains max priority fee min -pub const OPTIMISM_BEDROCK_MAX_PRIORITY_FEE_MIN: u64 = 100_000; -/// Boba Sepolia max priority fee min -pub const BOBA_TESTNET_MAX_PRIORITY_FEE_MIN: u64 = 1_000_000; - -/// Returns the minimum max priority fee per gas for the given chain id. -pub fn get_min_max_priority_fee_per_gas(chain_id: u64) -> U256 { - match chain_id { - x if x == Chain::Mainnet as u64 => ETHEREUM_MAINNET_MAX_PRIORITY_FEE_MIN.into(), - x if x == Chain::Polygon as u64 => POLYGON_MAINNET_MAX_PRIORITY_FEE_MIN.into(), - x if x == Chain::PolygonMumbai as u64 => POLYGON_MUMBAI_MAX_PRIORITY_FEE_MIN.into(), - x if x == 28882 as u64 => BOBA_TESTNET_MAX_PRIORITY_FEE_MIN.into(), - x if x == 901 as u64 => BOBA_TESTNET_MAX_PRIORITY_FEE_MIN.into(), - x if OP_BEDROCK_CHAIN_IDS.contains(&x) => OPTIMISM_BEDROCK_MAX_PRIORITY_FEE_MIN.into(), - _ => U256::zero(), - } -} - -fn get_fee_oracle

(chain_id: u64, provider: Arc

) -> Arc> +fn get_fee_oracle

(chain_spec: &ChainSpec, provider: Arc

) -> Arc where P: Provider + Debug, { - let minimum_fee = get_min_max_priority_fee_per_gas(chain_id); - println!("HC get_fee_oracle minimum {:?} chain {:?} is_optimism {:?}", minimum_fee, &chain_id, OP_BEDROCK_CHAIN_IDS.contains(&chain_id)); - - if ARBITRUM_CHAIN_IDS.contains(&chain_id) { - Arc::new(Box::new(ConstantOracle::new(U256::zero()))) - } else if OP_BEDROCK_CHAIN_IDS.contains(&chain_id) || POLYGON_CHAIN_IDS.contains(&chain_id) { - let config = UsageBasedFeeOracleConfig { - minimum_fee, - ..Default::default() - }; - Arc::new(Box::new(UsageBasedFeeOracle::new(provider, config))) - } else { - Arc::new(Box::new(ProviderOracle::new(provider))) + if !chain_spec.eip1559_enabled { + return Arc::new(ConstantOracle::new(U256::zero())); + } + + match chain_spec.priority_fee_oracle_type { + chain::PriorityFeeOracleType::Provider => Arc::new(ProviderOracle::new( + provider, + chain_spec.min_max_priority_fee_per_gas, + )), + chain::PriorityFeeOracleType::UsageBased => { + let config = UsageBasedFeeOracleConfig { + minimum_fee: chain_spec.min_max_priority_fee_per_gas, + maximum_fee: chain_spec.max_max_priority_fee_per_gas, + congestion_trigger_usage_ratio_threshold: chain_spec + .congestion_trigger_usage_ratio_threshold, + ..Default::default() + }; + Arc::new(UsageBasedFeeOracle::new(provider, config)) + } } } diff --git a/crates/sim/src/gas/oracle.rs b/crates/sim/src/gas/oracle.rs index 4a910585..632f3a4a 100644 --- a/crates/sim/src/gas/oracle.rs +++ b/crates/sim/src/gas/oracle.rs @@ -224,11 +224,15 @@ fn calculate_estimate_from_rewards(reward: &[Vec]) -> U256 { #[derive(Debug)] pub(crate) struct ProviderOracle

{ provider: Arc

, + min_max_fee_per_gas: U256, } impl

ProviderOracle

{ - pub(crate) fn new(provider: Arc

) -> Self { - Self { provider } + pub(crate) fn new(provider: Arc

, min_max_fee_per_gas: U256) -> Self { + Self { + provider, + min_max_fee_per_gas, + } } } @@ -238,10 +242,12 @@ where P: Provider + Debug, { async fn estimate_priority_fee(&self) -> Result { - self.provider + Ok(self + .provider .get_max_priority_fee() .await - .map_err(|e| FeeOracleError::Other(e.into())) + .map_err(|e| FeeOracleError::Other(e.into()))? + .max(self.min_max_fee_per_gas)) } } @@ -418,6 +424,17 @@ mod tests { assert_eq!(fee, U256::from(300)); } + #[tokio::test] + async fn test_provider_oracle_min() { + let mut mock = MockProvider::default(); + mock.expect_get_max_priority_fee() + .times(1) + .returning(|| Ok(U256::from(400))); + let oracle = ProviderOracle::new(Arc::new(mock), U256::from(401)); + let fee = oracle.estimate_priority_fee().await.unwrap(); + assert_eq!(fee, U256::from(401)); + } + #[tokio::test] async fn test_max_oracle_choose_provider() { let mut mock = MockProvider::default(); @@ -450,7 +467,7 @@ mod tests { ..Default::default() }, )); - oracle.add(ProviderOracle::new(provider)); + oracle.add(ProviderOracle::new(provider, U256::from(0))); let fee = oracle.estimate_priority_fee().await.unwrap(); assert_eq!(fee, U256::from(400)); @@ -488,7 +505,7 @@ mod tests { ..Default::default() }, )); - oracle.add(ProviderOracle::new(provider)); + oracle.add(ProviderOracle::new(provider, U256::from(0))); let fee = oracle.estimate_priority_fee().await.unwrap(); assert_eq!(fee, U256::from(200)); diff --git a/crates/sim/src/lib.rs b/crates/sim/src/lib.rs index db94aeb3..e78986b2 100644 --- a/crates/sim/src/lib.rs +++ b/crates/sim/src/lib.rs @@ -30,10 +30,14 @@ //! //! - `test-utils`: Export mocks and utilities for testing. 
+/// Gas estimation mod estimation; +#[cfg(feature = "test-utils")] +pub use estimation::MockGasEstimator; pub use estimation::{ - GasEstimate, GasEstimationError, GasEstimator, GasEstimatorImpl, - Settings as EstimationSettings, UserOperationOptionalGas, + CallGasEstimator, CallGasEstimatorImpl, CallGasEstimatorSpecialization, GasEstimationError, + GasEstimator, GasEstimatorV0_6, GasEstimatorV0_7, Settings as EstimationSettings, + VerificationGasEstimator, VerificationGasEstimatorImpl, }; pub mod gas; @@ -43,17 +47,16 @@ mod precheck; #[cfg(feature = "test-utils")] pub use precheck::MockPrechecker; pub use precheck::{ - PrecheckError, PrecheckViolation, Prechecker, PrecheckerImpl, Settings as PrecheckSettings, - MIN_CALL_GAS_LIMIT, + PrecheckError, Prechecker, PrecheckerImpl, Settings as PrecheckSettings, MIN_CALL_GAS_LIMIT, }; -mod simulation; +/// Simulation and violation checking +pub mod simulation; #[cfg(feature = "test-utils")] pub use simulation::MockSimulator; pub use simulation::{ - EntityInfo, EntityInfos, MempoolConfig, NeedsStakeInformation, Settings as SimulationSettings, - SimulateValidationTracer, SimulateValidationTracerImpl, SimulationError, SimulationResult, - SimulationViolation, Simulator, SimulatorImpl, ViolationOpCode, + MempoolConfig, MempoolConfigs, Settings as SimulationSettings, SimulationError, + SimulationResult, Simulator, }; mod types; diff --git a/crates/sim/src/precheck.rs b/crates/sim/src/precheck.rs index b92eaccc..0d73d724 100644 --- a/crates/sim/src/precheck.rs +++ b/crates/sim/src/precheck.rs @@ -11,32 +11,39 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. 
-use std::sync::{Arc, RwLock}; +use std::{ + marker::PhantomData, + sync::{Arc, RwLock}, +}; use anyhow::Context; use arrayvec::ArrayVec; -use ethers::types::{Address, U256}; +use ethers::types::{Address, U128, U256}; #[cfg(feature = "test-utils")] use mockall::automock; -use rundler_provider::{EntryPoint, Provider}; -use rundler_types::{GasFees, UserOperation}; +use rundler_provider::{EntryPoint, L1GasProvider, Provider}; +use rundler_types::{ + chain::ChainSpec, + pool::{MempoolError, PrecheckViolation}, + GasFees, UserOperation, +}; use rundler_utils::math; -use crate::{ - gas::{self, get_min_max_priority_fee_per_gas}, - types::ViolationError, -}; +use crate::{gas, types::ViolationError}; /// The min cost of a `CALL` with nonzero value, as required by the spec. -pub const MIN_CALL_GAS_LIMIT: U256 = U256([9100, 0, 0, 0]); +pub const MIN_CALL_GAS_LIMIT: U128 = U128([9100, 0]); /// Trait for checking if a user operation is valid before simulation /// according to the spec rules. -#[cfg_attr(feature = "test-utils", automock)] +#[cfg_attr(feature = "test-utils", automock(type UO = rundler_types::v0_6::UserOperation;))] #[async_trait::async_trait] pub trait Prechecker: Send + Sync + 'static { + /// The user operation type + type UO: UserOperation; + /// Run the precheck on the given operation and return an error if it fails. - async fn check(&self, op: &UserOperation) -> Result<(), PrecheckError>; + async fn check(&self, op: &Self::UO) -> Result<(), PrecheckError>; /// Update and return the bundle fees. 
async fn update_fees(&self) -> anyhow::Result<(GasFees, U256)>; } @@ -44,22 +51,39 @@ pub trait Prechecker: Send + Sync + 'static { /// Precheck error pub type PrecheckError = ViolationError; +impl From for MempoolError { + fn from(mut error: PrecheckError) -> Self { + let PrecheckError::Violations(violations) = &mut error else { + return Self::Other(error.into()); + }; + + let Some(violation) = violations.iter_mut().min() else { + return Self::Other(error.into()); + }; + + // extract violation and replace with dummy + Self::PrecheckViolation(std::mem::replace( + violation, + PrecheckViolation::SenderIsNotContractAndNoInitCode(Address::zero()), + )) + } +} + /// Prechecker implementation #[derive(Debug)] -pub struct PrecheckerImpl { +pub struct PrecheckerImpl { + chain_spec: ChainSpec, provider: Arc

, entry_point: E, settings: Settings, fee_estimator: gas::FeeEstimator

, - cache: RwLock, + _uo_type: PhantomData, } /// Precheck settings #[derive(Copy, Clone, Debug)] pub struct Settings { - /// Chain ID - pub chain_id: u64, /// Maximum verification gas allowed for a user operation pub max_verification_gas: U256, /// Maximum total execution gas allowed for a user operation @@ -83,7 +107,6 @@ impl Default for Settings { bundle_priority_fee_overhead_percent: 0, priority_fee_mode: gas::PriorityFeeMode::BaseFeePercent(0), max_total_execution_gas: 10_000_000.into(), - chain_id: 1, base_fee_accept_percent: 50, pre_verification_gas_accept_percent: 100, } @@ -112,8 +135,15 @@ struct FeeCache { } #[async_trait::async_trait] -impl Prechecker for PrecheckerImpl { - async fn check(&self, op: &UserOperation) -> Result<(), PrecheckError> { +impl Prechecker for PrecheckerImpl +where + P: Provider, + E: EntryPoint + L1GasProvider, + UO: UserOperation, +{ + type UO = UO; + + async fn check(&self, op: &Self::UO) -> Result<(), PrecheckError> { let async_data = self.load_async_data(op).await?; let mut violations: Vec = vec![]; violations.extend(self.check_init_code(op, async_data)); @@ -138,63 +168,65 @@ impl Prechecker for PrecheckerImpl { } } -impl PrecheckerImpl { +impl PrecheckerImpl +where + P: Provider, + E: EntryPoint + L1GasProvider, + UO: UserOperation, +{ /// Create a new prechecker - pub fn new(provider: Arc

, entry_point: E, settings: Settings) -> Self { + pub fn new( + chain_spec: ChainSpec, + provider: Arc

, + entry_point: E, + settings: Settings, + ) -> Self { + let fee_estimator = gas::FeeEstimator::new( + &chain_spec, + provider.clone(), + settings.priority_fee_mode, + settings.bundle_priority_fee_overhead_percent, + ); + Self { - provider: provider.clone(), + chain_spec, + provider, entry_point, settings, - fee_estimator: gas::FeeEstimator::new( - provider, - settings.chain_id, - settings.priority_fee_mode, - settings.bundle_priority_fee_overhead_percent, - ), + fee_estimator, cache: RwLock::new(AsyncDataCache { fees: None }), + _uo_type: PhantomData, } } - fn check_init_code( - &self, - op: &UserOperation, - async_data: AsyncData, - ) -> ArrayVec { + fn check_init_code(&self, op: &UO, async_data: AsyncData) -> ArrayVec { let AsyncData { factory_exists, sender_exists, .. } = async_data; let mut violations = ArrayVec::new(); - let len = op.init_code.len(); - if len == 0 { + if op.factory().is_none() { if !sender_exists { violations.push(PrecheckViolation::SenderIsNotContractAndNoInitCode( - op.sender, + op.sender(), )); } } else { - if len < 20 { - violations.push(PrecheckViolation::InitCodeTooShort(len)); - } else if !factory_exists { + if !factory_exists { violations.push(PrecheckViolation::FactoryIsNotContract( op.factory().unwrap(), )) } if sender_exists { - violations.push(PrecheckViolation::ExistingSenderWithInitCode(op.sender)); + violations.push(PrecheckViolation::ExistingSenderWithInitCode(op.sender())); } } violations } - fn check_gas( - &self, - op: &UserOperation, - async_data: AsyncData, - ) -> ArrayVec { + fn check_gas(&self, op: &UO, async_data: AsyncData) -> ArrayVec { let Settings { - chain_id, max_verification_gas, max_total_execution_gas, .. 
@@ -206,16 +238,16 @@ impl PrecheckerImpl { } = async_data; let mut violations = ArrayVec::new(); - if op.verification_gas_limit > max_verification_gas { + if op.verification_gas_limit() > max_verification_gas { violations.push(PrecheckViolation::VerificationGasLimitTooHigh( - op.verification_gas_limit, + op.verification_gas_limit(), max_verification_gas, )); } // compute the worst case total gas limit by assuming the UO is in its own bundle and has a postOp call. // This is conservative and potentially may invalidate some very large UOs that would otherwise be valid. - let gas_limit = gas::user_operation_execution_gas_limit(op, chain_id, true, true); + let gas_limit = gas::user_operation_execution_gas_limit(&self.chain_spec, op, true); if gas_limit > max_total_execution_gas { violations.push(PrecheckViolation::TotalGasLimitTooHigh( gas_limit, @@ -229,9 +261,9 @@ impl PrecheckerImpl { min_pre_verification_gas, self.settings.pre_verification_gas_accept_percent, ); - if op.pre_verification_gas < min_pre_verification_gas { + if op.pre_verification_gas() < min_pre_verification_gas { violations.push(PrecheckViolation::PreVerificationGasTooLow( - op.pre_verification_gas, + op.pre_verification_gas(), min_pre_verification_gas, )); } @@ -241,55 +273,47 @@ impl PrecheckerImpl { let min_priority_fee = self.settings.priority_fee_mode.minimum_priority_fee( base_fee, self.settings.base_fee_accept_percent, - get_min_max_priority_fee_per_gas(self.settings.chain_id), + self.chain_spec.min_max_priority_fee_per_gas, ); let min_max_fee = min_base_fee + min_priority_fee; - //println!("HC precheck.rs before gas fees {:?}", op.clone()); // check priority fee first, since once ruled out we can check max fee - if op.max_priority_fee_per_gas < min_priority_fee { - //println!("HC prcheck.rs BYPASS priority_fee {:?} {:?}", op.clone(), min_priority_fee); + if op.max_priority_fee_per_gas() < min_priority_fee { violations.push(PrecheckViolation::MaxPriorityFeePerGasTooLow( - 
op.max_priority_fee_per_gas, + op.max_priority_fee_per_gas(), min_priority_fee, )); } - if op.max_fee_per_gas < min_max_fee { - //println!("HC prcheck.rs BYPASS max_fee {:?} {:?}", op.clone(), min_max_fee); + if op.max_fee_per_gas() < min_max_fee { violations.push(PrecheckViolation::MaxFeePerGasTooLow( - op.max_fee_per_gas, + op.max_fee_per_gas(), min_max_fee, )); } - if op.call_gas_limit < MIN_CALL_GAS_LIMIT { + if op.call_gas_limit() < MIN_CALL_GAS_LIMIT.into() { violations.push(PrecheckViolation::CallGasLimitTooLow( - op.call_gas_limit, - MIN_CALL_GAS_LIMIT, + op.call_gas_limit(), + MIN_CALL_GAS_LIMIT.into(), )); } violations } - fn check_payer(&self, op: &UserOperation, async_data: AsyncData) -> Option { + fn check_payer(&self, op: &UO, async_data: AsyncData) -> Option { let AsyncData { paymaster_exists, payer_funds, .. } = async_data; - if !op.paymaster_and_data.is_empty() { - let Some(paymaster) = op.paymaster() else { - return Some(PrecheckViolation::PaymasterTooShort( - op.paymaster_and_data.len(), - )); - }; + if let Some(paymaster) = op.paymaster() { if !paymaster_exists { return Some(PrecheckViolation::PaymasterIsNotContract(paymaster)); } } let max_gas_cost = op.max_gas_cost(); if payer_funds < max_gas_cost { - if op.paymaster_and_data.is_empty() { + if op.paymaster().is_none() { return Some(PrecheckViolation::SenderFundsTooLow( payer_funds, max_gas_cost, @@ -304,7 +328,7 @@ impl PrecheckerImpl { None } - async fn load_async_data(&self, op: &UserOperation) -> anyhow::Result { + async fn load_async_data(&self, op: &UO) -> anyhow::Result { let (_, base_fee) = self.get_fees().await?; let ( @@ -315,7 +339,7 @@ impl PrecheckerImpl { min_pre_verification_gas, ) = tokio::try_join!( self.is_contract(op.factory()), - self.is_contract(Some(op.sender)), + self.is_contract(Some(op.sender())), self.is_contract(op.paymaster()), self.get_payer_funds(op), self.get_required_pre_verification_gas(op.clone(), base_fee) @@ -342,16 +366,16 @@ impl PrecheckerImpl { 
Ok(!bytecode.is_empty()) } - async fn get_payer_funds(&self, op: &UserOperation) -> anyhow::Result { + async fn get_payer_funds(&self, op: &UO) -> anyhow::Result { let (deposit, balance) = tokio::try_join!(self.get_payer_deposit(op), self.get_payer_balance(op),)?; Ok(deposit + balance) } - async fn get_payer_deposit(&self, op: &UserOperation) -> anyhow::Result { + async fn get_payer_deposit(&self, op: &UO) -> anyhow::Result { let payer = match op.paymaster() { Some(paymaster) => paymaster, - None => op.sender, + None => op.sender(), }; self.entry_point .balance_of(payer, None) @@ -359,13 +383,13 @@ impl PrecheckerImpl { .context("precheck should get payer balance") } - async fn get_payer_balance(&self, op: &UserOperation) -> anyhow::Result { - if !op.paymaster_and_data.is_empty() { + async fn get_payer_balance(&self, op: &UO) -> anyhow::Result { + if op.paymaster().is_some() { // Paymasters must deposit eth, and cannot pay with their own. return Ok(0.into()); } self.provider - .get_balance(op.sender, None) + .get_balance(op.sender(), None) .await .context("precheck should get sender balance") } @@ -379,83 +403,31 @@ impl PrecheckerImpl { async fn get_required_pre_verification_gas( &self, - op: UserOperation, + op: UO, base_fee: U256, ) -> anyhow::Result { - gas::calc_required_pre_verification_gas( - &op, - self.entry_point.address(), - self.provider.clone(), - self.settings.chain_id, - base_fee, - ) - .await - .context("should calculate pre-verification gas") + gas::calc_required_pre_verification_gas(&self.chain_spec, &self.entry_point, &op, base_fee) + .await + .context("should calculate pre-verification gas") } } -/// Precheck violation enumeration -/// -/// All possible errors that can be returned from a precheck. -#[derive(Clone, Debug, parse_display::Display, Eq, PartialEq, Ord, PartialOrd)] -pub enum PrecheckViolation { - /// The init code is too short to contain a factory address. 
- #[display("initCode must start with a 20-byte factory address, but was only {0} bytes")] - InitCodeTooShort(usize), - /// The sender is not deployed, and no init code is provided. - #[display("sender {0:?} is not a contract and initCode is empty")] - SenderIsNotContractAndNoInitCode(Address), - /// The sender is already deployed, and an init code is provided. - #[display("sender {0:?} is an existing contract, but initCode is nonempty")] - ExistingSenderWithInitCode(Address), - /// An init code contains a factory address that is not deployed. - #[display("initCode indicates factory with no code: {0:?}")] - FactoryIsNotContract(Address), - /// The total gas limit of the user operation is too high. - /// See `gas::user_operation_execution_gas_limit` for calculation. - #[display("total gas limit is {0} but must be at most {1}")] - TotalGasLimitTooHigh(U256, U256), - /// The verification gas limit of the user operation is too high. - #[display("verificationGasLimit is {0} but must be at most {1}")] - VerificationGasLimitTooHigh(U256, U256), - /// The pre-verification gas of the user operation is too low. - #[display("preVerificationGas is {0} but must be at least {1}")] - PreVerificationGasTooLow(U256, U256), - /// The paymaster and data is too short to contain a paymaster address. - #[display("paymasterAndData must start a 20-byte paymaster address, but was only {0} bytes")] - PaymasterTooShort(usize), - /// A paymaster is provided, but the address is not deployed. - #[display("paymasterAndData indicates paymaster with no code: {0:?}")] - PaymasterIsNotContract(Address), - /// The paymaster deposit is too low to pay for the user operation's maximum cost. - #[display("paymaster deposit is {0} but must be at least {1} to pay for this operation")] - PaymasterDepositTooLow(U256, U256), - /// The sender balance is too low to pay for the user operation's maximum cost. 
- /// (when not using a paymaster) - #[display("sender balance and deposit together is {0} but must be at least {1} to pay for this operation")] - SenderFundsTooLow(U256, U256), - /// The provided max priority fee per gas is too low based on the current network rate. - #[display("maxPriorityFeePerGas is {0} but must be at least {1}")] - MaxPriorityFeePerGasTooLow(U256, U256), - /// The provided max fee per gas is too low based on the current network rate. - #[display("maxFeePerGas is {0} but must be at least {1}")] - MaxFeePerGasTooLow(U256, U256), - /// The call gas limit is too low to account for any possible call. - #[display("callGasLimit is {0} but must be at least {1}")] - CallGasLimitTooLow(U256, U256), -} - #[cfg(test)] mod tests { use std::str::FromStr; - use ethers::types::{Bytes, Chain}; - use rundler_provider::{MockEntryPoint, MockProvider}; + use ethers::types::Bytes; + use rundler_provider::{MockEntryPointV0_6, MockProvider}; + use rundler_types::v0_6::UserOperation; use super::*; - fn create_base_config() -> (MockProvider, MockEntryPoint) { - (MockProvider::new(), MockEntryPoint::new()) + fn create_base_config() -> (ChainSpec, MockProvider, MockEntryPointV0_6) { + ( + ChainSpec::default(), + MockProvider::new(), + MockEntryPointV0_6::new(), + ) } fn get_test_async_data() -> AsyncData { @@ -471,12 +443,13 @@ mod tests { #[tokio::test] async fn test_check_init_code() { - let (provider, entry_point) = create_base_config(); - let prechecker = PrecheckerImpl::new(Arc::new(provider), entry_point, Settings::default()); + let (cs, provider, entry_point) = create_base_config(); + let prechecker = + PrecheckerImpl::new(cs, Arc::new(provider), entry_point, Settings::default()); let op = UserOperation { sender: Address::from_str("0x3f8a2b6c4d5e1079286fa1b3c0d4e5f6902b7c8d").unwrap(), nonce: 100.into(), - init_code: Bytes::from_str("0x1000").unwrap(), + init_code: Bytes::from_str("0x3f8a2b6c4d5e1079286fa1b3c0d4e5f6902b7c8d").unwrap(), call_data: Bytes::default(), 
call_gas_limit: 9_000.into(), // large call gas limit high to trigger TotalGasLimitTooHigh verification_gas_limit: 10_000_000.into(), @@ -488,22 +461,17 @@ mod tests { }; let res = prechecker.check_init_code(&op, get_test_async_data()); - assert_eq!( - res, - ArrayVec::::from([ - PrecheckViolation::InitCodeTooShort(2), - PrecheckViolation::ExistingSenderWithInitCode( - Address::from_str("0x3f8a2b6c4d5e1079286fa1b3c0d4e5f6902b7c8d").unwrap() - ) - ]) - ); + let mut expected = ArrayVec::new(); + expected.push(PrecheckViolation::ExistingSenderWithInitCode( + Address::from_str("0x3f8a2b6c4d5e1079286fa1b3c0d4e5f6902b7c8d").unwrap(), + )); + assert_eq!(res, expected); } #[tokio::test] async fn test_check_gas() { - let (provider, entry_point) = create_base_config(); + let (cs, provider, entry_point) = create_base_config(); let test_settings = Settings { - chain_id: 1, max_verification_gas: 5_000_000.into(), max_total_execution_gas: 10_000_000.into(), bundle_priority_fee_overhead_percent: 0, @@ -511,7 +479,7 @@ mod tests { base_fee_accept_percent: 100, pre_verification_gas_accept_percent: 100, }; - let prechecker = PrecheckerImpl::new(Arc::new(provider), entry_point, test_settings); + let prechecker = PrecheckerImpl::new(cs, Arc::new(provider), entry_point, test_settings); let op = UserOperation { sender: Address::from_str("0x3f8a2b6c4d5e1079286fa1b3c0d4e5f6902b7c8d").unwrap(), nonce: 100.into(), @@ -532,7 +500,7 @@ mod tests { res, ArrayVec::::from([ PrecheckViolation::VerificationGasLimitTooHigh(10_000_000.into(), 5_000_000.into(),), - PrecheckViolation::TotalGasLimitTooHigh(30_009_000.into(), 10_000_000.into(),), + PrecheckViolation::TotalGasLimitTooHigh(20_014_000.into(), 10_000_000.into(),), PrecheckViolation::PreVerificationGasTooLow(0.into(), 1_000.into(),), PrecheckViolation::MaxPriorityFeePerGasTooLow(2_000.into(), 4_000.into(),), PrecheckViolation::MaxFeePerGasTooLow(5_000.into(), 8_000.into(),), @@ -543,8 +511,9 @@ mod tests { #[tokio::test] async fn 
test_check_payer_paymaster_deposit_too_low() { - let (provider, entry_point) = create_base_config(); - let prechecker = PrecheckerImpl::new(Arc::new(provider), entry_point, Settings::default()); + let (cs, provider, entry_point) = create_base_config(); + let prechecker = + PrecheckerImpl::new(cs, Arc::new(provider), entry_point, Settings::default()); let op = UserOperation { sender: Address::from_str("0x3f8a2b6c4d5e1079286fa1b3c0d4e5f6902b7c8d").unwrap(), nonce: 100.into(), @@ -575,19 +544,19 @@ mod tests { #[tokio::test] async fn test_check_fees() { let settings = Settings { - chain_id: Chain::Optimism as u64, base_fee_accept_percent: 80, priority_fee_mode: gas::PriorityFeeMode::PriorityFeeIncreasePercent(0), ..Default::default() }; - let (provider, entry_point) = create_base_config(); - let prechecker = PrecheckerImpl::new(Arc::new(provider), entry_point, settings); + let (mut cs, provider, entry_point) = create_base_config(); + cs.id = 10; + let mintip = cs.min_max_priority_fee_per_gas; + let prechecker = PrecheckerImpl::new(cs, Arc::new(provider), entry_point, settings); let mut async_data = get_test_async_data(); async_data.base_fee = 5_000.into(); async_data.min_pre_verification_gas = 1_000.into(); - let mintip = get_min_max_priority_fee_per_gas(Chain::Optimism as u64); let op = UserOperation { max_fee_per_gas: U256::from(math::percent(5000, settings.base_fee_accept_percent)) + mintip, @@ -597,7 +566,7 @@ mod tests { settings.pre_verification_gas_accept_percent, ) .into(), - call_gas_limit: MIN_CALL_GAS_LIMIT, + call_gas_limit: MIN_CALL_GAS_LIMIT.into(), ..Default::default() }; @@ -608,13 +577,12 @@ mod tests { #[tokio::test] async fn test_check_fees_too_low() { let settings = Settings { - chain_id: 10000000, base_fee_accept_percent: 80, priority_fee_mode: gas::PriorityFeeMode::PriorityFeeIncreasePercent(0), ..Default::default() }; - let (provider, entry_point) = create_base_config(); - let prechecker = PrecheckerImpl::new(Arc::new(provider), entry_point, 
settings); + let (cs, provider, entry_point) = create_base_config(); + let prechecker = PrecheckerImpl::new(cs, Arc::new(provider), entry_point, settings); let mut async_data = get_test_async_data(); async_data.base_fee = 5_000.into(); @@ -624,7 +592,7 @@ mod tests { max_fee_per_gas: math::percent(5000, settings.base_fee_accept_percent - 10).into(), max_priority_fee_per_gas: 0.into(), pre_verification_gas: 1_000.into(), - call_gas_limit: MIN_CALL_GAS_LIMIT, + call_gas_limit: MIN_CALL_GAS_LIMIT.into(), ..Default::default() }; @@ -641,34 +609,35 @@ mod tests { #[tokio::test] async fn test_check_fees_min() { let settings = Settings { - chain_id: Chain::Optimism as u64, base_fee_accept_percent: 100, priority_fee_mode: gas::PriorityFeeMode::PriorityFeeIncreasePercent(0), ..Default::default() }; - let (provider, entry_point) = create_base_config(); - let prechecker = PrecheckerImpl::new(Arc::new(provider), entry_point, settings); + let (mut cs, provider, entry_point) = create_base_config(); + cs.id = 10; + cs.min_max_priority_fee_per_gas = 100_000.into(); + let mintip = cs.min_max_priority_fee_per_gas; + let prechecker = PrecheckerImpl::new(cs, Arc::new(provider), entry_point, settings); let mut async_data = get_test_async_data(); async_data.base_fee = 5_000.into(); async_data.min_pre_verification_gas = 1_000.into(); - let mintip = get_min_max_priority_fee_per_gas(Chain::Optimism as u64); let undertip = mintip - U256::from(1); let op = UserOperation { max_fee_per_gas: U256::from(5_000) + mintip, max_priority_fee_per_gas: undertip, pre_verification_gas: 1_000.into(), - call_gas_limit: MIN_CALL_GAS_LIMIT, + call_gas_limit: MIN_CALL_GAS_LIMIT.into(), ..Default::default() }; let res = prechecker.check_gas(&op, async_data); let mut expected = ArrayVec::::new(); expected.push(PrecheckViolation::MaxPriorityFeePerGasTooLow( - get_min_max_priority_fee_per_gas(Chain::Optimism as u64) - U256::from(1), - get_min_max_priority_fee_per_gas(Chain::Optimism as u64), + mintip - 
U256::from(1), + mintip, )); assert_eq!(res, expected); @@ -677,13 +646,12 @@ mod tests { #[tokio::test] async fn test_pvg_too_low() { let settings = Settings { - chain_id: 10000000, base_fee_accept_percent: 80, priority_fee_mode: gas::PriorityFeeMode::PriorityFeeIncreasePercent(0), ..Default::default() }; - let (provider, entry_point) = create_base_config(); - let prechecker = PrecheckerImpl::new(Arc::new(provider), entry_point, settings); + let (cs, provider, entry_point) = create_base_config(); + let prechecker = PrecheckerImpl::new(cs, Arc::new(provider), entry_point, settings); let mut async_data = get_test_async_data(); async_data.base_fee = 5_000.into(); @@ -697,7 +665,7 @@ mod tests { settings.pre_verification_gas_accept_percent - 10, ) .into(), - call_gas_limit: MIN_CALL_GAS_LIMIT, + call_gas_limit: MIN_CALL_GAS_LIMIT.into(), ..Default::default() }; diff --git a/crates/sim/src/simulation/context.rs b/crates/sim/src/simulation/context.rs new file mode 100644 index 00000000..87600ef5 --- /dev/null +++ b/crates/sim/src/simulation/context.rs @@ -0,0 +1,180 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. 
+ +use std::collections::{BTreeSet, HashMap, HashSet}; + +use anyhow::Context; +use ethers::types::{Address, BlockId, U256}; +use rundler_types::{ + pool::SimulationViolation, EntityInfos, EntityType, Opcode, StakeInfo, UserOperation, + ValidationOutput, +}; +use serde::{Deserialize, Serialize}; + +use super::Settings; +use crate::{ExpectedStorage, ViolationError}; + +#[derive(Clone, Debug)] +pub struct ValidationContext { + pub(crate) op: UO, + pub(crate) block_id: BlockId, + pub(crate) entity_infos: EntityInfos, + pub(crate) tracer_out: TracerOutput, + pub(crate) entry_point_out: ValidationOutput, + pub(crate) accessed_addresses: HashSet

, + pub(crate) has_factory: bool, + pub(crate) associated_addresses: HashSet
, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct TracerOutput { + pub(crate) phases: Vec, + pub(crate) revert_data: Option, + pub(crate) accessed_contracts: HashMap, + pub(crate) associated_slots_by_address: AssociatedSlotsByAddress, + pub(crate) factory_called_create2_twice: bool, + pub(crate) expected_storage: ExpectedStorage, +} + +#[derive(Clone, Debug, Default, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct Phase { + pub(crate) forbidden_opcodes_used: Vec, + pub(crate) forbidden_precompiles_used: Vec, + pub(crate) storage_accesses: HashMap, + pub(crate) called_banned_entry_point_method: bool, + pub(crate) called_non_entry_point_with_value: bool, + pub(crate) ran_out_of_gas: bool, + pub(crate) undeployed_contract_accesses: Vec
, + pub(crate) ext_code_access_info: HashMap, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct ContractInfo { + pub(crate) header: String, + pub(crate) opcode: Opcode, + pub(crate) length: u64, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub(crate) struct AccessInfo { + // slot value, just prior this current operation + pub(crate) reads: HashMap, + // count of writes. + pub(crate) writes: HashMap, +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub(crate) struct AssociatedSlotsByAddress(pub(crate) HashMap>); + +impl AssociatedSlotsByAddress { + pub(crate) fn is_associated_slot(&self, address: Address, slot: U256) -> bool { + if slot == address.as_bytes().into() { + return true; + } + let Some(associated_slots) = self.0.get(&address) else { + return false; + }; + let Some(&next_smallest_slot) = associated_slots.range(..(slot + 1)).next_back() else { + return false; + }; + slot - next_smallest_slot < 128.into() + } + + pub(crate) fn addresses(&self) -> HashSet
{ + self.0.clone().into_keys().collect() + } +} + +/// Trait for providing the validation context for a user operation. +#[async_trait::async_trait] +pub trait ValidationContextProvider: Send + Sync + 'static { + /// The user operation type this provider targets. + type UO: UserOperation; + + /// Get the validation context for a user operation. + async fn get_context( + &self, + op: Self::UO, + block_id: BlockId, + ) -> Result, ViolationError>; + + /// Get the violations specific to the particular entry point this provider targets. + fn get_specific_violations( + &self, + _context: &ValidationContext, + ) -> Vec; +} + +pub(crate) fn entity_type_from_simulation_phase(i: usize) -> Option { + match i { + 0 => Some(EntityType::Factory), + 1 => Some(EntityType::Account), + 2 => Some(EntityType::Paymaster), + _ => None, + } +} + +pub(crate) fn infos_from_validation_output( + factory_address: Option
, + sender_address: Address, + paymaster_address: Option
, + entry_point_out: &ValidationOutput, + sim_settings: &Settings, +) -> EntityInfos { + let mut ei = EntityInfos::default(); + ei.set_sender( + sender_address, + is_staked(entry_point_out.sender_info, sim_settings), + ); + if let Some(factory_address) = factory_address { + ei.set_factory( + factory_address, + is_staked(entry_point_out.factory_info, sim_settings), + ); + } + if let Some(paymaster_address) = paymaster_address { + ei.set_paymaster( + paymaster_address, + is_staked(entry_point_out.paymaster_info, sim_settings), + ); + } + if let Some(aggregator_info) = entry_point_out.aggregator_info { + ei.set_aggregator( + aggregator_info.address, + is_staked(aggregator_info.stake_info, sim_settings), + ); + } + + ei +} + +pub(crate) fn is_staked(info: StakeInfo, sim_settings: &Settings) -> bool { + info.stake >= sim_settings.min_stake_value.into() + && info.unstake_delay_sec >= sim_settings.min_unstake_delay.into() +} + +pub(crate) fn parse_combined_context_str(combined: &str) -> anyhow::Result<(A, B)> +where + A: std::str::FromStr, + B: std::str::FromStr, + ::Err: std::error::Error + Send + Sync + 'static, + ::Err: std::error::Error + Send + Sync + 'static, +{ + let (a, b) = combined + .split_once(':') + .context("tracer combined should contain two parts")?; + Ok((a.parse()?, b.parse()?)) +} diff --git a/crates/sim/src/simulation/mempool.rs b/crates/sim/src/simulation/mempool.rs index 97c9626c..d3dab56e 100644 --- a/crates/sim/src/simulation/mempool.rs +++ b/crates/sim/src/simulation/mempool.rs @@ -13,8 +13,8 @@ use std::{collections::HashMap, str::FromStr}; -use ethers::types::{Address, Opcode, H256, U256}; -use rundler_types::{Entity, EntityType}; +use ethers::types::{Address, H256, U256}; +use rundler_types::{Entity, EntityType, Opcode}; use serde::Deserialize; use serde_with::{serde_as, DisplayFromStr}; @@ -24,11 +24,36 @@ use crate::simulation::SimulationViolation; /// /// Typically read from a JSON file using the `Deserialize` trait. 
#[derive(Debug, Clone, Deserialize, Default)] +#[serde(rename_all = "camelCase")] pub struct MempoolConfig { + /// Entry point address this mempool is associated with. + pub(crate) entry_point: Address, /// Allowlist to match violations against. pub(crate) allowlist: Vec, } +impl MempoolConfig { + /// Return the entrypoint address this mempool is associated with + pub fn entry_point(&self) -> Address { + self.entry_point + } +} + +/// A collection of mempool configurations keyed by their ID. +#[derive(Debug, Clone, Deserialize, Default)] +pub struct MempoolConfigs(HashMap); + +impl MempoolConfigs { + /// Get the mempool configs for a specific entry point address + pub fn get_for_entry_point(&self, entry_point: Address) -> HashMap { + self.0 + .iter() + .filter(|(_, config)| config.entry_point == entry_point) + .map(|(id, config)| (*id, config.clone())) + .collect() + } +} + /// The entity allowed by an allowlist entry. #[derive(Debug, Copy, Clone)] pub(crate) enum AllowEntity { @@ -158,7 +183,7 @@ impl AllowlistEntry { } AllowRule::NotStaked => { if let SimulationViolation::NotStaked(stake_data) = violation { - self.entity.is_allowed(&stake_data.entity) + self.entity.is_allowed(&stake_data.needs_stake) } else { false } @@ -201,10 +226,9 @@ pub(crate) fn match_mempools( #[cfg(test)] mod tests { use ethers::types::U256; - use rundler_types::StorageSlot; + use rundler_types::{pool::NeedsStakeInformation, StorageSlot, ViolationOpCode}; use super::*; - use crate::simulation::{simulation::NeedsStakeInformation, ViolationOpCode}; #[test] fn test_allow_entity_any() { @@ -437,12 +461,10 @@ mod tests { let entry = AllowlistEntry::new(AllowEntity::Address(entity_addr), AllowRule::NotStaked); let violation = SimulationViolation::NotStaked(Box::new(NeedsStakeInformation { - entity: Entity { - kind: EntityType::Account, - address: entity_addr, - }, + needs_stake: Entity::paymaster(entity_addr), + accessing_entity: EntityType::Paymaster, accessed_entity: 
Some(EntityType::Paymaster), - accessed_address: Address::random(), + accessed_address: entity_addr, slot: U256::zero(), min_stake: U256::zero(), min_unstake_delay: U256::zero(), @@ -451,12 +473,10 @@ mod tests { assert!(entry.is_allowed(&violation)); let violation = SimulationViolation::NotStaked(Box::new(NeedsStakeInformation { - entity: Entity { - kind: EntityType::Account, - address: Address::random(), - }, + needs_stake: Entity::paymaster(Address::random()), + accessing_entity: EntityType::Paymaster, accessed_entity: Some(EntityType::Paymaster), - accessed_address: Address::random(), + accessed_address: entity_addr, slot: U256::zero(), min_stake: U256::zero(), min_unstake_delay: U256::zero(), @@ -473,6 +493,7 @@ mod tests { ( H256::random(), MempoolConfig { + entry_point: Address::random(), allowlist: vec![AllowlistEntry::new( AllowEntity::Type(EntityType::Account), AllowRule::ForbiddenOpcode { @@ -505,6 +526,7 @@ mod tests { ( H256::random(), MempoolConfig { + entry_point: Address::random(), allowlist: vec![AllowlistEntry::new( AllowEntity::Type(EntityType::Account), AllowRule::ForbiddenOpcode { @@ -549,6 +571,7 @@ mod tests { ( mempool1, MempoolConfig { + entry_point: Address::random(), allowlist: vec![AllowlistEntry::new( AllowEntity::Type(EntityType::Account), AllowRule::ForbiddenOpcode { @@ -584,6 +607,7 @@ mod tests { ( mempool1, MempoolConfig { + entry_point: Address::random(), allowlist: vec![ AllowlistEntry::new( AllowEntity::Type(EntityType::Account), @@ -605,6 +629,7 @@ mod tests { ( mempool2, MempoolConfig { + entry_point: Address::random(), allowlist: vec![ AllowlistEntry::new( AllowEntity::Type(EntityType::Account), diff --git a/crates/sim/src/simulation/mod.rs b/crates/sim/src/simulation/mod.rs index 79347ecd..eb035d48 100644 --- a/crates/sim/src/simulation/mod.rs +++ b/crates/sim/src/simulation/mod.rs @@ -11,19 +11,193 @@ // You should have received a copy of the GNU General Public License along with Rundler. 
// If not, see https://www.gnu.org/licenses/. -#[allow(clippy::module_inception)] -mod simulation; +use std::collections::HashSet; + +use anyhow::Error; +use ethers::types::{Address, H256, U256}; #[cfg(feature = "test-utils")] -pub use simulation::MockSimulator; -pub use simulation::{ - EntityInfo, EntityInfos, NeedsStakeInformation, Settings, SimulationError, SimulationResult, - SimulationViolation, Simulator, SimulatorImpl, ViolationOpCode, +use mockall::automock; +use rundler_provider::AggregatorSimOut; +use rundler_types::{ + pool::{MempoolError, SimulationViolation}, + EntityInfos, UserOperation, ValidTimeRange, }; +mod context; +pub use context::ValidationContextProvider; + mod mempool; -pub use mempool::MempoolConfig; +pub use mempool::{MempoolConfig, MempoolConfigs}; + +mod simulator; +pub use simulator::{new_v0_6_simulator, new_v0_7_simulator, SimulatorImpl}; + +mod unsafe_sim; +pub use unsafe_sim::UnsafeSimulator; + +/// Entry Point v0.6 Tracing +pub mod v0_6; +/// Entry Point v0.7 Tracing +pub mod v0_7; + +use crate::{ExpectedStorage, ViolationError}; + +/// The result of a successful simulation +#[derive(Clone, Debug, Default)] +pub struct SimulationResult { + /// The mempool IDs that support this operation + pub mempools: Vec, + /// Block hash this operation was simulated against + pub block_hash: H256, + /// Block number this operation was simulated against + pub block_number: Option, + /// Gas used in the pre-op phase of simulation measured + /// by the entry point + pub pre_op_gas: U256, + /// The time range for which this operation is valid + pub valid_time_range: ValidTimeRange, + /// If using an aggregator, the result of the aggregation + /// simulation + pub aggregator: Option, + /// Code hash of all accessed contracts + pub code_hash: H256, + /// Whether the sender account is staked + pub account_is_staked: bool, + /// List of all addresses accessed during validation + pub accessed_addresses: HashSet
, + /// List of addresses that have associated storage slots + /// accessed within the simulation + pub associated_addresses: HashSet
, + /// Expected storage values for all accessed slots during validation + pub expected_storage: ExpectedStorage, + /// Whether the operation requires a post-op + pub requires_post_op: bool, + /// All the entities used in this operation and their staking state + pub entity_infos: EntityInfos, +} + +impl SimulationResult { + /// Get the aggregator address if one was used + pub fn aggregator_address(&self) -> Option
{ + self.aggregator.as_ref().map(|agg| agg.address) + } +} + +/// The result of a failed simulation. We return a list of the violations that ocurred during the failed simulation +/// and also information about all the entities used in the op to handle entity penalties +#[derive(Clone, Debug)] +pub struct SimulationError { + /// A list of violations that occurred during simulation, or some other error that occurred not directly related to simulation rules + pub violation_error: ViolationError, + /// The addresses and staking states of all the entities involved in an op. This value is None when simulation fails at a point where we are no + pub entity_infos: Option, +} + +impl From for SimulationError { + fn from(error: Error) -> Self { + SimulationError { + violation_error: ViolationError::Other(error), + entity_infos: None, + } + } +} + +impl From> for SimulationError { + fn from(violation_error: ViolationError) -> Self { + SimulationError { + violation_error, + entity_infos: None, + } + } +} + +impl From for MempoolError { + fn from(mut error: SimulationError) -> Self { + let SimulationError { + violation_error, .. + } = &mut error; + let ViolationError::Violations(violations) = violation_error else { + return Self::Other((*violation_error).clone().into()); + }; + + let Some(violation) = violations.iter_mut().min() else { + return Self::Other((*violation_error).clone().into()); + }; + + // extract violation and replace with dummy + Self::SimulationViolation(std::mem::replace( + violation, + SimulationViolation::DidNotRevert, + )) + } +} + +/// Simulator trait for running user operation simulations +#[cfg_attr(feature = "test-utils", automock(type UO = rundler_types::v0_6::UserOperation;))] +#[async_trait::async_trait] +pub trait Simulator: Send + Sync + 'static { + /// The type of user operation that this simulator can handle + type UO: UserOperation; + + /// Simulate a user operation, returning simulation information + /// upon success, or simulation violations. 
+ async fn simulate_validation( + &self, + op: Self::UO, + block_hash: Option, + expected_code_hash: Option, + ) -> Result; +} + +/// Simulation Settings +#[derive(Debug, Clone)] +pub struct Settings { + /// The minimum amount of time that a staked entity must have configured as + /// their unstake delay on the entry point contract in order to be considered staked. + pub min_unstake_delay: u32, + /// The minimum amount of stake that a staked entity must have on the entry point + /// contract in order to be considered staked. + pub min_stake_value: u128, + /// The maximum amount of gas that can be used during the simulation call + pub max_simulate_handle_ops_gas: u64, + /// The maximum amount of verification gas that can be used during the simulation call + pub max_verification_gas: u64, + /// The max duration of the custom javascript tracer. Must be in a format parseable by the + /// ParseDuration function on an ethereum node. See Docs: https://pkg.go.dev/time#ParseDuration + pub tracer_timeout: String, +} -mod tracer; -pub use tracer::{SimulateValidationTracer, SimulateValidationTracerImpl}; +impl Settings { + /// Create new settings + pub fn new( + min_unstake_delay: u32, + min_stake_value: u128, + max_simulate_handle_ops_gas: u64, + max_verification_gas: u64, + tracer_timeout: String, + ) -> Self { + Self { + min_unstake_delay, + min_stake_value, + max_simulate_handle_ops_gas, + max_verification_gas, + tracer_timeout, + } + } +} -mod validation_results; +#[cfg(any(test, feature = "test-utils"))] +impl Default for Settings { + fn default() -> Self { + Self { + // one day in seconds: defined in the ERC-4337 spec + min_unstake_delay: 84600, + // 10^18 wei = 1 eth + min_stake_value: 1_000_000_000_000_000_000, + // 550 million gas: currently the defaults for Alchemy eth_call + max_simulate_handle_ops_gas: 550_000_000, + max_verification_gas: 5_000_000, + tracer_timeout: "10s".to_string(), + } + } +} diff --git a/crates/sim/src/simulation/simulation.rs 
b/crates/sim/src/simulation/simulation.rs deleted file mode 100644 index d3aa752b..00000000 --- a/crates/sim/src/simulation/simulation.rs +++ /dev/null @@ -1,1369 +0,0 @@ -// This file is part of Rundler. -// -// Rundler is free software: you can redistribute it and/or modify it under the -// terms of the GNU Lesser General Public License as published by the Free Software -// Foundation, either version 3 of the License, or (at your option) any later version. -// -// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; -// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License along with Rundler. -// If not, see https://www.gnu.org/licenses/. - -use std::{ - collections::{HashMap, HashSet}, - mem, - ops::Deref, - sync::Arc, -}; - -use anyhow::Error; -use async_trait::async_trait; -use ethers::{ - abi::AbiDecode, - types::{Address, BlockId, Opcode, H256, U256}, -}; -use indexmap::IndexSet; -#[cfg(feature = "test-utils")] -use mockall::automock; -use rundler_provider::{AggregatorOut, AggregatorSimOut, Provider}; -use rundler_types::{ - contracts::i_entry_point::FailedOp, Entity, EntityType, StorageSlot, UserOperation, - ValidTimeRange, -}; -use strum::IntoEnumIterator; - -use super::{ - mempool::{match_mempools, AllowEntity, AllowRule, MempoolConfig, MempoolMatchResult}, - tracer::{ - parse_combined_tracer_str, AccessInfo, AssociatedSlotsByAddress, SimulateValidationTracer, - SimulationTracerOutput, - }, - validation_results::{StakeInfo, ValidationOutput, ValidationReturnInfo}, -}; -use crate::{ - types::{ExpectedStorage, ViolationError}, - utils, -}; - -/// The result of a successful simulation -#[derive(Clone, Debug, Default)] -pub struct SimulationResult { - /// The mempool IDs that support this operation - pub mempools: Vec, - /// Block hash this operation was simulated 
against - pub block_hash: H256, - /// Block number this operation was simulated against - pub block_number: Option, - /// Gas used in the pre-op phase of simulation measured - /// by the entry point - pub pre_op_gas: U256, - /// The time range for which this operation is valid - pub valid_time_range: ValidTimeRange, - /// If using an aggregator, the result of the aggregation - /// simulation - pub aggregator: Option, - /// Code hash of all accessed contracts - pub code_hash: H256, - /// List of used entities that need to be staked for this operation - /// to be valid - pub entities_needing_stake: Vec, - /// Whether the sender account is staked - pub account_is_staked: bool, - /// List of all addresses accessed during validation - pub accessed_addresses: HashSet
, - /// List of addresses that have associated storage slots - /// accessed within the simulation - pub associated_addresses: HashSet
, - /// Expected storage values for all accessed slots during validation - pub expected_storage: ExpectedStorage, - /// Whether the operation requires a post-op - pub requires_post_op: bool, - /// All the entities used in this operation and their staking state - pub entity_infos: EntityInfos, -} - -impl SimulationResult { - /// Get the aggregator address if one was used - pub fn aggregator_address(&self) -> Option
{ - self.aggregator.as_ref().map(|agg| agg.address) - } -} - -/// The result of a failed simulation. We return a list of the violations that ocurred during the failed simulation -/// and also information about all the entities used in the op to handle entity penalties -#[derive(Clone, Debug)] -pub struct SimulationError { - /// A list of violations that occurred during simulation, or some other error that occurred not directly related to simulation rules - pub violation_error: ViolationError, - /// The addresses and staking states of all the entities involved in an op. This value is None when simulation fails at a point where we are no - pub entity_infos: Option, -} - -impl From for SimulationError { - fn from(error: Error) -> Self { - SimulationError { - violation_error: ViolationError::Other(error), - entity_infos: None, - } - } -} - -/// Simulator trait for running user operation simulations -#[cfg_attr(feature = "test-utils", automock)] -#[async_trait::async_trait] -pub trait Simulator: Send + Sync + 'static { - /// Simulate a user operation, returning simulation information - /// upon success, or simulation violations. - async fn simulate_validation( - &self, - op: UserOperation, - block_hash: Option, - expected_code_hash: Option, - ) -> Result; -} - -/// Simulator implementation. -/// -/// This simulator supports the use of "alternative mempools". -/// During simulation, the simulator will check the violations found -/// against the mempool configurations provided in the constructor. -/// -/// If a mempool is found to support all of the associated violations, -/// it will be included in the list of mempools returned by the simulator. -/// -/// If no mempools are found, the simulator will return an error containing -/// the violations. -#[derive(Debug)] -pub struct SimulatorImpl { - provider: Arc

, - entry_point_address: Address, - simulate_validation_tracer: T, - sim_settings: Settings, - mempool_configs: HashMap, - allow_unstaked_addresses: HashSet

, -} - -impl SimulatorImpl -where - P: Provider, - T: SimulateValidationTracer, -{ - /// Create a new simulator - /// - /// `mempool_configs` is a map of mempool IDs to mempool configurations. - /// It is used during simulation to determine which mempools support - /// the violations found during simulation. - pub fn new( - provider: Arc

, - entry_point_address: Address, - simulate_validation_tracer: T, - sim_settings: Settings, - mempool_configs: HashMap, - ) -> Self { - // Get a list of entities that are allowed to act as staked entities despite being unstaked - let mut allow_unstaked_addresses = HashSet::new(); - for config in mempool_configs.values() { - for entry in &config.allowlist { - if entry.rule == AllowRule::NotStaked { - if let AllowEntity::Address(address) = entry.entity { - allow_unstaked_addresses.insert(address); - } - } - } - } - - Self { - provider, - entry_point_address, - simulate_validation_tracer, - sim_settings, - mempool_configs, - allow_unstaked_addresses, - } - } - - /// Return the associated settings - pub fn settings(&self) -> &Settings { - &self.sim_settings - } - - // Run the tracer and transform the output. - // Any violations during this stage are errors. - async fn create_context( - &self, - op: UserOperation, - block_id: BlockId, - ) -> Result { - let factory_address = op.factory(); - let sender_address = op.sender; - let paymaster_address = op.paymaster(); - println!("HC simulation.rs create_context {:?}", op.clone()); - let tracer_out = self - .simulate_validation_tracer - .trace_simulate_validation(op.clone(), block_id, self.sim_settings.max_verification_gas) - .await?; - let num_phases = tracer_out.phases.len() as u32; - // Check if there are too many phases here, then check too few at the - // end. We are detecting cases where the entry point is broken. Too many - // phases definitely means it's broken, but too few phases could still - // mean the entry point is fine if one of the phases fails and it - // doesn't reach the end of execution. - if num_phases > 3 { - Err(SimulationError { - violation_error: ViolationError::Violations(vec![ - SimulationViolation::WrongNumberOfPhases(num_phases), - ]), - entity_infos: None, - })? 
- } - let Some(ref revert_data) = tracer_out.revert_data else { - Err(SimulationError { - violation_error: ViolationError::Violations(vec![ - SimulationViolation::DidNotRevert, - ]), - entity_infos: None, - })? - }; - let last_entity_type = - entity_type_from_simulation_phase(tracer_out.phases.len() - 1).unwrap(); - - if let Ok(failed_op) = FailedOp::decode_hex(revert_data) { - let entity_addr = match last_entity_type { - EntityType::Factory => factory_address, - EntityType::Paymaster => paymaster_address, - EntityType::Account => Some(sender_address), - _ => None, - }; - Err(SimulationError { - violation_error: ViolationError::Violations(vec![ - SimulationViolation::UnintendedRevertWithMessage( - last_entity_type, - failed_op.reason, - entity_addr, - ), - ]), - entity_infos: None, - })? - } - let Ok(entry_point_out) = ValidationOutput::decode_hex(revert_data) else { - let entity_addr = match last_entity_type { - EntityType::Factory => factory_address, - EntityType::Paymaster => paymaster_address, - EntityType::Account => Some(sender_address), - _ => None, - }; - Err(SimulationError { - violation_error: ViolationError::Violations(vec![ - SimulationViolation::UnintendedRevert(last_entity_type, entity_addr), - ]), - entity_infos: None, - })? - }; - let entity_infos = EntityInfos::new( - factory_address, - sender_address, - paymaster_address, - &entry_point_out, - self.sim_settings, - ); - if num_phases < 3 { - Err(SimulationError { - violation_error: ViolationError::Violations(vec![ - SimulationViolation::WrongNumberOfPhases(num_phases), - ]), - entity_infos: Some(entity_infos), - })? 
- }; - - let associated_addresses = tracer_out.associated_slots_by_address.addresses(); - - Ok(ValidationContext { - block_id, - entity_infos, - tracer_out, - entry_point_out, - associated_addresses, - entities_needing_stake: vec![], - accessed_addresses: HashSet::new(), - initcode_length: op.init_code.len(), - }) - } - - async fn validate_aggregator_signature( - &self, - op: UserOperation, - aggregator_address: Option

, - gas_cap: u64, - ) -> anyhow::Result { - let Some(aggregator_address) = aggregator_address else { - return Ok(AggregatorOut::NotNeeded); - }; - - Ok(self - .provider - .clone() - .validate_user_op_signature(aggregator_address, op, gas_cap) - .await?) - } - - // Parse the output from tracing and return a list of violations. - // Most violations found during this stage are allowlistable and can be added - // to the list of allowlisted violations on a given mempool. - fn gather_context_violations( - &self, - context: &mut ValidationContext, - ) -> anyhow::Result> { - let &mut ValidationContext { - ref entity_infos, - ref tracer_out, - ref entry_point_out, - ref mut entities_needing_stake, - ref mut accessed_addresses, - initcode_length, - .. - } = context; - println!("HC trace {:?}", tracer_out); - let mut violations = vec![]; - - if entry_point_out.return_info.sig_failed { - violations.push(SimulationViolation::InvalidSignature); - } - - let sender_address = entity_infos.sender_address(); - let mut entity_types_needing_stake: HashMap, U256)> = - HashMap::new(); - - for (index, phase) in tracer_out.phases.iter().enumerate().take(3) { - let kind = entity_type_from_simulation_phase(index).unwrap(); - let Some(entity_info) = entity_infos.get(kind) else { - continue; - }; - let entity = Entity { - kind, - address: entity_info.address, - }; - for opcode in &phase.forbidden_opcodes_used { - let (contract, opcode) = parse_combined_tracer_str(opcode)?; - violations.push(SimulationViolation::UsedForbiddenOpcode( - entity, - contract, - ViolationOpCode(opcode), - )); - } - - for (addr, opcode) in &phase.ext_code_access_info { - if *addr == self.entry_point_address { - violations.push(SimulationViolation::UsedForbiddenOpcode( - entity, - *addr, - ViolationOpCode(*opcode), - )); - } - } - - for precompile in &phase.forbidden_precompiles_used { - let (contract, precompile) = parse_combined_tracer_str(precompile)?; - violations.push(SimulationViolation::UsedForbiddenPrecompile( 
- entity, contract, precompile, - )); - } - - if entity.kind == EntityType::Paymaster - && !entry_point_out.return_info.paymaster_context.is_empty() - && !entity_info.is_staked - { - // [EREP-050] - violations.push(SimulationViolation::UnstakedPaymasterContext); - } - - let mut banned_slots_accessed = IndexSet::::new(); - for (addr, access_info) in &phase.storage_accesses { - let address = *addr; - accessed_addresses.insert(address); - - let violation = parse_storage_accesses(ParseStorageAccess { - access_info, - slots_by_address: &tracer_out.associated_slots_by_address, - address, - sender: sender_address, - entrypoint: self.entry_point_address, - initcode_length, - entity: &entity, - entity_infos, - })?; - - match violation { - StorageRestriction::Allowed => {} - StorageRestriction::NeedsStake(addr, entity_type, slot) => { - if !entity_info.is_staked { - entity_types_needing_stake.insert(entity, (addr, entity_type, slot)); - } - } - StorageRestriction::Banned(slot) => { - banned_slots_accessed.insert(StorageSlot { address, slot }); - } - } - } - - for slot in banned_slots_accessed { - violations.push(SimulationViolation::InvalidStorageAccess(entity, slot)); - } - let non_sender_called_with_value = phase - .addresses_calling_with_value - .iter() - .any(|address| address != &sender_address); - if non_sender_called_with_value || phase.called_non_entry_point_with_value { - violations.push(SimulationViolation::CallHadValue(entity)); - } - if phase.called_banned_entry_point_method { - violations.push(SimulationViolation::CalledBannedEntryPointMethod(entity)); - } - - // These violations are not allowlistable but we need to collect them here - if phase.ran_out_of_gas { - violations.push(SimulationViolation::OutOfGas(entity)); - } - for &address in &phase.undeployed_contract_accesses { - violations.push(SimulationViolation::AccessedUndeployedContract( - entity, address, - )) - } - } - - if let Some(aggregator_info) = entry_point_out.aggregator_info { - if 
!is_staked(aggregator_info.stake_info, self.sim_settings) { - violations.push(SimulationViolation::UnstakedAggregator) - } - } - - for (ent, (accessed_address, accessed_entity, slot)) in entity_types_needing_stake { - entities_needing_stake.push(ent.kind); - - violations.push(SimulationViolation::NotStaked(Box::new( - NeedsStakeInformation { - entity: ent, - accessed_address, - accessed_entity, - slot, - min_stake: self.sim_settings.min_stake_value.into(), - min_unstake_delay: self.sim_settings.min_unstake_delay.into(), - }, - ))); - } - - if tracer_out.factory_called_create2_twice { - let factory = entity_infos.get(EntityType::Factory); - match factory { - Some(factory) => { - violations.push(SimulationViolation::FactoryCalledCreate2Twice( - factory.address, - )); - } - None => { - // weird case where CREATE2 is called > 1, but there isn't a factory - // defined. This should never happen, blame the violation on the entry point. - violations.push(SimulationViolation::FactoryCalledCreate2Twice( - self.entry_point_address, - )); - } - } - } - - Ok(violations) - } - - // Check the code hash of the entities associated with the user operation - // if needed, validate that the signature is valid for the aggregator. - // Violations during this stage are always errors. - async fn check_contracts( - &self, - op: UserOperation, - context: &mut ValidationContext, - expected_code_hash: Option, - ) -> Result<(H256, Option), SimulationError> { - let &mut ValidationContext { - block_id, - ref mut tracer_out, - ref entry_point_out, - .. 
- } = context; - - // collect a vector of violations to ensure a deterministic error message - let mut violations = vec![]; - - let aggregator_address = entry_point_out.aggregator_info.map(|info| info.address); - let code_hash_future = utils::get_code_hash( - self.provider.deref(), - mem::take(&mut tracer_out.accessed_contract_addresses), - Some(block_id), - ); - let aggregator_signature_future = self.validate_aggregator_signature( - op, - aggregator_address, - self.sim_settings.max_verification_gas, - ); - - let (code_hash, aggregator_out) = - tokio::try_join!(code_hash_future, aggregator_signature_future)?; - - if let Some(expected_code_hash) = expected_code_hash { - if expected_code_hash != code_hash { - violations.push(SimulationViolation::CodeHashChanged) - } - } - let aggregator = match aggregator_out { - AggregatorOut::NotNeeded => None, - AggregatorOut::SuccessWithInfo(info) => Some(info), - AggregatorOut::ValidationReverted => { - violations.push(SimulationViolation::AggregatorValidationFailed); - None - } - }; - - if !violations.is_empty() { - return Err(SimulationError { - violation_error: ViolationError::Violations(violations), - entity_infos: None, - }); - } - - Ok((code_hash, aggregator)) - } -} - -#[async_trait] -impl Simulator for SimulatorImpl -where - P: Provider, - T: SimulateValidationTracer, -{ - async fn simulate_validation( - &self, - op: UserOperation, - block_hash: Option, - expected_code_hash: Option, - ) -> Result { - let (block_hash, block_number) = match block_hash { - // If we are given a block_hash, we return a None block number, avoiding an extra call - Some(block_hash) => (block_hash, None), - None => { - let hash_and_num = self - .provider - .get_latest_block_hash_and_number() - .await - .map_err(anyhow::Error::from)?; - (hash_and_num.0, Some(hash_and_num.1.as_u64())) - } - }; - let block_id = block_hash.into(); - let mut context = match self.create_context(op.clone(), block_id).await { - Ok(context) => context, - error @ Err(_) => 
error?, - }; - - // Gather all violations from the tracer - let mut overridable_violations = self.gather_context_violations(&mut context)?; - // Sort violations so that the final error message is deterministic - overridable_violations.sort(); - // Check violations against mempool rules, find supporting mempools, error if none found - let mempools = match match_mempools(&self.mempool_configs, &overridable_violations) { - MempoolMatchResult::Matches(pools) => pools, - MempoolMatchResult::NoMatch(i) => { - return Err(SimulationError { - violation_error: ViolationError::Violations(vec![ - overridable_violations[i].clone() - ]), - entity_infos: Some(context.entity_infos), - }) - } - }; - - // Check code hash and aggregator signature, these can't fail - let (code_hash, aggregator) = self - .check_contracts(op, &mut context, expected_code_hash) - .await?; - - // Transform outputs into success struct - let ValidationContext { - tracer_out, - entry_point_out, - entities_needing_stake, - accessed_addresses, - associated_addresses, - .. - } = context; - let ValidationOutput { - return_info, - sender_info, - .. - } = entry_point_out; - let account_is_staked = is_staked(sender_info, self.sim_settings); - let ValidationReturnInfo { - pre_op_gas, - valid_after, - valid_until, - paymaster_context, - .. 
- } = return_info; - - // Conduct any stake overrides before assigning entity_infos - context - .entity_infos - .override_is_staked(&self.allow_unstaked_addresses); - - Ok(SimulationResult { - mempools, - block_hash, - block_number, - pre_op_gas, - valid_time_range: ValidTimeRange::new(valid_after, valid_until), - aggregator, - code_hash, - entities_needing_stake, - account_is_staked, - accessed_addresses, - associated_addresses, - expected_storage: tracer_out.expected_storage, - requires_post_op: !paymaster_context.is_empty(), - entity_infos: context.entity_infos, - }) - } -} - -/// All possible simulation violations -#[derive(Clone, Debug, parse_display::Display, Ord, Eq, PartialOrd, PartialEq)] -pub enum SimulationViolation { - // Make sure to maintain the order here based on the importance - // of the violation for converting to an JSON RPC error - /// The user operation signature is invalid - #[display("invalid signature")] - InvalidSignature, - /// The user operation used an opcode that is not allowed - #[display("{0.kind} uses banned opcode: {2} in contract {1:?}")] - UsedForbiddenOpcode(Entity, Address, ViolationOpCode), - /// The user operation used a precompile that is not allowed - #[display("{0.kind} uses banned precompile: {2:?} in contract {1:?}")] - UsedForbiddenPrecompile(Entity, Address, Address), - /// The user operation accessed a contract that has not been deployed - #[display( - "{0.kind} tried to access code at {1} during validation, but that address is not a contract" - )] - AccessedUndeployedContract(Entity, Address), - /// The user operation factory entity called CREATE2 more than once during initialization - #[display("factory may only call CREATE2 once during initialization")] - FactoryCalledCreate2Twice(Address), - /// The user operation accessed a storage slot that is not allowed - #[display("{0.kind} accessed forbidden storage at address {1:?} during validation")] - InvalidStorageAccess(Entity, StorageSlot), - /// The user operation 
called an entry point method that is not allowed - #[display("{0.kind} called entry point method other than depositTo")] - CalledBannedEntryPointMethod(Entity), - /// The user operation made a call that contained value to a contract other than the entrypoint - /// during validation - #[display("{0.kind} must not send ETH during validation (except from account to entry point)")] - CallHadValue(Entity), - /// The code hash of accessed contracts changed on the second simulation - #[display("code accessed by validation has changed since the last time validation was run")] - CodeHashChanged, - /// The user operation contained an entity that accessed storage without being staked - #[display("Unstaked {0.entity} accessed {0.accessed_address} ({0.accessed_entity:?}) at slot {0.slot}")] - NotStaked(Box), - /// The user operation uses a paymaster that returns a context while being unstaked - #[display("Unstaked paymaster must not return context")] - UnstakedPaymasterContext, - /// The user operation uses an aggregator entity and it is not staked - #[display("An aggregator must be staked, regardless of storager usage")] - UnstakedAggregator, - /// Simulation reverted with an unintended reason, containing a message - #[display("reverted while simulating {0} validation: {1}")] - UnintendedRevertWithMessage(EntityType, String, Option
), - /// Simulation reverted with an unintended reason - #[display("reverted while simulating {0} validation")] - UnintendedRevert(EntityType, Option
), - /// Simulation did not revert, a revert is always expected - #[display("simulateValidation did not revert. Make sure your EntryPoint is valid")] - DidNotRevert, - /// Simulation had the wrong number of phases - #[display("simulateValidation should have 3 parts but had {0} instead. Make sure your EntryPoint is valid")] - WrongNumberOfPhases(u32), - /// The user operation ran out of gas during validation - #[display("ran out of gas during {0.kind} validation")] - OutOfGas(Entity), - /// The user operation aggregator signature validation failed - #[display("aggregator signature validation failed")] - AggregatorValidationFailed, -} - -/// A wrapper around Opcode that implements extra traits -#[derive(Debug, PartialEq, Clone, parse_display::Display, Eq)] -#[display("{0:?}")] -pub struct ViolationOpCode(pub Opcode); - -impl PartialOrd for ViolationOpCode { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for ViolationOpCode { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - let left = self.0 as i32; - let right = other.0 as i32; - - left.cmp(&right) - } -} - -fn entity_type_from_simulation_phase(i: usize) -> Option { - match i { - 0 => Some(EntityType::Factory), - 1 => Some(EntityType::Account), - 2 => Some(EntityType::Paymaster), - _ => None, - } -} - -#[derive(Debug)] -struct ValidationContext { - block_id: BlockId, - entity_infos: EntityInfos, - tracer_out: SimulationTracerOutput, - entry_point_out: ValidationOutput, - entities_needing_stake: Vec, - accessed_addresses: HashSet
, - initcode_length: usize, - associated_addresses: HashSet
, -} - -#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] -/// additional context about an entity -pub struct EntityInfo { - /// The address of an entity - pub address: Address, - /// Whether the entity is staked or not - pub is_staked: bool, -} - -impl EntityInfo { - fn override_is_staked(&mut self, allow_unstaked_addresses: &HashSet
) { - self.is_staked = allow_unstaked_addresses.contains(&self.address) || self.is_staked; - } -} - -#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] -/// additional context for all the entities used in an op -pub struct EntityInfos { - /// The entity info for the factory - pub factory: Option, - /// The entity info for the op sender - pub sender: EntityInfo, - /// The entity info for the paymaster - pub paymaster: Option, - /// The entity info for the aggregator - pub aggregator: Option, -} - -impl EntityInfos { - fn new( - factory_address: Option
, - sender_address: Address, - paymaster_address: Option
, - entry_point_out: &ValidationOutput, - sim_settings: Settings, - ) -> Self { - let factory = factory_address.map(|address| EntityInfo { - address, - is_staked: is_staked(entry_point_out.factory_info, sim_settings), - }); - let sender = EntityInfo { - address: sender_address, - is_staked: is_staked(entry_point_out.sender_info, sim_settings), - }; - let paymaster = paymaster_address.map(|address| EntityInfo { - address, - is_staked: is_staked(entry_point_out.paymaster_info, sim_settings), - }); - let aggregator = entry_point_out - .aggregator_info - .map(|aggregator_info| EntityInfo { - address: aggregator_info.address, - is_staked: is_staked(aggregator_info.stake_info, sim_settings), - }); - - Self { - factory, - sender, - paymaster, - aggregator, - } - } - - /// Get iterator over the entities - pub fn entities(&'_ self) -> impl Iterator + '_ { - EntityType::iter().filter_map(|t| self.get(t).map(|info| (t, info))) - } - - fn override_is_staked(&mut self, allow_unstaked_addresses: &HashSet
) { - if let Some(mut factory) = self.factory { - factory.override_is_staked(allow_unstaked_addresses) - } - self.sender.override_is_staked(allow_unstaked_addresses); - if let Some(mut paymaster) = self.paymaster { - paymaster.override_is_staked(allow_unstaked_addresses) - } - if let Some(mut aggregator) = self.aggregator { - aggregator.override_is_staked(allow_unstaked_addresses) - } - } - - /// Get the EntityInfo of a specific entity - pub fn get(self, entity: EntityType) -> Option { - match entity { - EntityType::Factory => self.factory, - EntityType::Account => Some(self.sender), - EntityType::Paymaster => self.paymaster, - EntityType::Aggregator => self.aggregator, - } - } - - fn type_from_address(self, address: Address) -> Option { - if address.eq(&self.sender.address) { - return Some(EntityType::Account); - } - - if let Some(factory) = self.factory { - if address.eq(&factory.address) { - return Some(EntityType::Factory); - } - } - - if let Some(paymaster) = self.paymaster { - if address.eq(&paymaster.address) { - return Some(EntityType::Paymaster); - } - } - - if let Some(aggregator) = self.aggregator { - if address.eq(&aggregator.address) { - return Some(EntityType::Aggregator); - } - } - - None - } - - fn sender_address(self) -> Address { - self.sender.address - } -} - -fn is_staked(info: StakeInfo, sim_settings: Settings) -> bool { - info.stake >= sim_settings.min_stake_value.into() - && info.unstake_delay_sec >= sim_settings.min_unstake_delay.into() -} - -#[derive(Clone, Debug, Eq, PartialEq)] -enum StorageRestriction { - Allowed, - NeedsStake(Address, Option, U256), - Banned(U256), -} - -/// Information about a storage violation based on stake status -#[derive(Debug, PartialEq, Clone, PartialOrd, Eq, Ord)] -pub struct NeedsStakeInformation { - /// Entity of stake information - pub entity: Entity, - /// Address that was accessed while unstaked - pub accessed_address: Address, - /// Type of accessed entity if it is a known entity - pub accessed_entity: 
Option, - /// The accessed slot number - pub slot: U256, - /// Minumum stake - pub min_stake: U256, - /// Minumum delay after an unstake event - pub min_unstake_delay: U256, -} - -#[derive(Clone, Debug)] -struct ParseStorageAccess<'a> { - access_info: &'a AccessInfo, - slots_by_address: &'a AssociatedSlotsByAddress, - address: Address, - sender: Address, - entrypoint: Address, - initcode_length: usize, - entity: &'a Entity, - entity_infos: &'a EntityInfos, -} - -fn parse_storage_accesses(args: ParseStorageAccess<'_>) -> Result { - let ParseStorageAccess { - access_info, - address, - sender, - entrypoint, - entity_infos, - entity, - slots_by_address, - initcode_length, - .. - } = args; - - if address.eq(&sender) || address.eq(&entrypoint) { - return Ok(StorageRestriction::Allowed); - } - - let mut required_stake_slot = None; - - let slots: Vec<&U256> = access_info - .reads - .keys() - .chain(access_info.writes.keys()) - .collect(); - - for slot in slots { - let is_sender_associated = slots_by_address.is_associated_slot(sender, *slot); - // [STO-032] - let is_entity_associated = slots_by_address.is_associated_slot(entity.address, *slot); - // [STO-031] - let is_same_address = address.eq(&entity.address); - // [STO-033] - let is_read_permission = !access_info.writes.contains_key(slot); - - if is_sender_associated { - if initcode_length > 2 - // special case: account.validateUserOp is allowed to use assoc storage if factory is staked. 
- // [STO-022], [STO-021] - && !(entity.address.eq(&sender) - && entity_infos - .factory - .expect("Factory needs to be present and staked") - .is_staked) - { - required_stake_slot = Some(slot); - } - } else if is_entity_associated || is_same_address || is_read_permission { - required_stake_slot = Some(slot); - } else { - return Ok(StorageRestriction::Banned(*slot)); - } - } - - if let Some(required_stake_slot) = required_stake_slot { - if let Some(entity_type) = entity_infos.type_from_address(address) { - return Ok(StorageRestriction::NeedsStake( - address, - Some(entity_type), - *required_stake_slot, - )); - } - - return Ok(StorageRestriction::NeedsStake( - address, - None, - *required_stake_slot, - )); - } - - Ok(StorageRestriction::Allowed) -} - -/// Simulation Settings -#[derive(Debug, Copy, Clone)] -pub struct Settings { - /// The minimum amount of time that a staked entity must have configured as - /// their unstake delay on the entry point contract in order to be considered staked. - pub min_unstake_delay: u32, - /// The minimum amount of stake that a staked entity must have on the entry point - /// contract in order to be considered staked. 
- pub min_stake_value: u128, - /// The maximum amount of gas that can be used during the simulation call - pub max_simulate_handle_ops_gas: u64, - /// The maximum amount of verification gas that can be used during the simulation call - pub max_verification_gas: u64, -} - -impl Settings { - /// Create new settings - pub fn new( - min_unstake_delay: u32, - min_stake_value: u128, - max_simulate_handle_ops_gas: u64, - max_verification_gas: u64, - ) -> Self { - Self { - min_unstake_delay, - min_stake_value, - max_simulate_handle_ops_gas, - max_verification_gas, - } - } -} - -#[cfg(any(test, feature = "test-utils"))] -impl Default for Settings { - fn default() -> Self { - Self { - // one day in seconds: defined in the ERC-4337 spec - min_unstake_delay: 84600, - // 10^18 wei = 1 eth - min_stake_value: 1_000_000_000_000_000_000, - // 550 million gas: currently the defaults for Alchemy eth_call - max_simulate_handle_ops_gas: 550_000_000, - max_verification_gas: 5_000_000, - } - } -} - -#[cfg(test)] -mod tests { - use std::str::FromStr; - - use ethers::{ - abi::AbiEncode, - providers::JsonRpcError, - types::{Address, BlockNumber, Bytes, U64}, - utils::hex, - }; - use rundler_provider::{AggregatorOut, MockProvider, ProviderError}; - - use super::*; - use crate::simulation::tracer::{MockSimulateValidationTracer, Phase}; - - fn create_base_config() -> (MockProvider, MockSimulateValidationTracer) { - (MockProvider::new(), MockSimulateValidationTracer::new()) - } - - fn get_test_tracer_output() -> SimulationTracerOutput { - SimulationTracerOutput { - accessed_contract_addresses: vec![ - Address::from_str("0x5ff137d4b0fdcd49dca30c7cf57e578a026d2789").unwrap(), - Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4").unwrap(), - Address::from_str("0x8abb13360b87be5eeb1b98647a016add927a136c").unwrap(), - ], - associated_slots_by_address: serde_json::from_str(r#" - { - "0x0000000000000000000000000000000000000000": [ - 
"0xd5c1ebdd81c5c7bebcd52bc11c8d37f7038b3c64f849c2ca58a022abeab1adae", - "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5" - ], - "0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4": [ - "0x3072884cc37d411af7360b34f105e1e860b1631783232a4f2d5c094d365cdaab", - "0xf5357e1da3acf909ceaed3492183cbad85a3c9e1f0076495f66d3eed05219bd5", - "0xf264fff4db20d04721712f34a6b5a8bca69a212345e40a92101082e79bdd1f0a" - ] - } - "#).unwrap(), - factory_called_create2_twice: false, - expected_storage: serde_json::from_str(r#" - { - "0x5ff137d4b0fdcd49dca30c7cf57e578a026d2789": { - "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5": "0x0000000000000000000000000000000000000000000000000000000000000000", - "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb6": "0x0000000000000000000000000000000000000000000000000000000000000000" - } - } - "#).unwrap(), - phases: vec![ - Phase { - addresses_calling_with_value: vec![], - called_banned_entry_point_method: false, - called_non_entry_point_with_value: false, - forbidden_opcodes_used: vec![], - forbidden_precompiles_used: vec![], - ran_out_of_gas: false, - storage_accesses: HashMap::new(), - undeployed_contract_accesses: vec![], - ext_code_access_info: HashMap::new(), - }, - Phase { - addresses_calling_with_value: vec![Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4").unwrap()], - called_banned_entry_point_method: false, - called_non_entry_point_with_value: false, - forbidden_opcodes_used: vec![], - forbidden_precompiles_used: vec![], - ran_out_of_gas: false, - storage_accesses: HashMap::new(), - undeployed_contract_accesses: vec![], - ext_code_access_info: HashMap::new(), - }, - Phase { - addresses_calling_with_value: vec![], - called_banned_entry_point_method: false, - called_non_entry_point_with_value: false, - forbidden_opcodes_used: vec![], - forbidden_precompiles_used: vec![], - ran_out_of_gas: false, - storage_accesses: HashMap::new(), - undeployed_contract_accesses: vec![], - 
ext_code_access_info: HashMap::new(), - } - ], - revert_data: Some("0xe0cff05f00000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000014eff00000000000000000000000000000000000000000000000000000b7679c50c24000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffff00000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000".into()), - } - } - - fn create_simulator( - provider: MockProvider, - simulate_validation_tracer: MockSimulateValidationTracer, - ) -> SimulatorImpl { - let settings = Settings::default(); - - let mut mempool_configs = HashMap::new(); - mempool_configs.insert(H256::zero(), MempoolConfig::default()); - - let provider = Arc::new(provider); - - let simulator: SimulatorImpl = - SimulatorImpl::new( - Arc::clone(&provider), - Address::from_str("0x5ff137d4b0fdcd49dca30c7cf57e578a026d2789").unwrap(), - simulate_validation_tracer, - settings, - mempool_configs, - ); - - simulator - } - - #[tokio::test] - async fn test_simulate_validation() { - let (mut provider, mut tracer) = create_base_config(); - - provider - .expect_get_latest_block_hash_and_number() - .returning(|| { - Ok(( - H256::from_str( - "0x38138f1cb4653ab6ab1c89ae3a6acc8705b54bd16a997d880c4421014ed66c3d", - ) - .unwrap(), - U64::zero(), - )) - }); - - tracer - .expect_trace_simulate_validation() - .returning(move |_, _, _| Ok(get_test_tracer_output())); - 
- // The underlying eth_call when getting the code hash in check_contracts - provider.expect_call().returning(|_, _, _| { - let json_rpc_error = JsonRpcError { - code: -32000, - message: "execution reverted".to_string(), - data: Some(serde_json::Value::String( - "0x091cd005abf68e7b82c951a8619f065986132f67a0945153533cfcdd93b6895f33dbc0c7" - .to_string(), - )), - }; - Err(ProviderError::JsonRpcError(json_rpc_error)) - }); - - provider - .expect_validate_user_op_signature() - .returning(|_, _, _| Ok(AggregatorOut::NotNeeded)); - - let user_operation = UserOperation { - sender: Address::from_str("b856dbd4fa1a79a46d426f537455e7d3e79ab7c4").unwrap(), - nonce: U256::from(264), - init_code: Bytes::from_str("0x").unwrap(), - call_data: Bytes::from_str("0xb61d27f6000000000000000000000000b856dbd4fa1a79a46d426f537455e7d3e79ab7c4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000004d087d28800000000000000000000000000000000000000000000000000000000").unwrap(), - call_gas_limit: U256::from(9100), - verification_gas_limit: U256::from(64805), - pre_verification_gas: U256::from(46128), - max_fee_per_gas: U256::from(105000100), - max_priority_fee_per_gas: U256::from(105000000), - paymaster_and_data: Bytes::from_str("0x").unwrap(), - signature: Bytes::from_str("0x98f89993ce573172635b44ef3b0741bd0c19dd06909d3539159f6d66bef8c0945550cc858b1cf5921dfce0986605097ba34c2cf3fc279154dd25e161ea7b3d0f1c").unwrap(), - }; - - let simulator = create_simulator(provider, tracer); - let res = simulator - .simulate_validation(user_operation, None, None) - .await; - assert!(res.is_ok()); - } - - #[tokio::test] - async fn test_create_context_two_phases_unintended_revert() { - let (provider, mut tracer) = create_base_config(); - - tracer - .expect_trace_simulate_validation() - .returning(|_, _, _| { - let mut tracer_output = get_test_tracer_output(); - 
tracer_output.revert_data = Some(hex::encode( - FailedOp { - op_index: U256::from(100), - reason: "AA23 reverted (or OOG)".to_string(), - } - .encode(), - )); - Ok(tracer_output) - }); - - let user_operation = UserOperation { - sender: Address::from_str("b856dbd4fa1a79a46d426f537455e7d3e79ab7c4").unwrap(), - nonce: U256::from(264), - init_code: Bytes::from_str("0x").unwrap(), - call_data: Bytes::from_str("0xb61d27f6000000000000000000000000b856dbd4fa1a79a46d426f537455e7d3e79ab7c4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000004d087d28800000000000000000000000000000000000000000000000000000000").unwrap(), - call_gas_limit: U256::from(9100), - verification_gas_limit: U256::from(64805), - pre_verification_gas: U256::from(46128), - max_fee_per_gas: U256::from(105000100), - max_priority_fee_per_gas: U256::from(105000000), - paymaster_and_data: Bytes::from_str("0x").unwrap(), - signature: Bytes::from_str("0x98f89993ce573172635b44ef3b0741bd0c19dd06909d3539159f6d66bef8c0945550cc858b1cf5921dfce0986605097ba34c2cf3fc279154dd25e161ea7b3d0f1c").unwrap(), - }; - - let simulator = create_simulator(provider, tracer); - let res = simulator - .create_context(user_operation, BlockId::Number(BlockNumber::Latest)) - .await; - - assert!(matches!( - res, - Err(SimulationError { violation_error: ViolationError::Violations(violations), entity_infos: None}) if matches!( - violations.first(), - Some(&SimulationViolation::UnintendedRevertWithMessage( - EntityType::Paymaster, - ref reason, - _ - )) if reason == "AA23 reverted (or OOG)" - ) - )); - } - - #[tokio::test] - async fn test_gather_context_violations() { - let (provider, tracer) = create_base_config(); - - let mut tracer_output = get_test_tracer_output(); - - // add forbidden opcodes and precompiles - tracer_output.phases[1].forbidden_opcodes_used = vec![ - 
String::from("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4:GASPRICE"), - String::from("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4:COINBASE"), - ]; - tracer_output.phases[1].forbidden_precompiles_used = vec![String::from( - "0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4:0x0000000000000000000000000000000000000019", - )]; - - // add a storage access for a random unrelated address - let mut writes = HashMap::new(); - - writes.insert( - H256::from_str("0xa3f946b7ed2f016739c6be6031c5579a53d3784a471c3b5f9c2a1f8706c65a4b") - .unwrap() - .to_fixed_bytes() - .into(), - 1, - ); - - tracer_output.phases[1].storage_accesses.insert( - Address::from_str("0x1c0e100fcf093c64cdaa545b425ad7ed8e8a0db6").unwrap(), - AccessInfo { - reads: HashMap::new(), - writes, - }, - ); - - let mut validation_context = ValidationContext { - initcode_length: 10, - associated_addresses: HashSet::new(), - block_id: BlockId::Number(BlockNumber::Latest), - entity_infos: EntityInfos::new( - Some(Address::from_str("0x5ff137d4b0fdcd49dca30c7cf57e578a026d2789").unwrap()), - Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4").unwrap(), - Some(Address::from_str("0x8abb13360b87be5eeb1b98647a016add927a136c").unwrap()), - &ValidationOutput { - return_info: ValidationReturnInfo::from(( - U256::default(), - U256::default(), - false, - 0, - 0, - Bytes::default(), - )), - sender_info: StakeInfo::from((U256::default(), U256::default())), - factory_info: StakeInfo::from((U256::default(), U256::default())), - paymaster_info: StakeInfo::from((U256::default(), U256::default())), - aggregator_info: None, - }, - Settings::default(), - ), - tracer_out: tracer_output, - entry_point_out: ValidationOutput { - return_info: ValidationReturnInfo::from(( - U256::default(), - U256::default(), - true, - 0, - 0, - Bytes::default(), - )), - sender_info: StakeInfo::from((U256::default(), U256::default())), - factory_info: StakeInfo::from((U256::default(), U256::default())), - paymaster_info: StakeInfo::from((U256::default(), 
U256::default())), - aggregator_info: None, - }, - entities_needing_stake: vec![], - accessed_addresses: HashSet::new(), - }; - - let simulator = create_simulator(provider, tracer); - let res = simulator.gather_context_violations(&mut validation_context); - - assert_eq!( - res.unwrap(), - vec![ - SimulationViolation::InvalidSignature, - SimulationViolation::UsedForbiddenOpcode( - Entity { - kind: EntityType::Account, - address: Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4") - .unwrap() - }, - Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4").unwrap(), - ViolationOpCode(Opcode::GASPRICE), - ), - SimulationViolation::UsedForbiddenOpcode( - Entity { - kind: EntityType::Account, - address: Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4") - .unwrap() - }, - Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4").unwrap(), - ViolationOpCode(Opcode::COINBASE), - ), - SimulationViolation::UsedForbiddenPrecompile( - Entity { - kind: EntityType::Account, - address: Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4") - .unwrap() - }, - Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4").unwrap(), - Address::from_str("0x0000000000000000000000000000000000000019").unwrap(), - ), - SimulationViolation::InvalidStorageAccess( - Entity { - kind: EntityType::Account, - address: Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4") - .unwrap() - }, - StorageSlot { - address: Address::from_str("0x1c0e100fcf093c64cdaa545b425ad7ed8e8a0db6") - .unwrap(), - slot: U256::from_str( - "0xa3f946b7ed2f016739c6be6031c5579a53d3784a471c3b5f9c2a1f8706c65a4b" - ) - .unwrap() - } - ), - ] - ); - } -} diff --git a/crates/sim/src/simulation/simulator.rs b/crates/sim/src/simulation/simulator.rs new file mode 100644 index 00000000..49f1a6de --- /dev/null +++ b/crates/sim/src/simulation/simulator.rs @@ -0,0 +1,1214 @@ +// This file is part of Rundler. 
+// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +use std::{ + collections::{HashMap, HashSet}, + marker::PhantomData, + ops::Deref, + sync::Arc, +}; + +use async_trait::async_trait; +use ethers::types::{Address, H256, U256}; +use rundler_provider::{ + AggregatorOut, AggregatorSimOut, EntryPoint, Provider, SignatureAggregator, SimulationProvider, +}; +use rundler_types::{ + pool::{NeedsStakeInformation, SimulationViolation}, + v0_6::UserOperation as UserOperationV0_6, + v0_7::UserOperation as UserOperationV0_7, + Entity, EntityInfo, EntityInfos, EntityType, Opcode, StorageSlot, UserOperation, + ValidTimeRange, ValidationOutput, ValidationReturnInfo, ViolationOpCode, +}; + +use super::context::{ + self, AccessInfo, AssociatedSlotsByAddress, ValidationContext, ValidationContextProvider, +}; +use crate::{ + simulation::{ + mempool::{self, AllowEntity, AllowRule, MempoolConfig, MempoolMatchResult}, + v0_6::ValidationContextProvider as ValidationContextProviderV0_6, + v0_7::ValidationContextProvider as ValidationContextProviderV0_7, + Settings, Simulator, + }, + types::ViolationError, + utils, SimulationError, SimulationResult, +}; + +/// Create a new simulator for v0.6 entry point contracts +pub fn new_v0_6_simulator( + provider: Arc

, + entry_point: E, + sim_settings: Settings, + mempool_configs: HashMap, +) -> impl Simulator +where + P: Provider, + E: EntryPoint + + SignatureAggregator + + SimulationProvider + + Clone, +{ + SimulatorImpl::new( + provider.clone(), + entry_point.clone(), + ValidationContextProviderV0_6::new(provider, entry_point, sim_settings.clone()), + sim_settings, + mempool_configs, + ) +} + +/// Create a new simulator for v0.6 entry point contracts +pub fn new_v0_7_simulator( + provider: Arc

, + entry_point: E, + sim_settings: Settings, + mempool_configs: HashMap, +) -> impl Simulator +where + P: Provider, + E: EntryPoint + + SignatureAggregator + + SimulationProvider + + Clone, +{ + SimulatorImpl::new( + provider.clone(), + entry_point.clone(), + ValidationContextProviderV0_7::new(provider, entry_point, sim_settings.clone()), + sim_settings, + mempool_configs, + ) +} + +/// Simulator implementation. +/// +/// This simulator supports the use of "alternative mempools". +/// During simulation, the simulator will check the violations found +/// against the mempool configurations provided in the constructor. +/// +/// If a mempool is found to support all of the associated violations, +/// it will be included in the list of mempools returned by the simulator. +/// +/// If no mempools are found, the simulator will return an error containing +/// the violations. +#[derive(Debug)] +pub struct SimulatorImpl { + provider: Arc

, + entry_point: E, + validation_context_provider: V, + sim_settings: Settings, + mempool_configs: HashMap, + allow_unstaked_addresses: HashSet

, + _uo_type: PhantomData, +} + +impl SimulatorImpl +where + UO: UserOperation, + P: Provider, + E: EntryPoint + SignatureAggregator + Clone, + V: ValidationContextProvider, +{ + /// Create a new simulator + /// + /// `mempool_configs` is a map of mempool IDs to mempool configurations. + /// It is used during simulation to determine which mempools support + /// the violations found during simulation. + pub fn new( + provider: Arc

, + entry_point: E, + validation_context_provider: V, + sim_settings: Settings, + mempool_configs: HashMap, + ) -> Self { + // Get a list of entities that are allowed to act as staked entities despite being unstaked + let mut allow_unstaked_addresses = HashSet::new(); + for config in mempool_configs.values() { + for entry in &config.allowlist { + if entry.rule == AllowRule::NotStaked { + if let AllowEntity::Address(address) = entry.entity { + allow_unstaked_addresses.insert(address); + } + } + } + } + + Self { + provider, + entry_point, + validation_context_provider, + sim_settings, + mempool_configs, + allow_unstaked_addresses, + _uo_type: PhantomData, + } + } + + async fn validate_aggregator_signature( + &self, + op: UO, + aggregator_address: Option

, + gas_cap: u64, + ) -> anyhow::Result { + let Some(aggregator_address) = aggregator_address else { + return Ok(AggregatorOut::NotNeeded); + }; + + self.entry_point + .clone() + .validate_user_op_signature(aggregator_address, op, gas_cap) + .await + } + + // Parse the output from tracing and return a list of violations. + // Most violations found during this stage are allowlistable and can be added + // to the list of allowlisted violations on a given mempool. + fn gather_context_violations( + &self, + context: &mut ValidationContext, + ) -> anyhow::Result> { + let &mut ValidationContext { + ref entity_infos, + ref tracer_out, + ref entry_point_out, + ref mut accessed_addresses, + has_factory, + .. + } = context; + + let mut violations = vec![]; + + let sender_address = entity_infos.sender_address(); + for (index, phase) in tracer_out.phases.iter().enumerate().take(3) { + let kind = context::entity_type_from_simulation_phase(index).unwrap(); + let Some(ei) = entity_infos.get(kind) else { + continue; + }; + for opcode in &phase.forbidden_opcodes_used { + let (contract, opcode) = context::parse_combined_context_str(opcode)?; + + // [OP-080] - staked entities are allowed to use BALANCE and SELFBALANCE + if ei.is_staked && (opcode == Opcode::BALANCE || opcode == Opcode::SELFBALANCE) { + continue; + } + + // [OP-011] + violations.push(SimulationViolation::UsedForbiddenOpcode( + ei.entity, + contract, + ViolationOpCode(opcode), + )); + } + + for (addr, opcode) in &phase.ext_code_access_info { + if *addr == self.entry_point.address() { + // [OP-054] + // [OP-051] - If calling `EXTCODESIZE ISZERO` the tracer won't add to this list + violations.push(SimulationViolation::UsedForbiddenOpcode( + ei.entity, + *addr, + ViolationOpCode(*opcode), + )); + } + } + + for precompile in &phase.forbidden_precompiles_used { + let (contract, precompile) = context::parse_combined_context_str(precompile)?; + // [OP-062] + violations.push(SimulationViolation::UsedForbiddenPrecompile( + 
ei.entity, contract, precompile, + )); + } + + for (addr, access_info) in &phase.storage_accesses { + let address = *addr; + accessed_addresses.insert(address); + + let restrictions = parse_storage_accesses(ParseStorageAccess { + access_info, + slots_by_address: &tracer_out.associated_slots_by_address, + address, + sender: sender_address, + entrypoint: self.entry_point.address(), + has_factory, + entity: &ei.entity, + }); + + for restriction in restrictions { + match restriction { + StorageRestriction::NeedsStake( + needs_stake, + accessing_entity, + accessed_entity, + accessed_address, + slot, + ) => { + let needs_stake_entity = entity_infos + .get(needs_stake) + .expect("entity type not found in entity_infos"); + + if !needs_stake_entity.is_staked { + // [STO-*] + violations.push(SimulationViolation::NotStaked(Box::new( + NeedsStakeInformation { + needs_stake: ei.entity, + accessing_entity, + accessed_entity, + accessed_address, + slot, + min_stake: self.sim_settings.min_stake_value.into(), + min_unstake_delay: self + .sim_settings + .min_unstake_delay + .into(), + }, + ))); + } + } + StorageRestriction::AssociatedStorageDuringDeploy( + needs_stake, + address, + slot, + ) => { + let needs_stake_entity = needs_stake.and_then(|t| entity_infos.get(t)); + if let Some(needs_stake_entity) = needs_stake_entity { + if needs_stake_entity.is_staked { + tracing::debug!("Associated storage accessed by staked entity during deploy, and entity is staked"); + continue; + } + } + if let Some(factory) = entity_infos.get(EntityType::Factory) { + if factory.is_staked { + tracing::debug!("Associated storage accessed by staked entity during deploy, and factory is staked"); + continue; + } + } + // [STO-022] + violations.push(SimulationViolation::AssociatedStorageDuringDeploy( + needs_stake_entity.map(|ei| ei.entity), + StorageSlot { address, slot }, + )) + } + StorageRestriction::Banned(slot) => { + // [STO-*] + violations.push(SimulationViolation::InvalidStorageAccess( + ei.entity, + 
StorageSlot { address, slot }, + )); + } + } + } + } + + if phase.called_non_entry_point_with_value { + // [OP-061] + violations.push(SimulationViolation::CallHadValue(ei.entity)); + } + if phase.called_banned_entry_point_method { + // [OP-054] + violations.push(SimulationViolation::CalledBannedEntryPointMethod(ei.entity)); + } + + if phase.ran_out_of_gas { + // [OP-020] + violations.push(SimulationViolation::OutOfGas(ei.entity)); + } + for &address in &phase.undeployed_contract_accesses { + // OP-042 - Factory can access undeployed sender + if ei.entity.kind == EntityType::Factory && address == sender_address { + continue; + } + // OP-041 - Access to an address without deployed code is forbidden + violations.push(SimulationViolation::AccessedUndeployedContract( + ei.entity, address, + )) + } + } + + if !entry_point_out.return_info.is_valid_time_range() { + violations.push(SimulationViolation::InvalidTimeRange( + entry_point_out.return_info.valid_until, + entry_point_out.return_info.valid_after, + )); + } + + if let Some(aggregator_info) = entry_point_out.aggregator_info { + if !context::is_staked(aggregator_info.stake_info, &self.sim_settings) { + // [EREP-040] + violations.push(SimulationViolation::UnstakedAggregator) + } + } + + for (address, contract_info) in &tracer_out.accessed_contracts { + if contract_info.header.as_str() == "0xEFF000" { + // All arbitrum stylus contracts start with 0xEFF000 + violations.push(SimulationViolation::AccessedUnsupportedContractType( + "Arbitrum Stylus".to_string(), + *address, + )); + } + } + + if tracer_out.factory_called_create2_twice { + let factory = entity_infos.get(EntityType::Factory); + match factory { + Some(factory) => { + // [OP-031] + violations.push(SimulationViolation::FactoryCalledCreate2Twice( + factory.entity.address, + )); + } + None => { + // [OP-031] + // weird case where CREATE2 is called > 1, but there isn't a factory + // defined. This should never happen, blame the violation on the entry point. 
+ violations.push(SimulationViolation::FactoryCalledCreate2Twice( + self.entry_point.address(), + )); + } + } + } + + // Get violations specific to the implemented entry point from the context provider + violations.extend( + self.validation_context_provider + .get_specific_violations(context), + ); + + Ok(violations) + } + + // Check the code hash of the entities associated with the user operation + // if needed, validate that the signature is valid for the aggregator. + // Violations during this stage are always errors. + async fn check_contracts( + &self, + op: UO, + context: &mut ValidationContext, + expected_code_hash: Option, + ) -> Result<(H256, Option), SimulationError> { + let &mut ValidationContext { + block_id, + ref mut tracer_out, + ref entry_point_out, + .. + } = context; + + // collect a vector of violations to ensure a deterministic error message + let mut violations = vec![]; + + let aggregator_address = entry_point_out.aggregator_info.map(|info| info.address); + let code_hash_future = utils::get_code_hash( + self.provider.deref(), + tracer_out.accessed_contracts.keys().cloned().collect(), + Some(block_id), + ); + let aggregator_signature_future = self.validate_aggregator_signature( + op, + aggregator_address, + self.sim_settings.max_verification_gas, + ); + + let (code_hash, aggregator_out) = + tokio::try_join!(code_hash_future, aggregator_signature_future)?; + + if let Some(expected_code_hash) = expected_code_hash { + // [COD-010] + if expected_code_hash != code_hash { + violations.push(SimulationViolation::CodeHashChanged) + } + } + let aggregator = match aggregator_out { + AggregatorOut::NotNeeded => None, + AggregatorOut::SuccessWithInfo(info) => Some(info), + AggregatorOut::ValidationReverted => { + violations.push(SimulationViolation::AggregatorValidationFailed); + None + } + }; + + if !violations.is_empty() { + return Err(SimulationError { + violation_error: ViolationError::Violations(violations), + entity_infos: None, + }); + } + + 
Ok((code_hash, aggregator)) + } +} + +#[async_trait] +impl Simulator for SimulatorImpl +where + UO: UserOperation, + P: Provider, + E: EntryPoint + SignatureAggregator + Clone, + V: ValidationContextProvider, +{ + type UO = UO; + + async fn simulate_validation( + &self, + op: UO, + block_hash: Option, + expected_code_hash: Option, + ) -> Result { + let (block_hash, block_number) = match block_hash { + // If we are given a block_hash, we return a None block number, avoiding an extra call + Some(block_hash) => (block_hash, None), + None => { + let hash_and_num = self + .provider + .get_latest_block_hash_and_number() + .await + .map_err(anyhow::Error::from)?; + (hash_and_num.0, Some(hash_and_num.1.as_u64())) + } + }; + let block_id = block_hash.into(); + let mut context = match self + .validation_context_provider + .get_context(op.clone(), block_id) + .await + { + Ok(context) => context, + error @ Err(_) => error?, + }; + + // Gather all violations from the tracer + let mut overridable_violations = self.gather_context_violations(&mut context)?; + // Sort violations so that the final error message is deterministic + overridable_violations.sort(); + // Check violations against mempool rules, find supporting mempools, error if none found + let mempools = match mempool::match_mempools(&self.mempool_configs, &overridable_violations) + { + MempoolMatchResult::Matches(pools) => pools, + MempoolMatchResult::NoMatch(i) => { + return Err(SimulationError { + violation_error: ViolationError::Violations(vec![ + overridable_violations[i].clone() + ]), + entity_infos: Some(context.entity_infos), + }) + } + }; + + // Check code hash and aggregator signature, these can't fail + let (code_hash, aggregator) = self + .check_contracts(op, &mut context, expected_code_hash) + .await?; + + // Transform outputs into success struct + let ValidationContext { + tracer_out, + entry_point_out, + accessed_addresses, + associated_addresses, + .. 
+ } = context; + let ValidationOutput { + return_info, + sender_info, + .. + } = entry_point_out; + let account_is_staked = context::is_staked(sender_info, &self.sim_settings); + let ValidationReturnInfo { + pre_op_gas, + valid_after, + valid_until, + paymaster_context, + .. + } = return_info; + + // Conduct any stake overrides before assigning entity_infos + override_infos_staked(&mut context.entity_infos, &self.allow_unstaked_addresses); + + Ok(SimulationResult { + mempools, + block_hash, + block_number, + pre_op_gas, + valid_time_range: ValidTimeRange::new(valid_after, valid_until), + aggregator, + code_hash, + account_is_staked, + accessed_addresses, + associated_addresses, + expected_storage: tracer_out.expected_storage, + requires_post_op: !paymaster_context.is_empty(), + entity_infos: context.entity_infos, + }) + } +} + +#[derive(Clone, Debug, Eq, PartialEq)] +enum StorageRestriction { + /// (Entity needing stake, accessing entity type, accessed entity type, accessed address, accessed slot) + NeedsStake(EntityType, EntityType, Option, Address, U256), + AssociatedStorageDuringDeploy(Option, Address, U256), + Banned(U256), +} + +#[derive(Clone, Debug)] +struct ParseStorageAccess<'a> { + access_info: &'a AccessInfo, + slots_by_address: &'a AssociatedSlotsByAddress, + address: Address, + sender: Address, + entrypoint: Address, + has_factory: bool, + entity: &'a Entity, +} + +fn parse_storage_accesses(args: ParseStorageAccess<'_>) -> Vec { + let ParseStorageAccess { + access_info, + address, + sender, + entrypoint, + entity, + slots_by_address, + has_factory, + .. 
+ } = args; + + let mut restrictions = vec![]; + + // [STO-010] - always allowed to access storage on the account + // [OP-054] - block access to the entrypoint, except for depositTo and fallback + // - this is handled at another level, so we don't need to check for it here + // - at this level we can allow any entry point access through + if address.eq(&sender) || address.eq(&entrypoint) { + return restrictions; + } + + let slots: Vec<&U256> = access_info + .reads + .keys() + .chain(access_info.writes.keys()) + .collect(); + + for slot in slots { + let is_sender_associated = slots_by_address.is_associated_slot(sender, *slot); + // [STO-032] + let is_entity_associated = slots_by_address.is_associated_slot(entity.address, *slot); + // [STO-031] + let is_same_address = address.eq(&entity.address); + // [STO-033] + let is_read_permission = !access_info.writes.contains_key(slot); + + // [STO-021] - Associated storage on external contracts is allowed + if is_sender_associated && !is_same_address { + // [STO-022] - Factory must be staked to access associated storage in a deploy + if has_factory { + match entity.kind { + EntityType::Paymaster | EntityType::Aggregator => { + // If its a paymaster/aggregator, then the entity OR factory must be staked to access associated storage + // during a deploy + restrictions.push(StorageRestriction::AssociatedStorageDuringDeploy( + Some(entity.kind), + address, + *slot, + )); + } + // If its a factory/account, then the factory must be staked to access associated storage during a deploy + EntityType::Account | EntityType::Factory => { + restrictions.push(StorageRestriction::AssociatedStorageDuringDeploy( + None, address, *slot, + )); + } + } + } + } else if is_entity_associated || is_same_address { + restrictions.push(StorageRestriction::NeedsStake( + entity.kind, + entity.kind, + Some(entity.kind), + address, + *slot, + )); + } else if is_read_permission { + restrictions.push(StorageRestriction::NeedsStake( + entity.kind, + 
entity.kind, + None, + address, + *slot, + )); + } else { + restrictions.push(StorageRestriction::Banned(*slot)); + } + } + + restrictions +} + +fn override_is_staked(ei: &mut EntityInfo, allow_unstaked_addresses: &HashSet<Address>
) { + ei.is_staked = allow_unstaked_addresses.contains(&ei.entity.address) || ei.is_staked; +} + +fn override_infos_staked(eis: &mut EntityInfos, allow_unstaked_addresses: &HashSet<Address>
) { + override_is_staked(&mut eis.sender, allow_unstaked_addresses); + + if let Some(mut factory) = eis.factory { + override_is_staked(&mut factory, allow_unstaked_addresses); + } + if let Some(mut paymaster) = eis.paymaster { + override_is_staked(&mut paymaster, allow_unstaked_addresses); + } + if let Some(mut aggregator) = eis.aggregator { + override_is_staked(&mut aggregator, allow_unstaked_addresses); + } +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use context::ContractInfo; + use ethers::types::{Address, BlockId, BlockNumber, Bytes, U256, U64}; + use rundler_provider::{AggregatorOut, MockEntryPointV0_6, MockProvider}; + use rundler_types::{ + contracts::utils::get_code_hashes::CodeHashesResult, v0_6::UserOperation, Opcode, StakeInfo, + }; + + use self::context::{Phase, TracerOutput}; + use super::*; + + mockall::mock! { + ValidationContextProviderV0_6 {} + + #[async_trait::async_trait] + impl ValidationContextProvider for ValidationContextProviderV0_6 { + type UO = UserOperation; + async fn get_context( + &self, + op: UserOperationV0_6, + block_id: ethers::types::BlockId, + ) -> Result, ViolationError>; + fn get_specific_violations( + &self, + context: &ValidationContext, + ) -> Vec; + } + } + + fn create_base_config() -> ( + MockProvider, + MockEntryPointV0_6, + MockValidationContextProviderV0_6, + ) { + ( + MockProvider::new(), + MockEntryPointV0_6::new(), + MockValidationContextProviderV0_6::new(), + ) + } + + fn get_test_context() -> ValidationContext { + let tracer_out = TracerOutput { + accessed_contracts: HashMap::from([ + ( + Address::from_str("0x5ff137d4b0fdcd49dca30c7cf57e578a026d2789").unwrap(), + ContractInfo { + header: "0x608060".to_string(), + opcode: Opcode::CALL, + length: 32, + } + ), + ( + Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4").unwrap(), + ContractInfo { + header: "0x608060".to_string(), + opcode: Opcode::CALL, + length: 32, + } + ), + ( + 
Address::from_str("0x8abb13360b87be5eeb1b98647a016add927a136c").unwrap(), + ContractInfo { + header: "0x608060".to_string(), + opcode: Opcode::CALL, + length: 32, + } + ), + ]), + associated_slots_by_address: serde_json::from_str(r#" + { + "0x0000000000000000000000000000000000000000": [ + "0xd5c1ebdd81c5c7bebcd52bc11c8d37f7038b3c64f849c2ca58a022abeab1adae", + "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5" + ], + "0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4": [ + "0x3072884cc37d411af7360b34f105e1e860b1631783232a4f2d5c094d365cdaab", + "0xf5357e1da3acf909ceaed3492183cbad85a3c9e1f0076495f66d3eed05219bd5", + "0xf264fff4db20d04721712f34a6b5a8bca69a212345e40a92101082e79bdd1f0a" + ] + } + "#).unwrap(), + factory_called_create2_twice: false, + expected_storage: serde_json::from_str(r#" + { + "0x5ff137d4b0fdcd49dca30c7cf57e578a026d2789": { + "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb6": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + } + "#).unwrap(), + phases: vec![ + Phase { + called_banned_entry_point_method: false, + called_non_entry_point_with_value: false, + forbidden_opcodes_used: vec![], + forbidden_precompiles_used: vec![], + ran_out_of_gas: false, + storage_accesses: HashMap::new(), + undeployed_contract_accesses: vec![], + ext_code_access_info: HashMap::new(), + }, + Phase { + called_banned_entry_point_method: false, + called_non_entry_point_with_value: false, + forbidden_opcodes_used: vec![], + forbidden_precompiles_used: vec![], + ran_out_of_gas: false, + storage_accesses: HashMap::new(), + undeployed_contract_accesses: vec![], + ext_code_access_info: HashMap::new(), + }, + Phase { + called_banned_entry_point_method: false, + called_non_entry_point_with_value: false, + forbidden_opcodes_used: vec![], + forbidden_precompiles_used: vec![], + 
ran_out_of_gas: false, + storage_accesses: HashMap::new(), + undeployed_contract_accesses: vec![], + ext_code_access_info: HashMap::new(), + } + ], + revert_data: Some("0xe0cff05f00000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000014eff00000000000000000000000000000000000000000000000000000b7679c50c24000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffff00000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000".into()), + }; + + ValidationContext { + op: UserOperation { + verification_gas_limit: U256::from(2000), + pre_verification_gas: U256::from(1000), + ..Default::default() + }, + has_factory: true, + associated_addresses: HashSet::new(), + block_id: BlockId::Number(BlockNumber::Latest), + entity_infos: context::infos_from_validation_output( + Some(Address::from_str("0x5ff137d4b0fdcd49dca30c7cf57e578a026d2789").unwrap()), + Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4").unwrap(), + Some(Address::from_str("0x8abb13360b87be5eeb1b98647a016add927a136c").unwrap()), + &ValidationOutput { + return_info: ValidationReturnInfo::from(( + U256::default(), + U256::default(), + false, + 0, + 0, + Bytes::default(), + )), + sender_info: StakeInfo::from((U256::default(), U256::default())), + factory_info: StakeInfo::from((U256::default(), U256::default())), + paymaster_info: StakeInfo::from((U256::default(), 
U256::default())), + aggregator_info: None, + }, + &Settings::default(), + ), + tracer_out, + entry_point_out: ValidationOutput { + return_info: ValidationReturnInfo::from(( + 3000.into(), + U256::default(), + true, + 0, + 0, + Bytes::default(), + )), + sender_info: StakeInfo::from((U256::default(), U256::default())), + factory_info: StakeInfo::from((U256::default(), U256::default())), + paymaster_info: StakeInfo::from((U256::default(), U256::default())), + aggregator_info: None, + }, + accessed_addresses: HashSet::new(), + } + } + + fn create_simulator( + provider: MockProvider, + entry_point: MockEntryPointV0_6, + context: MockValidationContextProviderV0_6, + ) -> SimulatorImpl< + UserOperation, + MockProvider, + Arc, + MockValidationContextProviderV0_6, + > { + let settings = Settings::default(); + + let mut mempool_configs = HashMap::new(); + mempool_configs.insert(H256::zero(), MempoolConfig::default()); + + let provider = Arc::new(provider); + + SimulatorImpl::new( + Arc::clone(&provider), + Arc::new(entry_point), + context, + settings, + mempool_configs, + ) + } + + #[tokio::test] + async fn test_simulate_validation() { + let (mut provider, mut entry_point, mut context) = create_base_config(); + + provider + .expect_get_latest_block_hash_and_number() + .returning(|| { + Ok(( + H256::from_str( + "0x38138f1cb4653ab6ab1c89ae3a6acc8705b54bd16a997d880c4421014ed66c3d", + ) + .unwrap(), + U64::zero(), + )) + }); + + context + .expect_get_context() + .returning(move |_, _| Ok(get_test_context())); + context + .expect_get_specific_violations() + .return_const(vec![]); + + // The underlying call constructor when getting the code hash in check_contracts + provider + .expect_call_constructor() + .returning(|_, _: Vec
, _, _| { + Ok(CodeHashesResult { + hash: H256::from_str( + "0x091cd005abf68e7b82c951a8619f065986132f67a0945153533cfcdd93b6895f", + ) + .unwrap() + .into(), + }) + }); + + entry_point + .expect_validate_user_op_signature() + .returning(|_, _, _| Ok(AggregatorOut::NotNeeded)); + + let user_operation = UserOperation { + sender: Address::from_str("b856dbd4fa1a79a46d426f537455e7d3e79ab7c4").unwrap(), + nonce: U256::from(264), + init_code: Bytes::from_str("0x").unwrap(), + call_data: Bytes::from_str("0xb61d27f6000000000000000000000000b856dbd4fa1a79a46d426f537455e7d3e79ab7c4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000004d087d28800000000000000000000000000000000000000000000000000000000").unwrap(), + call_gas_limit: U256::from(9100), + verification_gas_limit: U256::from(64805), + pre_verification_gas: U256::from(46128), + max_fee_per_gas: U256::from(105000100), + max_priority_fee_per_gas: U256::from(105000000), + paymaster_and_data: Bytes::from_str("0x").unwrap(), + signature: Bytes::from_str("0x98f89993ce573172635b44ef3b0741bd0c19dd06909d3539159f6d66bef8c0945550cc858b1cf5921dfce0986605097ba34c2cf3fc279154dd25e161ea7b3d0f1c").unwrap(), + }; + + let simulator = create_simulator(provider, entry_point, context); + let res = simulator + .simulate_validation(user_operation, None, None) + .await; + assert!(res.is_ok()); + } + + #[tokio::test] + async fn test_gather_context_violations() { + let (provider, mut entry_point, mut context_provider) = create_base_config(); + entry_point + .expect_address() + .returning(|| Address::from_str("0x5ff137d4b0fdcd49dca30c7cf57e578a026d2789").unwrap()); + context_provider + .expect_get_specific_violations() + .return_const(vec![]); + + let mut context = get_test_context(); + + // add forbidden opcodes and precompiles + context.tracer_out.phases[1].forbidden_opcodes_used = vec![ + 
String::from("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4:GASPRICE"), + String::from("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4:COINBASE"), + ]; + context.tracer_out.phases[1].forbidden_precompiles_used = vec![String::from( + "0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4:0x0000000000000000000000000000000000000019", + )]; + + // add a storage access for a random unrelated address + let mut writes = HashMap::new(); + + writes.insert( + H256::from_str("0xa3f946b7ed2f016739c6be6031c5579a53d3784a471c3b5f9c2a1f8706c65a4b") + .unwrap() + .to_fixed_bytes() + .into(), + 1, + ); + + context.tracer_out.phases[1].storage_accesses.insert( + Address::from_str("0x1c0e100fcf093c64cdaa545b425ad7ed8e8a0db6").unwrap(), + AccessInfo { + reads: HashMap::new(), + writes, + }, + ); + + let simulator = create_simulator(provider, entry_point, context_provider); + let res = simulator.gather_context_violations(&mut context); + + assert_eq!( + res.unwrap(), + vec![ + SimulationViolation::UsedForbiddenOpcode( + Entity { + kind: EntityType::Account, + address: Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4") + .unwrap() + }, + Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4").unwrap(), + ViolationOpCode(Opcode::GASPRICE), + ), + SimulationViolation::UsedForbiddenOpcode( + Entity { + kind: EntityType::Account, + address: Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4") + .unwrap() + }, + Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4").unwrap(), + ViolationOpCode(Opcode::COINBASE), + ), + SimulationViolation::UsedForbiddenPrecompile( + Entity { + kind: EntityType::Account, + address: Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4") + .unwrap() + }, + Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4").unwrap(), + Address::from_str("0x0000000000000000000000000000000000000019").unwrap(), + ), + SimulationViolation::InvalidStorageAccess( + Entity { + kind: EntityType::Account, + address: 
Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4") + .unwrap() + }, + StorageSlot { + address: Address::from_str("0x1c0e100fcf093c64cdaa545b425ad7ed8e8a0db6") + .unwrap(), + slot: U256::from_str( + "0xa3f946b7ed2f016739c6be6031c5579a53d3784a471c3b5f9c2a1f8706c65a4b" + ) + .unwrap() + } + ), + ] + ); + } + + #[tokio::test] + async fn test_op_080() { + let (provider, ep, mut context_provider) = create_base_config(); + context_provider + .expect_get_specific_violations() + .return_const(vec![]); + + let mut context = get_test_context(); + + // add forbidden opcodes and precompiles + context.tracer_out.phases[2].forbidden_opcodes_used = vec![ + String::from("0x8abb13360b87be5eeb1b98647a016add927a136c:SELFBALANCE"), + String::from("0x8abb13360b87be5eeb1b98647a016add927a136c:BALANCE"), + ]; + + let simulator = create_simulator(provider, ep, context_provider); + let res = simulator.gather_context_violations(&mut context); + + // unstaked causes errors + assert_eq!( + res.unwrap(), + vec![ + SimulationViolation::UsedForbiddenOpcode( + Entity { + kind: EntityType::Paymaster, + address: Address::from_str("0x8abb13360b87be5eeb1b98647a016add927a136c") + .unwrap() + }, + Address::from_str("0x8abb13360b87be5eeb1b98647a016add927a136c").unwrap(), + ViolationOpCode(Opcode::SELFBALANCE) + ), + SimulationViolation::UsedForbiddenOpcode( + Entity { + kind: EntityType::Paymaster, + address: Address::from_str("0x8abb13360b87be5eeb1b98647a016add927a136c") + .unwrap() + }, + Address::from_str("0x8abb13360b87be5eeb1b98647a016add927a136c").unwrap(), + ViolationOpCode(Opcode::BALANCE) + ) + ] + ); + + // staked causes no errors + context.entity_infos.paymaster.as_mut().unwrap().is_staked = true; + let res = simulator.gather_context_violations(&mut context); + assert!(res.unwrap().is_empty()); + } + + #[tokio::test] + async fn test_factory_staking() { + let (provider, mut ep, mut context_provider) = create_base_config(); + ep.expect_address() + .returning(|| 
Address::from_str("0x5ff137d4b0fdcd49dca30c7cf57e578a026d2789").unwrap()); + context_provider + .expect_get_specific_violations() + .return_const(vec![]); + + let mut writes: HashMap = HashMap::new(); + + let sender_address = + Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4").unwrap(); + + let external_access_address = Address::random(); + + let sender_bytes = sender_address.as_bytes().into(); + + writes.insert(sender_bytes, 1); + + let mut context = get_test_context(); + context.tracer_out.phases[1].storage_accesses.insert( + external_access_address, + AccessInfo { + reads: HashMap::new(), + writes, + }, + ); + + // Create the simulator using the provider and tracer + let simulator = create_simulator(provider, ep, context_provider); + let res = simulator.gather_context_violations(&mut context); + + assert_eq!( + res.unwrap(), + vec![SimulationViolation::AssociatedStorageDuringDeploy( + None, + StorageSlot { + address: external_access_address, + slot: sender_address.as_bytes().into() + } + )] + ); + + // staked causes no errors + context.entity_infos.factory.as_mut().unwrap().is_staked = true; + let res = simulator.gather_context_violations(&mut context); + assert!(res.unwrap().is_empty()); + } + + #[tokio::test] + async fn test_paymaster_access_during_deploy() { + let (provider, mut ep, mut context_provider) = create_base_config(); + ep.expect_address() + .returning(|| Address::from_str("0x5ff137d4b0fdcd49dca30c7cf57e578a026d2789").unwrap()); + context_provider + .expect_get_specific_violations() + .return_const(vec![]); + + let mut writes: HashMap = HashMap::new(); + + let sender_address = + Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4").unwrap(); + let paymaster_address = + Address::from_str("0x8abb13360b87be5eeb1b98647a016add927a136c").unwrap(); + + let external_access_address = Address::random(); + + let sender_bytes = sender_address.as_bytes().into(); + + writes.insert(sender_bytes, 1); + + let mut context = get_test_context(); 
+ context.tracer_out.phases[2].storage_accesses.insert( + external_access_address, + AccessInfo { + reads: HashMap::new(), + writes, + }, + ); + + // Create the simulator using the provider and tracer + let simulator = create_simulator(provider, ep, context_provider); + let res = simulator.gather_context_violations(&mut context); + + assert_eq!( + res.unwrap(), + vec![SimulationViolation::AssociatedStorageDuringDeploy( + Some(Entity::paymaster(paymaster_address)), + StorageSlot { + address: external_access_address, + slot: sender_address.as_bytes().into() + } + )] + ); + + // staked causes no errors + context.entity_infos.factory.as_mut().unwrap().is_staked = true; + let res = simulator.gather_context_violations(&mut context); + assert!(res.unwrap().is_empty()); + } + + #[tokio::test] + async fn test_accessed_unsupported_contract() { + let (provider, mut ep, mut context_provider) = create_base_config(); + ep.expect_address() + .returning(|| Address::from_str("0x5ff137d4b0fdcd49dca30c7cf57e578a026d2789").unwrap()); + context_provider + .expect_get_specific_violations() + .return_const(vec![]); + + let addr = Address::random(); + let mut context = get_test_context(); + context.tracer_out.accessed_contracts.insert( + addr, + ContractInfo { + header: "0xEFF000".to_string(), + opcode: Opcode::CALL, + length: 32, + }, + ); + + let simulator = create_simulator(provider, ep, context_provider); + let res = simulator.gather_context_violations(&mut context); + + assert_eq!( + res.unwrap(), + vec![SimulationViolation::AccessedUnsupportedContractType( + "Arbitrum Stylus".to_string(), + addr + )] + ); + } +} diff --git a/crates/sim/src/simulation/tracer.rs b/crates/sim/src/simulation/tracer.rs deleted file mode 100644 index 58863362..00000000 --- a/crates/sim/src/simulation/tracer.rs +++ /dev/null @@ -1,202 +0,0 @@ -// This file is part of Rundler. 
// -// Rundler is free software: you can redistribute it and/or modify it under the -// terms of the GNU Lesser General Public License as published by the Free Software -// Foundation, either version 3 of the License, or (at your option) any later version. -// -// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; -// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License along with Rundler. -// If not, see https://www.gnu.org/licenses/. - -use std::{ - collections::{BTreeSet, HashMap, HashSet}, - convert::TryFrom, - fmt::Debug, - sync::Arc, -}; - -use anyhow::{bail, Context}; -use async_trait::async_trait; -use ethers::types::{ - Address, BlockId, GethDebugTracerType, GethDebugTracingCallOptions, GethDebugTracingOptions, - GethTrace, Opcode, U256, -}; -#[cfg(test)] -use mockall::automock; -use rundler_provider::{EntryPoint, Provider}; -use rundler_types::UserOperation; -use serde::{Deserialize, Serialize}; - -use crate::ExpectedStorage; -use rundler_types::hybrid_compute; - -#[derive(Clone, Debug, Deserialize, Serialize)] -#[serde(rename_all = "camelCase")] -pub struct SimulationTracerOutput { - pub(crate) phases: Vec, - pub(crate) revert_data: Option, - pub(crate) accessed_contract_addresses: Vec
, - pub(crate) associated_slots_by_address: AssociatedSlotsByAddress, - pub(crate) factory_called_create2_twice: bool, - pub(crate) expected_storage: ExpectedStorage, -} - -impl TryFrom for SimulationTracerOutput { - type Error = anyhow::Error; - fn try_from(trace: GethTrace) -> Result { - match trace { - GethTrace::Unknown(value) => Ok(SimulationTracerOutput::deserialize(&value)?), - GethTrace::Known(_) => { - bail!("Failed to deserialize simulation trace") - } - } - } -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -#[serde(rename_all = "camelCase")] -pub(crate) struct Phase { - pub(crate) forbidden_opcodes_used: Vec, - pub(crate) forbidden_precompiles_used: Vec, - pub(crate) storage_accesses: HashMap, - pub(crate) called_banned_entry_point_method: bool, - pub(crate) addresses_calling_with_value: Vec
, - pub(crate) called_non_entry_point_with_value: bool, - pub(crate) ran_out_of_gas: bool, - pub(crate) undeployed_contract_accesses: Vec
, - pub(crate) ext_code_access_info: HashMap, -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -#[serde(rename_all = "camelCase")] -pub(crate) struct AccessInfo { - // slot value, just prior this current operation - pub(crate) reads: HashMap, - // count of writes. - pub(crate) writes: HashMap, -} - -#[derive(Clone, Debug, Deserialize, Serialize)] -pub(crate) struct AssociatedSlotsByAddress(HashMap>); - -impl AssociatedSlotsByAddress { - pub(crate) fn is_associated_slot(&self, address: Address, slot: U256) -> bool { - if slot == address.as_bytes().into() { - return true; - } - let Some(associated_slots) = self.0.get(&address) else { - return false; - }; - let Some(&next_smallest_slot) = associated_slots.range(..(slot + 1)).next_back() else { - return false; - }; - slot - next_smallest_slot < 128.into() - } - - pub(crate) fn addresses(&self) -> HashSet
{ - self.0.clone().into_keys().collect() - } -} - -/// Trait for tracing the simulation of a user operation. -#[cfg_attr(test, automock)] -#[async_trait] -pub trait SimulateValidationTracer: Send + Sync + 'static { - /// Traces the simulation of a user operation. - async fn trace_simulate_validation( - &self, - op: UserOperation, - block_id: BlockId, - max_validation_gas: u64, - ) -> anyhow::Result; -} - -/// Tracer implementation for the bundler's custom tracer. -#[derive(Debug)] -pub struct SimulateValidationTracerImpl -where - P: Provider, - E: EntryPoint, -{ - provider: Arc

, - entry_point: E, -} - -/// Runs the bundler's custom tracer on the entry point's `simulateValidation` -/// method for the provided user operation. - -#[async_trait] -impl SimulateValidationTracer for SimulateValidationTracerImpl -where - P: Provider, - E: EntryPoint, -{ - async fn trace_simulate_validation( - &self, - op: UserOperation, - block_id: BlockId, - max_validation_gas: u64, - ) -> anyhow::Result { - let tx = self - .entry_point - .simulate_validation(op.clone(), max_validation_gas) - .await?; - - let hh = op.clone().op_hc_hash(); - println!("HC tracer.rs debug_trace_call hh {:?}", hh); - let s2 = hybrid_compute::get_hc_op_statediff(hh, ethers::types::spoof::State::default()); - - println!("HC trace2 pre {:?} {:?} {:?}", op, self.entry_point.address(), tx); - SimulationTracerOutput::try_from( - self.provider - .debug_trace_call( - tx, - Some(block_id), - GethDebugTracingCallOptions { - tracing_options: GethDebugTracingOptions { - tracer: Some(GethDebugTracerType::JsTracer( - validation_tracer_js().to_string(), - )), - ..Default::default() - }, - state_overrides: Some(s2), - ..Default::default() - }, - ) - .await?, - ) - } -} - -impl SimulateValidationTracerImpl -where - P: Provider, - E: EntryPoint, -{ - /// Creates a new instance of the bundler's custom tracer. - pub fn new(provider: Arc

, entry_point: E) -> Self { - Self { - provider, - entry_point, - } - } -} - -fn validation_tracer_js() -> &'static str { - include_str!("../../tracer/dist/validationTracer.js").trim_end_matches(";export{};") -} - -pub(crate) fn parse_combined_tracer_str(combined: &str) -> anyhow::Result<(A, B)> -where - A: std::str::FromStr, - B: std::str::FromStr, - ::Err: std::error::Error + Send + Sync + 'static, - ::Err: std::error::Error + Send + Sync + 'static, -{ - let (a, b) = combined - .split_once(':') - .context("tracer combined should contain two parts")?; - Ok((a.parse()?, b.parse()?)) -} diff --git a/crates/sim/src/simulation/unsafe_sim.rs b/crates/sim/src/simulation/unsafe_sim.rs new file mode 100644 index 00000000..8c70c7fa --- /dev/null +++ b/crates/sim/src/simulation/unsafe_sim.rs @@ -0,0 +1,186 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. 
+ +use std::{marker::PhantomData, sync::Arc}; + +use ethers::types::H256; +use rundler_provider::{ + AggregatorOut, EntryPoint, Provider, SignatureAggregator, SimulationProvider, +}; +use rundler_types::{ + pool::SimulationViolation, EntityInfos, UserOperation, ValidTimeRange, ValidationError, +}; + +use crate::{ + SimulationError, SimulationResult, SimulationSettings as Settings, Simulator, ViolationError, +}; + +/// An unsafe simulator that can be used in place of a regular simulator +/// to extract the information needed from simulation while avoiding the use +/// of debug_traceCall. +/// +/// WARNING: This is "unsafe" for a reason. None of the ERC-7562 checks are +/// performed. +pub struct UnsafeSimulator { + provider: Arc

, + entry_point: E, + sim_settings: Settings, + _uo_type: PhantomData, +} + +impl UnsafeSimulator { + /// Creates a new unsafe simulator + pub fn new(provider: Arc

, entry_point: E, sim_settings: Settings) -> Self { + Self { + provider, + entry_point, + sim_settings, + _uo_type: PhantomData, + } + } +} + +#[async_trait::async_trait] +impl Simulator for UnsafeSimulator +where + UO: UserOperation, + P: Provider, + E: EntryPoint + SimulationProvider + SignatureAggregator + Clone, +{ + type UO = UO; + + // Run an unsafe simulation + // + // The only validation checks that are performed are signature checks + async fn simulate_validation( + &self, + op: UO, + block_hash: Option, + _expected_code_hash: Option, + ) -> Result { + tracing::info!("Performing unsafe simulation"); + + let (block_hash, block_number) = match block_hash { + // If we are given a block_hash, we return a None block number, avoiding an extra call + Some(block_hash) => (block_hash, None), + None => { + let hash_and_num = self + .provider + .get_latest_block_hash_and_number() + .await + .map_err(anyhow::Error::from)?; + (hash_and_num.0, Some(hash_and_num.1.as_u64())) + } + }; + + // simulate the validation + let validation_result = self + .entry_point + .call_simulate_validation( + op.clone(), + self.sim_settings.max_verification_gas, + Some(block_hash), + ) + .await; + + let validation_result = match validation_result { + Ok(res) => res, + Err(err) => match err { + ValidationError::Revert(revert) => { + return Err(SimulationError { + violation_error: vec![SimulationViolation::ValidationRevert(revert)].into(), + entity_infos: None, + }) + } + ValidationError::Other(err) => { + return Err(SimulationError { + violation_error: ViolationError::Other(err), + entity_infos: None, + }) + } + }, + }; + + let valid_until = if validation_result.return_info.valid_until == 0.into() { + u64::MAX.into() + } else { + validation_result.return_info.valid_until + }; + + let pre_op_gas = validation_result.return_info.pre_op_gas; + let valid_time_range = + ValidTimeRange::new(validation_result.return_info.valid_after, valid_until); + let requires_post_op = 
!validation_result.return_info.paymaster_context.is_empty(); + + let mut entity_infos = EntityInfos::default(); + entity_infos.set_sender(op.sender(), false); + if let Some(f) = op.factory() { + entity_infos.set_factory(f, false); + } + if let Some(p) = op.paymaster() { + entity_infos.set_paymaster(p, false); + } + if let Some(a) = validation_result.aggregator_info { + entity_infos.set_aggregator(a.address, false); + } + + let mut violations = vec![]; + + let aggregator = if let Some(aggregator_info) = validation_result.aggregator_info { + let agg_out = self + .entry_point + .validate_user_op_signature( + aggregator_info.address, + op, + self.sim_settings.max_verification_gas, + ) + .await?; + + match agg_out { + AggregatorOut::NotNeeded => None, + AggregatorOut::SuccessWithInfo(info) => Some(info), + AggregatorOut::ValidationReverted => { + violations.push(SimulationViolation::AggregatorValidationFailed); + None + } + } + } else { + None + }; + + if validation_result.return_info.account_sig_failed + || validation_result.return_info.paymaster_sig_failed + { + violations.push(SimulationViolation::InvalidSignature); + } + + if !violations.is_empty() { + Err(SimulationError { + violation_error: ViolationError::Violations(violations), + entity_infos: Some(entity_infos), + })? + } else { + Ok(SimulationResult { + mempools: vec![H256::zero()], + block_hash, + block_number, + pre_op_gas, + valid_time_range, + requires_post_op, + entity_infos, + aggregator, + ..Default::default() + }) + } + } +} diff --git a/crates/sim/src/simulation/v0_6/context.rs b/crates/sim/src/simulation/v0_6/context.rs new file mode 100644 index 00000000..913de1a7 --- /dev/null +++ b/crates/sim/src/simulation/v0_6/context.rs @@ -0,0 +1,362 @@ +// This file is part of Rundler. 
+// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +use std::{collections::HashSet, sync::Arc}; + +use ethers::{abi::AbiDecode, types::BlockId}; +use rundler_provider::{Provider, SimulationProvider}; +use rundler_types::{ + contracts::v0_6::i_entry_point::FailedOp, pool::SimulationViolation, v0_6::UserOperation, + EntityType, UserOperation as UserOperationTrait, ValidationOutput, +}; + +use super::{ + tracer::{SimulateValidationTracer, SimulateValidationTracerImpl}, + REQUIRED_VERIFICATION_GAS_LIMIT_BUFFER, +}; +use crate::{ + simulation::context::{ + self as sim_context, ValidationContext, + ValidationContextProvider as ValidationContextProviderTrait, + }, + SimulationSettings, ViolationError, +}; + +/// A provider for creating `ValidationContext` for entry point v0.6. 
+pub(crate) struct ValidationContextProvider { + simulate_validation_tracer: T, + sim_settings: SimulationSettings, +} + +#[async_trait::async_trait] +impl ValidationContextProviderTrait for ValidationContextProvider +where + T: SimulateValidationTracer, +{ + type UO = UserOperation; + + async fn get_context( + &self, + op: Self::UO, + block_id: BlockId, + ) -> Result, ViolationError> { + let factory_address = op.factory(); + let sender_address = op.sender; + let paymaster_address = op.paymaster(); + println!("HC simulation get_context op {:?}", op.clone()); + let tracer_out = self + .simulate_validation_tracer + .trace_simulate_validation(op.clone(), block_id) + .await?; + let num_phases = tracer_out.phases.len() as u32; + // Check if there are too many phases here, then check too few at the + // end. We are detecting cases where the entry point is broken. Too many + // phases definitely means it's broken, but too few phases could still + // mean the entry point is fine if one of the phases fails and it + // doesn't reach the end of execution. + if num_phases > 3 { + Err(ViolationError::Violations(vec![ + SimulationViolation::WrongNumberOfPhases(num_phases), + ]))? + } + let Some(ref revert_data) = tracer_out.revert_data else { + Err(ViolationError::Violations(vec![ + SimulationViolation::DidNotRevert, + ]))? + }; + let last_entity_type = + sim_context::entity_type_from_simulation_phase(tracer_out.phases.len() - 1).unwrap(); + + if let Ok(failed_op) = FailedOp::decode_hex(revert_data) { + let entity_addr = match last_entity_type { + EntityType::Factory => factory_address, + EntityType::Paymaster => paymaster_address, + EntityType::Account => Some(sender_address), + _ => None, + }; + Err(ViolationError::Violations(vec![ + SimulationViolation::UnintendedRevertWithMessage( + last_entity_type, + failed_op.reason, + entity_addr, + ), + ]))? 
+ } + let Ok(entry_point_out) = ValidationOutput::decode_v0_6_hex(revert_data) else { + let entity_addr = match last_entity_type { + EntityType::Factory => factory_address, + EntityType::Paymaster => paymaster_address, + EntityType::Account => Some(sender_address), + _ => None, + }; + Err(ViolationError::Violations(vec![ + SimulationViolation::UnintendedRevert(last_entity_type, entity_addr), + ]))? + }; + let entity_infos = sim_context::infos_from_validation_output( + factory_address, + sender_address, + paymaster_address, + &entry_point_out, + &self.sim_settings, + ); + + let associated_addresses = tracer_out.associated_slots_by_address.addresses(); + let has_factory = op.factory().is_some(); + Ok(ValidationContext { + op, + block_id, + entity_infos, + tracer_out, + entry_point_out, + associated_addresses, + accessed_addresses: HashSet::new(), + has_factory, + }) + } + + fn get_specific_violations( + &self, + context: &ValidationContext, + ) -> Vec { + let mut violations = vec![]; + + let &ValidationContext { + entry_point_out, + op, + .. + } = &context; + println!("HC trace entry_point_out {:?}", entry_point_out); + + if context.op.paymaster().is_some() + && !entry_point_out.return_info.paymaster_context.is_empty() + && !context.entity_infos.paymaster.unwrap().is_staked + { + // [EREP-050] (only v0.6) + violations.push(SimulationViolation::UnstakedPaymasterContext); + } + + // v0.6 doesn't distinguish between the different types of signature failures + // both of these will be set to true if the signature failed. + if entry_point_out.return_info.account_sig_failed + || entry_point_out.return_info.paymaster_sig_failed + { + violations.push(SimulationViolation::InvalidSignature); + } + + // This is a special case to cover a bug in the 0.6 entrypoint contract where a specially + // crafted UO can use extra verification gas that isn't caught during simulation, but when + // it runs on chain causes the transaction to revert. 
+ let verification_gas_used = entry_point_out + .return_info + .pre_op_gas + .saturating_sub(op.pre_verification_gas()); + let verification_buffer = op + .total_verification_gas_limit() + .saturating_sub(verification_gas_used); + if verification_buffer < REQUIRED_VERIFICATION_GAS_LIMIT_BUFFER { + violations.push(SimulationViolation::VerificationGasLimitBufferTooLow( + op.total_verification_gas_limit(), + verification_gas_used + REQUIRED_VERIFICATION_GAS_LIMIT_BUFFER, + )); + } + + violations + } +} + +impl ValidationContextProvider> +where + P: Provider, + E: SimulationProvider, +{ + /// Creates a new `ValidationContextProvider` for entry point v0.6 with the given provider and entry point. + pub(crate) fn new(provider: Arc

, entry_point: E, sim_settings: SimulationSettings) -> Self { + Self { + simulate_validation_tracer: SimulateValidationTracerImpl::new( + provider, + entry_point, + sim_settings.max_verification_gas, + sim_settings.tracer_timeout.clone(), + ), + sim_settings, + } + } +} + +#[cfg(test)] +mod tests { + use std::{collections::HashMap, str::FromStr}; + + use ethers::{ + abi::AbiEncode, + types::{Address, Bytes, U256}, + utils::hex, + }; + use rundler_types::{contracts::v0_6::i_entry_point::FailedOp, v0_6::UserOperation, Opcode}; + use sim_context::ContractInfo; + + use super::*; + use crate::simulation::context::{Phase, TracerOutput}; + + fn get_test_tracer_output() -> TracerOutput { + TracerOutput { + accessed_contracts: HashMap::from([ + ( + Address::from_str("0x5ff137d4b0fdcd49dca30c7cf57e578a026d2789").unwrap(), + ContractInfo { + header: "0x608060".to_string(), + opcode: Opcode::CALL, + length: 32, + } + ), + ( + Address::from_str("0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4").unwrap(), + ContractInfo { + header: "0x608060".to_string(), + opcode: Opcode::CALL, + length: 32, + } + ), + ( + Address::from_str("0x8abb13360b87be5eeb1b98647a016add927a136c").unwrap(), + ContractInfo { + header: "0x608060".to_string(), + opcode: Opcode::CALL, + length: 32, + } + ), + ]), + associated_slots_by_address: serde_json::from_str(r#" + { + "0x0000000000000000000000000000000000000000": [ + "0xd5c1ebdd81c5c7bebcd52bc11c8d37f7038b3c64f849c2ca58a022abeab1adae", + "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5" + ], + "0xb856dbd4fa1a79a46d426f537455e7d3e79ab7c4": [ + "0x3072884cc37d411af7360b34f105e1e860b1631783232a4f2d5c094d365cdaab", + "0xf5357e1da3acf909ceaed3492183cbad85a3c9e1f0076495f66d3eed05219bd5", + "0xf264fff4db20d04721712f34a6b5a8bca69a212345e40a92101082e79bdd1f0a" + ] + } + "#).unwrap(), + factory_called_create2_twice: false, + expected_storage: serde_json::from_str(r#" + { + "0x5ff137d4b0fdcd49dca30c7cf57e578a026d2789": { + 
"0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb5": "0x0000000000000000000000000000000000000000000000000000000000000000", + "0xad3228b676f7d3cd4284a5443f17f1962b36e491b30a40b2405849e597ba5fb6": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + } + "#).unwrap(), + phases: vec![ + Phase { + called_banned_entry_point_method: false, + called_non_entry_point_with_value: false, + forbidden_opcodes_used: vec![], + forbidden_precompiles_used: vec![], + ran_out_of_gas: false, + storage_accesses: HashMap::new(), + undeployed_contract_accesses: vec![], + ext_code_access_info: HashMap::new(), + }, + Phase { + called_banned_entry_point_method: false, + called_non_entry_point_with_value: true, + forbidden_opcodes_used: vec![], + forbidden_precompiles_used: vec![], + ran_out_of_gas: false, + storage_accesses: HashMap::new(), + undeployed_contract_accesses: vec![], + ext_code_access_info: HashMap::new(), + }, + Phase { + called_banned_entry_point_method: false, + called_non_entry_point_with_value: false, + forbidden_opcodes_used: vec![], + forbidden_precompiles_used: vec![], + ran_out_of_gas: false, + storage_accesses: HashMap::new(), + undeployed_contract_accesses: vec![], + ext_code_access_info: HashMap::new(), + } + ], + revert_data: 
Some("0xe0cff05f00000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000014eff00000000000000000000000000000000000000000000000000000b7679c50c24000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffff00000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000".into()), + } + } + + mockall::mock! { + Tracer {} + + #[async_trait::async_trait] + impl SimulateValidationTracer for Tracer { + async fn trace_simulate_validation( + &self, + op: UserOperation, + block_id: BlockId, + ) -> anyhow::Result; + } + } + + #[tokio::test] + async fn test_create_context_two_phases_unintended_revert() { + let mut tracer = MockTracer::new(); + + tracer.expect_trace_simulate_validation().returning(|_, _| { + let mut tracer_output = get_test_tracer_output(); + tracer_output.revert_data = Some(hex::encode( + FailedOp { + op_index: U256::from(100), + reason: "AA23 reverted (or OOG)".to_string(), + } + .encode(), + )); + Ok(tracer_output) + }); + + let user_operation = UserOperation { + sender: Address::from_str("b856dbd4fa1a79a46d426f537455e7d3e79ab7c4").unwrap(), + nonce: U256::from(264), + init_code: Bytes::from_str("0x").unwrap(), + call_data: 
Bytes::from_str("0xb61d27f6000000000000000000000000b856dbd4fa1a79a46d426f537455e7d3e79ab7c4000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000004d087d28800000000000000000000000000000000000000000000000000000000").unwrap(), + call_gas_limit: U256::from(9100), + verification_gas_limit: U256::from(64805), + pre_verification_gas: U256::from(46128), + max_fee_per_gas: U256::from(105000100), + max_priority_fee_per_gas: U256::from(105000000), + paymaster_and_data: Bytes::from_str("0x").unwrap(), + signature: Bytes::from_str("0x98f89993ce573172635b44ef3b0741bd0c19dd06909d3539159f6d66bef8c0945550cc858b1cf5921dfce0986605097ba34c2cf3fc279154dd25e161ea7b3d0f1c").unwrap(), + }; + + let context = ValidationContextProvider { + simulate_validation_tracer: tracer, + sim_settings: Default::default(), + }; + + let res = context + .get_context(user_operation.clone(), BlockId::Number(0.into())) + .await; + + assert!(matches!( + res, + Err(ViolationError::Violations(violations)) if matches!( + violations.first(), + Some(&SimulationViolation::UnintendedRevertWithMessage( + EntityType::Paymaster, + ref reason, + _ + )) if reason == "AA23 reverted (or OOG)" + ) + )); + } +} diff --git a/crates/provider/src/ethers/stake_manager.rs b/crates/sim/src/simulation/v0_6/mod.rs similarity index 60% rename from crates/provider/src/ethers/stake_manager.rs rename to crates/sim/src/simulation/v0_6/mod.rs index a2cc9073..c0e4a868 100644 --- a/crates/provider/src/ethers/stake_manager.rs +++ b/crates/sim/src/simulation/v0_6/mod.rs @@ -11,18 +11,12 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. 
-use anyhow::Result; -use ethers::{providers::Middleware, types::Address}; -use rundler_types::{contracts::i_stake_manager::IStakeManager, DepositInfo}; +use ethers::types::U256; -use crate::StakeManager; +mod context; +pub(crate) use context::ValidationContextProvider; -#[async_trait::async_trait] -impl StakeManager for IStakeManager -where - M: Middleware + 'static, -{ - async fn get_deposit_info(&self, address: Address) -> Result { - Ok(IStakeManager::get_deposit_info(self, address).await?) - } -} +mod tracer; + +/// Required buffer for verification gas limit when targeting the 0.6 entrypoint contract +pub(crate) const REQUIRED_VERIFICATION_GAS_LIMIT_BUFFER: U256 = U256([2000, 0, 0, 0]); diff --git a/crates/sim/src/simulation/v0_6/tracer.rs b/crates/sim/src/simulation/v0_6/tracer.rs new file mode 100644 index 00000000..56392d55 --- /dev/null +++ b/crates/sim/src/simulation/v0_6/tracer.rs @@ -0,0 +1,124 @@ +// This file is part of Rundler. // +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. 
+ +use std::{convert::TryFrom, fmt::Debug, sync::Arc}; + +use anyhow::bail; +use async_trait::async_trait; +use ethers::types::{ + BlockId, GethDebugTracerType, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, +}; +use rundler_provider::{Provider, SimulationProvider}; +use rundler_types::UserOperation as UserOperation2; +use rundler_types::v0_6::UserOperation; +use serde::Deserialize; + +use crate::simulation::context::TracerOutput; +use rundler_types::hybrid_compute; + +impl TryFrom for TracerOutput { + type Error = anyhow::Error; + fn try_from(trace: GethTrace) -> Result { + match trace { + GethTrace::Unknown(value) => Ok(TracerOutput::deserialize(&value)?), + GethTrace::Known(_) => { + bail!("Failed to deserialize simulation trace") + } + } + } +} + +/// Trait for tracing the simulation of a user operation. +#[async_trait] +pub(super) trait SimulateValidationTracer: Send + Sync + 'static { + /// Traces the simulation of a user operation. + async fn trace_simulate_validation( + &self, + op: UserOperation, + block_id: BlockId, + ) -> anyhow::Result; +} + +/// Tracer implementation for the bundler's custom tracer. +#[derive(Debug)] +pub(crate) struct SimulateValidationTracerImpl { + provider: Arc

, + entry_point: E, + max_validation_gas: u64, + tracer_timeout: String, +} + +/// Runs the bundler's custom tracer on the entry point's `simulateValidation` +/// method for the provided user operation. + +#[async_trait] +impl SimulateValidationTracer for SimulateValidationTracerImpl +where + P: Provider, + E: SimulationProvider, +{ + async fn trace_simulate_validation( + &self, + op: UserOperation, + block_id: BlockId, + ) -> anyhow::Result { + let (tx, state_override) = self + .entry_point + .get_tracer_simulate_validation_call(op.clone(), self.max_validation_gas); + + let hh = op.clone().hc_hash(); + println!("HC tracer.rs debug_trace_call hh {:?}", hh); + let s2 = hybrid_compute::get_hc_op_statediff(hh, state_override); + + println!("HC trace2 pre {:?} {:?}", op, tx); + + TracerOutput::try_from( + self.provider + .debug_trace_call( + tx, + Some(block_id), + GethDebugTracingCallOptions { + tracing_options: GethDebugTracingOptions { + tracer: Some(GethDebugTracerType::JsTracer( + validation_tracer_js().to_string(), + )), + timeout: Some(self.tracer_timeout.clone()), + ..Default::default() + }, + state_overrides: Some(s2), + }, + ) + .await?, + ) + } +} + +impl SimulateValidationTracerImpl { + /// Creates a new instance of the bundler's custom tracer. + pub(crate) fn new( + provider: Arc

, + entry_point: E, + max_validation_gas: u64, + tracer_timeout: String, + ) -> Self { + Self { + provider, + entry_point, + max_validation_gas, + tracer_timeout, + } + } +} + +fn validation_tracer_js() -> &'static str { + include_str!("../../../tracer/dist/validationTracerV0_6.js").trim_end_matches(";export{};") +} diff --git a/crates/sim/src/simulation/v0_7/context.rs b/crates/sim/src/simulation/v0_7/context.rs new file mode 100644 index 00000000..26ff9cef --- /dev/null +++ b/crates/sim/src/simulation/v0_7/context.rs @@ -0,0 +1,501 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. 
+ +use std::{ + collections::{BTreeSet, HashMap, HashSet}, + sync::Arc, +}; + +use anyhow::{bail, Context}; +use ethers::{ + abi::AbiDecode, + types::{Address, BlockId, Bytes, H160, U256}, + utils::{hex::FromHex, keccak256}, +}; +use rundler_provider::{EntryPoint, Provider, SimulationProvider}; +use rundler_types::{ + contracts::v0_7::{ + entry_point_simulations::{FailedOpWithRevert, SimulateValidationReturn}, + i_entry_point::FailedOp, + }, + pool::SimulationViolation, + v0_7::UserOperation, + EntityInfos, EntityType, Opcode, UserOperation as UserOperationTrait, ValidationOutput, + ValidationRevert, +}; +use rundler_utils::eth::ContractRevertError; + +use super::tracer::{ + CallInfo, ExitType, MethodInfo, SimulateValidationTracer, SimulateValidationTracerImpl, + TopLevelCallInfo, TracerOutput, +}; +use crate::{ + simulation::context::{ + self as sim_context, AccessInfo, AssociatedSlotsByAddress, Phase, + TracerOutput as ContextTracerOutput, ValidationContext, + ValidationContextProvider as ValidationContextProviderTrait, + }, + SimulationSettings, ViolationError, +}; + +// Banned opcodes +// +// Some banned opcodes (i.e. CREATE2) have special handling and aren't on this list. 
+const BANNED_OPCODES: &[Opcode] = &[ + Opcode::GAS, + Opcode::GASPRICE, + Opcode::GASLIMIT, + Opcode::DIFFICULTY, + Opcode::TIMESTAMP, + Opcode::BASEFEE, + Opcode::BLOCKHASH, + Opcode::BLOBBASEFEE, + Opcode::BLOBHASH, + Opcode::NUMBER, + Opcode::SELFBALANCE, + Opcode::BALANCE, + Opcode::ORIGIN, + Opcode::CREATE, + Opcode::COINBASE, + Opcode::SELFDESTRUCT, +]; + +// Pre calculated method signatures +const SIMULATE_VALIDATION_METHOD: &str = "0xee219423"; +const CREATE_SENDER_METHOD: &str = "0x570e1a36"; +const VALIDATE_USER_OP_METHOD: &str = "0x19822f7c"; +const VALIDATE_PAYMASTER_USER_OP_METHOD: &str = "0x52b7512c"; +const DEPOSIT_TO_METHOD: &str = "0xb760faf9"; +// Max precompile address 0x10000 +const MAX_PRECOMPILE_ADDRESS: Address = + H160([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]); + +/// A provider for creating `ValidationContext` for entry point v0.7. +pub(crate) struct ValidationContextProvider { + simulate_validation_tracer: T, + sim_settings: SimulationSettings, + entry_point_address: Address, +} + +#[async_trait::async_trait] +impl ValidationContextProviderTrait for ValidationContextProvider +where + T: SimulateValidationTracer, +{ + type UO = UserOperation; + + async fn get_context( + &self, + op: Self::UO, + block_id: BlockId, + ) -> Result, ViolationError> { + let tracer_out = self + .simulate_validation_tracer + .trace_simulate_validation(op.clone(), block_id) + .await?; + + let call_stack = self.parse_call_stack(tracer_out.calls.clone())?; + + let top = call_stack.last().context("No calls in call stack")?; + if top.to != self.entry_point_address { + Err(anyhow::anyhow!( + "Top call in call stack is not to entry point" + ))? + } + if top.method != SIMULATE_VALIDATION_METHOD { + Err(anyhow::anyhow!( + "Top call in call stack is not to simulateValidation" + ))? + } + + let mut entry_point_out = match self.parse_top_call(top)? 
{ + Ok(validation_output) => validation_output, + Err(revert) => Err(ViolationError::Violations(vec![ + SimulationViolation::ValidationRevert(revert), + ]))?, + }; + if entry_point_out.return_info.valid_until == 0.into() { + entry_point_out.return_info.valid_until = u64::MAX.into(); + } + + let entity_infos = sim_context::infos_from_validation_output( + op.factory(), + op.sender(), + op.paymaster(), + &entry_point_out, + &self.sim_settings, + ); + + let mut tracer_out = self.parse_tracer_out(&op, tracer_out)?; + + // Check the call stack for calls with value or to the entry point + for (i, call) in call_stack.iter().enumerate() { + if call.to == self.entry_point_address + && (call.from != self.entry_point_address && call.from != Address::zero()) + { + // [OP-053] - can only call fallback from sender + if call.method == "0x" && call.from == op.sender() { + continue; + } + // [OP-052] - can only call depositTo() from sender or factory + if call.method == DEPOSIT_TO_METHOD + && (call.from == op.sender() || Some(call.from) == op.factory()) + { + continue; + } + + // [OP-054] all other calls to entry point are banned + let phase = Self::get_nearest_entity_phase(&call_stack[i..], &entity_infos); + tracer_out.phases[phase].called_banned_entry_point_method = true; + } + + // [OP-061] calls with value are banned, except for the calls above + if call.value.is_some_and(|v| v != U256::zero()) { + let phase = Self::get_nearest_entity_phase(&call_stack[i..], &entity_infos); + tracer_out.phases[phase].called_non_entry_point_with_value = true; + } + } + + Ok(ValidationContext { + has_factory: op.factory().is_some(), + op, + block_id, + entity_infos, + entry_point_out, + accessed_addresses: HashSet::new(), + associated_addresses: tracer_out.associated_slots_by_address.addresses(), + tracer_out, + }) + } + + /// Get the violations specific to the particular entry point this provider targets. 
+ fn get_specific_violations( + &self, + context: &ValidationContext, + ) -> Vec { + let mut violations = vec![]; + + let &ValidationContext { + entry_point_out, .. + } = &context; + + if entry_point_out.return_info.account_sig_failed { + violations.push(SimulationViolation::InvalidAccountSignature); + } + if entry_point_out.return_info.paymaster_sig_failed { + violations.push(SimulationViolation::InvalidPaymasterSignature); + } + + violations + } +} + +#[derive(Debug)] +#[allow(unused)] +struct CallWithResult { + call_type: Opcode, + method: String, + to: Address, + from: Address, + value: Option, + gas: u64, + gas_used: u64, + exit_type: ExitType, + exit_data: String, +} + +impl ValidationContextProvider { + fn parse_call_stack(&self, mut calls: Vec) -> anyhow::Result> { + let mut call_stack = Vec::new(); + let mut ret: Vec = Vec::new(); + let last_exit_info = calls.pop().context("call stack had no calls")?; + for call in calls { + match call { + CallInfo::Exit(exit_info) => { + let method_info: MethodInfo = call_stack + .pop() + .context("unbalanced call stack, exit without method call")?; + ret.push(CallWithResult { + call_type: method_info.method_type, + method: method_info.method, + to: method_info.to, + from: method_info.from, + value: method_info.value, + gas: method_info.gas, + gas_used: exit_info.gas_used, + exit_type: exit_info.exit_type, + exit_data: exit_info.data, + }); + } + CallInfo::Method(info) => { + call_stack.push(info); + } + } + } + + // final call is simulate handle ops, but is not part of the call stack + match last_exit_info { + CallInfo::Exit(exit_info) => { + ret.push(CallWithResult { + call_type: Opcode::CALL, + method: SIMULATE_VALIDATION_METHOD.to_string(), + to: self.entry_point_address, + from: Address::zero(), + value: None, + gas: 0, + gas_used: exit_info.gas_used, + exit_type: exit_info.exit_type, + exit_data: exit_info.data, + }); + } + CallInfo::Method(info) => { + bail!("Final call stack entry is not an exit: {info:?}") + } + 
} + + Ok(ret) + } + + fn parse_top_call( + &self, + top: &CallWithResult, + ) -> anyhow::Result> { + match top.exit_type { + ExitType::Revert => { + if let Ok(result) = FailedOpWithRevert::decode_hex(top.exit_data.clone()) { + let inner_revert_reason = ContractRevertError::decode(&result.inner) + .ok() + .map(|inner_result| inner_result.reason); + Ok(Err(ValidationRevert::Operation { + entry_point_reason: result.reason, + inner_revert_data: result.inner, + inner_revert_reason, + })) + } else if let Ok(failed_op) = FailedOp::decode_hex(top.exit_data.clone()) { + Ok(Err(ValidationRevert::EntryPoint(failed_op.reason))) + } else if let Ok(err) = ContractRevertError::decode_hex(top.exit_data.clone()) { + Ok(Err(ValidationRevert::EntryPoint(err.reason))) + } else { + Ok(Err(ValidationRevert::Unknown( + Bytes::from_hex(top.exit_data.clone()) + .context("failed to parse exit data has hex")?, + ))) + } + } + ExitType::Return => { + let b = Bytes::from_hex(top.exit_data.clone()) + .context("faled to parse exit data as hex")?; + if let Ok(res) = SimulateValidationReturn::decode(&b) { + Ok(Ok(res.0.into())) + } else { + bail!("Failed to decode validation output {}", top.exit_data); + } + } + } + } + + fn parse_tracer_out( + &self, + op: &UserOperation, + tracer_out: TracerOutput, + ) -> anyhow::Result { + let mut phases = vec![Phase::default(); 3]; + let mut factory_called_create2_twice = false; + + // Check factory + if let Some(call_from_entry_point) = tracer_out + .calls_from_entry_point + .iter() + .find(|c| c.top_level_method_sig == CREATE_SENDER_METHOD) + { + phases[0] = Self::parse_call_to_phase(call_from_entry_point, EntityType::Factory); + // [OP-031] - create call can only be called once + if let Some(count) = call_from_entry_point.opcodes.get(&Opcode::CREATE2) { + if *count > 1 { + factory_called_create2_twice = true; + } + } + } + + // Check account + if let Some(call_from_entry_point) = tracer_out + .calls_from_entry_point + .iter() + .find(|c| 
c.top_level_method_sig == VALIDATE_USER_OP_METHOD) + { + phases[1] = Self::parse_call_to_phase(call_from_entry_point, EntityType::Account); + } + + // Check paymaster + if let Some(call_from_entry_point) = tracer_out + .calls_from_entry_point + .iter() + .find(|c| c.top_level_method_sig == VALIDATE_PAYMASTER_USER_OP_METHOD) + { + phases[2] = Self::parse_call_to_phase(call_from_entry_point, EntityType::Paymaster); + } + + // Accessed contracts + let accessed_contracts = tracer_out + .calls_from_entry_point + .iter() + .flat_map(|call| call.contract_info.clone()) + .collect(); + + // Associated slots + let factory = op + .factory() + .map(|f| (f, format!("0x000000000000000000000000{f:x}"))); + let paymaster = op + .paymaster() + .map(|p| (p, format!("0x000000000000000000000000{p:x}"))); + let sender = ( + op.sender(), + format!("0x000000000000000000000000{:x}", op.sender()), + ); + + let mut associated_slots_by_address: HashMap> = HashMap::new(); + for k in &tracer_out.keccak { + if let Some((f, addr)) = &factory { + Self::check_associated_slot(addr, *f, k, &mut associated_slots_by_address)?; + } + if let Some((p, addr)) = &paymaster { + Self::check_associated_slot(addr, *p, k, &mut associated_slots_by_address)?; + } + Self::check_associated_slot(&sender.1, sender.0, k, &mut associated_slots_by_address)?; + } + + Ok(ContextTracerOutput { + phases, + revert_data: None, + accessed_contracts, + associated_slots_by_address: AssociatedSlotsByAddress(associated_slots_by_address), + factory_called_create2_twice, + expected_storage: tracer_out.expected_storage, + }) + } + + fn parse_call_to_phase(call: &TopLevelCallInfo, entity_type: EntityType) -> Phase { + // [OP-011] - banned opcodes + // [OP-012] - tracer will not add GAS to list if followed by *CALL + let mut forbidden_opcodes_used = vec![]; + for opcode in call.opcodes.keys() { + if BANNED_OPCODES.contains(opcode) + || (*opcode == Opcode::CREATE2 && entity_type != EntityType::Factory) + // [OP-031] - CREATE2 allowed by 
factory + { + forbidden_opcodes_used + .push(format!("{}:{}", call.top_level_target_address, opcode)); + } + } + + let storage_accesses = call + .access + .iter() + .map(|(address, info)| { + let reads = info.reads.iter().map(|(slot, value)| (*slot, *value)); + let writes = info.writes.iter().map(|(slot, count)| (*slot, *count)); + ( + *address, + AccessInfo { + reads: reads.collect(), + writes: writes.collect(), + }, + ) + }) + .collect(); + + let mut forbidden_precompiles_used = vec![]; + let mut undeployed_contract_accesses = vec![]; + call.contract_info.iter().for_each(|(address, info)| { + if info.length == 0 { + if *address < MAX_PRECOMPILE_ADDRESS { + // [OP-062] - banned precompiles + // The tracer catches any allowed precompiles and does not add them to this list + forbidden_precompiles_used + .push(format!("{}:{}", call.top_level_target_address, *address,)); + } else { + // [OP-041] + undeployed_contract_accesses.push(*address); + } + } + }); + + Phase { + forbidden_opcodes_used, + forbidden_precompiles_used, + storage_accesses, + called_banned_entry_point_method: false, // set during call stack parsing + called_non_entry_point_with_value: false, // set during call stack parsing + // [OP-020] + ran_out_of_gas: call.oog.unwrap_or(false), + undeployed_contract_accesses, + ext_code_access_info: call.ext_code_access_info.clone(), + } + } + + fn check_associated_slot( + addr_str: &str, + addr: Address, + k: &str, + associated_slots: &mut HashMap>, + ) -> anyhow::Result<()> { + if k.starts_with(addr_str) { + associated_slots.entry(addr).or_default().insert( + keccak256(Bytes::from_hex(k).context("failed to parse keccak as hex")?).into(), + ); + } + Ok(()) + } + + fn get_nearest_entity_phase(calls: &[CallWithResult], entities: &EntityInfos) -> usize { + // Call stack is ordered in order in which calls complete. + // To attribute a particular call to an entity, scan from that call forward until + // an entity address is found. 
+ // If no entity address is found, attribute to the account, as the account must exist. + calls + .iter() + .find_map(|c| entities.type_from_address(c.to)) + .map(entity_type_to_phase) + .unwrap_or(1) + } +} + +fn entity_type_to_phase(entity_type: EntityType) -> usize { + match entity_type { + EntityType::Factory => 0, + EntityType::Account => 1, + EntityType::Paymaster => 2, + EntityType::Aggregator => 1, // map aggregator to account + } +} + +impl ValidationContextProvider> +where + P: Provider, + E: EntryPoint + SimulationProvider, +{ + /// Creates a new `ValidationContextProvider` for entry point v0.7 with the given provider and entry point. + pub(crate) fn new(provider: Arc

, entry_point: E, sim_settings: SimulationSettings) -> Self { + Self { + entry_point_address: entry_point.address(), + simulate_validation_tracer: SimulateValidationTracerImpl::new( + provider, + entry_point, + sim_settings.max_verification_gas, + sim_settings.tracer_timeout.clone(), + ), + sim_settings, + } + } +} diff --git a/crates/sim/src/simulation/v0_7/mod.rs b/crates/sim/src/simulation/v0_7/mod.rs new file mode 100644 index 00000000..3550e2db --- /dev/null +++ b/crates/sim/src/simulation/v0_7/mod.rs @@ -0,0 +1,17 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +mod context; +pub(crate) use context::ValidationContextProvider; + +mod tracer; diff --git a/crates/sim/src/simulation/v0_7/tracer.rs b/crates/sim/src/simulation/v0_7/tracer.rs new file mode 100644 index 00000000..51aea7c1 --- /dev/null +++ b/crates/sim/src/simulation/v0_7/tracer.rs @@ -0,0 +1,191 @@ +// This file is part of Rundler. // +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +use std::{collections::HashMap, convert::TryFrom, fmt::Debug, sync::Arc}; + +use anyhow::bail; +use async_trait::async_trait; +use ethers::types::{ + Address, BlockId, GethDebugTracerType, GethDebugTracingCallOptions, GethDebugTracingOptions, + GethTrace, U256, +}; +use rundler_provider::{Provider, SimulationProvider}; +use rundler_types::{v0_7::UserOperation, Opcode}; +use serde::Deserialize; + +use crate::{simulation::context::ContractInfo, ExpectedStorage}; + +#[derive(Clone, Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(unused)] +pub(super) struct TracerOutput { + pub(super) calls_from_entry_point: Vec, + pub(super) keccak: Vec, + pub(super) calls: Vec, + pub(super) expected_storage: ExpectedStorage, + pub(super) logs: Vec, + pub(super) debug: Option>, +} + +#[derive(Clone, Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub(super) struct TopLevelCallInfo { + pub(super) top_level_method_sig: String, + pub(super) top_level_target_address: String, + pub(super) opcodes: HashMap, + pub(super) access: HashMap, + pub(super) contract_info: HashMap, + pub(super) ext_code_access_info: HashMap, + pub(super) oog: Option, +} + +#[derive(Clone, Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub(super) struct AccessInfo { + pub(super) reads: HashMap, + pub(super) writes: HashMap, +} + +#[derive(Clone, Debug, Deserialize)] +#[serde(rename_all = "camelCase", untagged)] +pub(super) enum CallInfo { + Exit(ExitInfo), + Method(MethodInfo), +} + +#[derive(Clone, Debug, Deserialize)] +pub(super) struct ExitInfo { + #[serde(rename = "type")] + pub(super) exit_type: ExitType, + #[serde(rename = "gasUsed")] + pub(super) gas_used: u64, + pub(super) data: String, +} + +#[derive(Clone, Debug, Deserialize)] +#[serde(rename_all = "UPPERCASE")] 
+pub(super) enum ExitType { + Return, + Revert, +} + +#[derive(Clone, Debug, Deserialize)] +pub(super) struct MethodInfo { + #[serde(rename = "type")] + pub(super) method_type: Opcode, + pub(super) from: Address, + pub(super) to: Address, + pub(super) method: String, + pub(super) value: Option, + pub(super) gas: u64, +} + +#[derive(Clone, Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +#[allow(unused)] +pub(super) struct LogInfo { + pub(super) topics: Vec, + pub(super) data: String, +} + +impl TryFrom for TracerOutput { + type Error = anyhow::Error; + fn try_from(trace: GethTrace) -> Result { + match trace { + GethTrace::Unknown(value) => Ok(TracerOutput::deserialize(&value)?), + GethTrace::Known(_) => { + bail!("Failed to deserialize simulation trace") + } + } + } +} + +/// Trait for tracing the simulation of a user operation. +#[async_trait] +pub(super) trait SimulateValidationTracer: Send + Sync + 'static { + /// Traces the simulation of a user operation. + async fn trace_simulate_validation( + &self, + op: UserOperation, + block_id: BlockId, + ) -> anyhow::Result; +} + +/// Tracer implementation for the bundler's custom tracer. +#[derive(Debug)] +pub(crate) struct SimulateValidationTracerImpl { + provider: Arc

, + entry_point: E, + max_validation_gas: u64, + tracer_timeout: String, +} + +/// Runs the bundler's custom tracer on the entry point's `simulateValidation` +/// method for the provided user operation. + +#[async_trait] +impl SimulateValidationTracer for SimulateValidationTracerImpl +where + P: Provider, + E: SimulationProvider, +{ + async fn trace_simulate_validation( + &self, + op: UserOperation, + block_id: BlockId, + ) -> anyhow::Result { + let (tx, state_override) = self + .entry_point + .get_tracer_simulate_validation_call(op, self.max_validation_gas); + + let out = self + .provider + .debug_trace_call( + tx, + Some(block_id), + GethDebugTracingCallOptions { + tracing_options: GethDebugTracingOptions { + tracer: Some(GethDebugTracerType::JsTracer( + validation_tracer_js().to_string(), + )), + timeout: Some(self.tracer_timeout.clone()), + ..Default::default() + }, + state_overrides: Some(state_override), + }, + ) + .await?; + + TracerOutput::try_from(out) + } +} + +impl SimulateValidationTracerImpl { + /// Creates a new instance of the bundler's custom tracer. + pub(crate) fn new( + provider: Arc

, + entry_point: E, + max_validation_gas: u64, + tracer_timeout: String, + ) -> Self { + Self { + provider, + entry_point, + max_validation_gas, + tracer_timeout, + } + } +} + +fn validation_tracer_js() -> &'static str { + include_str!("../../../tracer/dist/validationTracerV0_7.js").trim_end_matches(";export{};") +} diff --git a/crates/sim/src/simulation/validation_results.rs b/crates/sim/src/simulation/validation_results.rs deleted file mode 100644 index 3699596f..00000000 --- a/crates/sim/src/simulation/validation_results.rs +++ /dev/null @@ -1,142 +0,0 @@ -// This file is part of Rundler. -// -// Rundler is free software: you can redistribute it and/or modify it under the -// terms of the GNU Lesser General Public License as published by the Free Software -// Foundation, either version 3 of the License, or (at your option) any later version. -// -// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; -// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License along with Rundler. -// If not, see https://www.gnu.org/licenses/. - -use ethers::{ - abi, - abi::{AbiDecode, AbiError}, - types::{Address, Bytes, U256}, -}; -use rundler_types::{ - contracts::entry_point::{ValidationResult, ValidationResultWithAggregation}, - Timestamp, -}; - -/// Equivalent to the generated `ValidationResult` or -/// `ValidationResultWithAggregation` from `EntryPoint`, but with named structs -/// instead of tuples and with a helper for deserializing. 
-#[derive(Debug)] -pub(crate) struct ValidationOutput { - pub(crate) return_info: ValidationReturnInfo, - pub(crate) sender_info: StakeInfo, - pub(crate) factory_info: StakeInfo, - pub(crate) paymaster_info: StakeInfo, - pub(crate) aggregator_info: Option, -} - -impl AbiDecode for ValidationOutput { - fn decode(bytes: impl AsRef<[u8]>) -> Result { - if let Ok(result) = ValidationResult::decode(bytes.as_ref()) { - return Ok(result.into()); - } - if let Ok(result) = ValidationResultWithAggregation::decode(bytes) { - return Ok(result.into()); - } - Err(AbiError::DecodingError(abi::Error::InvalidData)) - } -} - -impl From for ValidationOutput { - fn from(value: ValidationResult) -> Self { - let ValidationResult { - return_info, - sender_info, - factory_info, - paymaster_info, - } = value; - Self { - return_info: return_info.into(), - sender_info: sender_info.into(), - factory_info: factory_info.into(), - paymaster_info: paymaster_info.into(), - aggregator_info: None, - } - } -} - -impl From for ValidationOutput { - fn from(value: ValidationResultWithAggregation) -> Self { - let ValidationResultWithAggregation { - return_info, - sender_info, - factory_info, - paymaster_info, - aggregator_info, - } = value; - Self { - return_info: return_info.into(), - sender_info: sender_info.into(), - factory_info: factory_info.into(), - paymaster_info: paymaster_info.into(), - aggregator_info: Some(aggregator_info.into()), - } - } -} - -#[derive(Debug)] -pub(crate) struct ValidationReturnInfo { - pub(crate) pre_op_gas: U256, - pub(crate) sig_failed: bool, - pub(crate) valid_after: Timestamp, - pub(crate) valid_until: Timestamp, - pub(crate) paymaster_context: Bytes, -} - -impl From<(U256, U256, bool, u64, u64, Bytes)> for ValidationReturnInfo { - fn from(value: (U256, U256, bool, u64, u64, Bytes)) -> Self { - let ( - pre_op_gas, - _, /* prefund */ - sig_failed, - valid_after, - valid_until, - paymaster_context, - ) = value; - Self { - pre_op_gas, - sig_failed, - valid_after: 
valid_after.into(), - valid_until: valid_until.into(), - paymaster_context, - } - } -} - -#[derive(Clone, Copy, Debug)] -pub(crate) struct StakeInfo { - pub(crate) stake: U256, - pub(crate) unstake_delay_sec: U256, -} - -impl From<(U256, U256)> for StakeInfo { - fn from((stake, unstake_delay_sec): (U256, U256)) -> Self { - Self { - stake, - unstake_delay_sec, - } - } -} - -#[derive(Clone, Copy, Debug)] -pub(crate) struct AggregatorInfo { - pub(crate) address: Address, - pub(crate) stake_info: StakeInfo, -} - -impl From<(Address, (U256, U256))> for AggregatorInfo { - fn from((address, stake_info): (Address, (U256, U256))) -> Self { - Self { - address, - stake_info: stake_info.into(), - } - } -} diff --git a/crates/sim/src/types.rs b/crates/sim/src/types.rs index e1612604..3c45e139 100644 --- a/crates/sim/src/types.rs +++ b/crates/sim/src/types.rs @@ -14,13 +14,13 @@ use std::collections::{btree_map, BTreeMap}; use anyhow::bail; -use ethers::types::{Address, H256}; +use ethers::types::{Address, H256, U256}; use serde::{Deserialize, Serialize}; /// The expected storage values for a user operation that must /// be checked to determine if this operation is valid. #[derive(Clone, Debug, Default, Deserialize, Serialize)] -pub struct ExpectedStorage(BTreeMap>); +pub struct ExpectedStorage(pub BTreeMap>); impl ExpectedStorage { /// Merge this expected storage with another one, accounting for conflicts. @@ -46,6 +46,17 @@ impl ExpectedStorage { } Ok(()) } + + /// Insert a new storage slot value for a given address. 
+ pub fn insert(&mut self, address: Address, slot: U256, value: U256) { + let buf: [u8; 32] = slot.into(); + let slot = H256::from_slice(&buf); + + let buf: [u8; 32] = value.into(); + let value = H256::from_slice(&buf); + + self.0.entry(address).or_default().insert(slot, value); + } } use std::fmt::{Display, Formatter}; diff --git a/crates/sim/src/utils.rs b/crates/sim/src/utils.rs index 3385cc86..73be29ec 100644 --- a/crates/sim/src/utils.rs +++ b/crates/sim/src/utils.rs @@ -12,26 +12,9 @@ // If not, see https://www.gnu.org/licenses/. use anyhow::Context; -use ethers::{ - abi::{AbiDecode, AbiEncode}, - types::{spoof, Address, BlockId, Bytes, Eip1559TransactionRequest, Selector, H256, U256}, -}; -use rundler_provider::{Provider, ProviderError}; -use rundler_types::contracts::{ - get_code_hashes::{CodeHashesResult, GETCODEHASHES_BYTECODE}, - get_gas_used::{GasUsedResult, GETGASUSED_BYTECODE}, -}; - -/// Creates call data from a method and its arguments. The arguments should be -/// passed as a tuple. -/// -/// Important: if the method takes a single argument, then this function should -/// be passed a single-element tuple, and not just the argument by itself. -pub(crate) fn call_data_of(selector: Selector, args: impl AbiEncode) -> Bytes { - let mut bytes = selector.to_vec(); - bytes.extend(args.encode()); - bytes.into() -} +use ethers::types::{spoof, Address, BlockId, H256}; +use rundler_provider::Provider; +use rundler_types::contracts::utils::get_code_hashes::{CodeHashesResult, GETCODEHASHES_BYTECODE}; /// Hashes together the code from all the provided addresses. The order of the input addresses does /// not matter. 
@@ -41,70 +24,14 @@ pub(crate) async fn get_code_hash( block_id: Option, ) -> anyhow::Result { addresses.sort(); - let out: CodeHashesResult = call_constructor( - provider, - &GETCODEHASHES_BYTECODE, - addresses, - block_id, - &spoof::state(), - ) - .await - .context("should compute code hashes")?; - Ok(H256(out.hash)) -} - -/// Measures the gas used by a call to target with value and data. -pub(crate) async fn get_gas_used( - provider: &P, - target: Address, - value: U256, - data: Bytes, - state_overrides: &spoof::State, -) -> anyhow::Result { - call_constructor( - provider, - &GETGASUSED_BYTECODE, - (target, value, data), - None, - state_overrides, - ) - .await -} - -async fn call_constructor( - provider: &P, - bytecode: &Bytes, - args: Args, - block_id: Option, - state_overrides: &spoof::State, -) -> anyhow::Result { - //println!("HC utils.rs call_constructor start"); - let mut data = bytecode.to_vec(); - data.extend(AbiEncode::encode(args)); - let tx = Eip1559TransactionRequest { - data: Some(data.into()), - ..Default::default() - }; - //println!("HC utils.rs call_constructor before providerCall"); - let error = provider - .call(&tx.into(), block_id, state_overrides) + let out: CodeHashesResult = provider + .call_constructor( + &GETCODEHASHES_BYTECODE, + addresses, + block_id, + &spoof::state(), + ) .await - .err() - .context("called constructor should revert")?; - //println!("HC utils.rs call_constructor after providerCall, error {:?}", error); - get_revert_data(error).context("should decode revert data from called constructor") -} - -// Gets and decodes the revert data from a provider error, if it is a revert error. 
-fn get_revert_data(error: ProviderError) -> Result { - let ProviderError::JsonRpcError(jsonrpc_error) = &error else { - return Err(error); - }; - if !jsonrpc_error.is_revert() { - return Err(error); - } - match jsonrpc_error.decode_revert_data() { - Some(ret) => Ok(ret), - None => Err(error), - } + .context("should compute code hashes")?; + Ok(H256(out.hash)) } diff --git a/crates/sim/tracer/package.json b/crates/sim/tracer/package.json index dbfa9885..3f5c8646 100644 --- a/crates/sim/tracer/package.json +++ b/crates/sim/tracer/package.json @@ -4,7 +4,7 @@ "license": "UNLICENSED", "scripts": { "clean": "rm -rf dist/*", - "build": "swc src/validationTracer.ts -d dist", + "build": "swc src/validationTracerV0_6.ts -d dist && swc src/validationTracerV0_7.ts -d dist", "typecheck": "tsc --noEmit", "watch": "yarn build --watch" }, diff --git a/crates/sim/tracer/src/validationTracer.ts b/crates/sim/tracer/src/validationTracerV0_6.ts similarity index 95% rename from crates/sim/tracer/src/validationTracer.ts rename to crates/sim/tracer/src/validationTracerV0_6.ts index 295e1002..962178b2 100644 --- a/crates/sim/tracer/src/validationTracer.ts +++ b/crates/sim/tracer/src/validationTracerV0_6.ts @@ -21,7 +21,7 @@ declare function toWord(s: string | Bytes): Bytes; interface Output { phases: Phase[]; revertData: string | null; - accessedContractAddresses: string[]; + accessedContracts: Record; associatedSlotsByAddress: Record; factoryCalledCreate2Twice: boolean; expectedStorage: Record>; @@ -51,6 +51,12 @@ interface RelevantStepData { stackEnd: BigInt | null; } +interface ContractInfo { + opcode: string; + length: number; + header: string; +} + type InternalPhase = Omit< Phase, | "forbiddenOpcodesUsed" @@ -84,6 +90,8 @@ type StringSet = Record; "TIMESTAMP", "BASEFEE", "BLOCKHASH", + "BLOBBASEFEE", + "BLOBHASH", "NUMBER", "SELFBALANCE", "BALANCE", @@ -104,7 +112,7 @@ type StringSet = Record; // address as their *first* argument, or modify the handling below. 
const EXT_OPCODES = stringSet(["EXTCODECOPY", "EXTCODEHASH", "EXTCODESIZE"]); - const READ_WRITE_OPCODES = stringSet(["SSTORE", "SLOAD"]); + const READ_WRITE_OPCODES = stringSet(["SSTORE", "SLOAD", "TSTORE", "TLOAD"]); // Whitelisted precompile addresses. const PRECOMPILE_WHITELIST = stringSet([ "0x0000000000000000000000000000000000000001", // ecRecover @@ -116,11 +124,12 @@ type StringSet = Record; "0x0000000000000000000000000000000000000007", // ecMul "0x0000000000000000000000000000000000000008", // ecPairing "0x0000000000000000000000000000000000000009", // black2f + "0x0000000000000000000000000000000000000100", // RIP-7212 ]); const phases: Phase[] = []; let revertData: string | null = null; - const accessedContractAddresses: StringSet = {}; + const accessedContracts: Record = {}; const associatedSlotsByAddressMap: Record = {}; const allStorageAccesses: Record> = {}; let factoryCreate2Count = 0; @@ -230,7 +239,7 @@ type StringSet = Record; return { phases, revertData, - accessedContractAddresses: Object.keys(accessedContractAddresses), + accessedContracts, associatedSlotsByAddress, factoryCalledCreate2Twice: factoryCreate2Count > 1, expectedStorage, @@ -366,9 +375,9 @@ type StringSet = Record; const index = EXT_OPCODES[opcode] ? 
0 : 1; const address = toAddress(log.stack.peek(index).toString(16)); const addressHex = toHex(address); - if (!isPrecompiled(address)) { + if (!isPrecompiled(address) && !PRECOMPILE_WHITELIST[addressHex]) { if ( - !accessedContractAddresses[addressHex] || + !accessedContracts[addressHex] || currentPhase.undeployedContractAccesses[addressHex] ) { // The spec says validation must not access code of undeployed @@ -383,7 +392,11 @@ type StringSet = Record; delete currentPhase.undeployedContractAccesses[addressHex]; } } - accessedContractAddresses[addressHex] = true; + accessedContracts[addressHex] = { + header: toHex(db.getCode(address).subarray(0, 3)), + opcode, + length: db.getCode(address).length, + }; } else if (!PRECOMPILE_WHITELIST[addressHex]) { currentPhase.forbiddenPrecompilesUsed[ getContractCombinedKey(log, addressHex) diff --git a/crates/sim/tracer/src/validationTracerV0_7.ts b/crates/sim/tracer/src/validationTracerV0_7.ts new file mode 100644 index 00000000..66a124d1 --- /dev/null +++ b/crates/sim/tracer/src/validationTracerV0_7.ts @@ -0,0 +1,444 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. 
+ + +// Adapted with minimal changes from: https://github.com/eth-infinitism/bundler/blob/main/packages/validation-manager/src/BundlerCollectorTracer.ts + + +// javascript code of tracer function +// NOTE: we process this locally for hardhat, but send to geth for remote tracing. +// should NOT "require" anything, or use logs. +// see LogTrace for valid types (but alas, this one must be javascript, not typescript). + +// This file contains references to validation rules, in the format [xxx-###] +// where xxx is OP/STO/COD/EP/SREP/EREP/UREP/ALT, and ### is a number +// the validation rules are defined in erc-aa-validation.md + +import { Address, Bytes, LogCallFrame, LogContext, LogDb, LogFrameResult, LogStep, LogTracer } from './types' + +// functions available in a context of geth tracer +declare function toAddress(s: string | Bytes): Address; +declare function toHex(x: Bytes): string; +declare function toWord(s: string | Bytes): Bytes; + +/** + * return type of our BundlerCollectorTracer. + * collect access and opcodes, split into "levels" based on NUMBER opcode + * keccak, calls and logs are collected globally, since the levels are unimportant for them. 
+ */ +export interface BundlerTracerResult { + /** + * storage and opcode info, collected on top-level calls from EntryPoint + */ + callsFromEntryPoint: TopLevelCallInfo[] + + /** + * values passed into KECCAK opcode + */ + keccak: string[] + + /** + * calls and returns, collected globally + */ + calls: Array + + /** + * logs, collected globally + */ + logs: LogInfo[] + + /** + * expected storage slots, collected globally + */ + expectedStorage: Record> + + debug: string[] +} + +export interface MethodInfo { + type: string + from: string + to: string + method: string + value: any + gas: number +} + +export interface ExitInfo { + type: 'REVERT' | 'RETURN' + gasUsed: number + data: string +} + +export interface TopLevelCallInfo { + topLevelMethodSig: string + topLevelTargetAddress: string + opcodes: { [opcode: string]: number } + access: { [address: string]: AccessInfo } + contractInfo: { [addr: string]: ContractInfo } + extCodeAccessInfo: { [addr: string]: string } + oog?: boolean +} + +/** + * Contract info + * + * It is illegal to access contracts with no code in validation even if it gets deployed later. + * This means we need to store the {@link contractSize} of accessed addresses at the time of access. + * + * Capture the "header" of the contract code for validation. + */ +export interface ContractInfo { + opcode: string + length: number + header: string +} + +export interface AccessInfo { + // slot value, just prior to this operation + reads: { [slot: string]: string } + // count of writes. + writes: { [slot: string]: number } +} + +export interface LogInfo { + topics: string[] + data: string +} + +interface RelevantStepData { + opcode: string + stackTop3: any[] +} + +/** + * type-safe local storage of our collector. contains all return-value properties. 
+ * (also defines all "trace-local" variables and functions) + */ +interface BundlerCollectorTracer extends LogTracer, BundlerTracerResult { + lastOp: string + lastThreeOpcodes: RelevantStepData[] + stopCollectingTopic: string + stopCollecting: boolean + currentLevel: TopLevelCallInfo + topLevelCallCounter: number + allStorageAccesses: Record> + countSlot: (list: { [key: string]: number | undefined }, key: any) => void + computeIfAbsent( + map: Record, + key: K, + getValue: () => V + ): V +} + +/** + * tracer to collect data for opcode banning. + * this method is passed as the "tracer" for eth_traceCall (note, the function itself) + * + * returned data: + * numberLevels: opcodes and memory access, split on execution of "number" opcode. + * keccak: input data of keccak opcode. + * calls: for each call, an array of [type, from, to, value] + * slots: accessed slots (on any address) + */ +((): BundlerCollectorTracer => { + return { + callsFromEntryPoint: [], + currentLevel: null as any, + keccak: [], + expectedStorage: {}, + calls: [], + logs: [], + debug: [], + lastOp: '', + lastThreeOpcodes: [], + // event sent after all validations are done: keccak("BeforeExecution()") + stopCollectingTopic: 'bb47ee3e183a558b1a2ff0874b079f3fc5478b7454eacf2bfc5af2ff5878f972', + stopCollecting: false, + topLevelCallCounter: 0, + allStorageAccesses: {}, + + fault (log: LogStep, _db: LogDb): void { + var err = ""; + const log_err = log.getError(); + if (log_err != undefined) { + err = log_err.toString() + } + this.debug.push('fault depth=', log.getDepth().toString(), ' gas=', log.getGas().toString(), ' cost=', log.getCost().toString(), ' err=', err) + }, + + result (_ctx: LogContext, _db: LogDb): BundlerTracerResult { + Object.keys(this.allStorageAccesses).forEach((address) => { + const slotAccesses = this.allStorageAccesses[address]; + const valuesBySlot: Record = {}; + let hasValues = false; + Object.keys(slotAccesses).forEach((slot) => { + const value = slotAccesses[slot]; + if 
(value) { + valuesBySlot[slot] = value; + hasValues = true; + } + }); + if (hasValues) { + this.expectedStorage[address] = valuesBySlot; + } + }); + + return { + callsFromEntryPoint: this.callsFromEntryPoint, + keccak: this.keccak, + logs: this.logs, + calls: this.calls, + expectedStorage: this.expectedStorage, + debug: this.debug // for internal debugging. + } + }, + + enter (frame: LogCallFrame): void { + if (this.stopCollecting) { + return + } + // this.debug.push('enter gas=', frame.getGas(), ' type=', frame.getType(), ' to=', toHex(frame.getTo()), ' in=', toHex(frame.getInput()).slice(0, 500)) + this.calls.push({ + type: frame.getType(), + from: toHex(frame.getFrom()), + to: toHex(frame.getTo()), + method: toHex(frame.getInput()).slice(0, 10), + gas: frame.getGas(), + value: frame.getValue() + }) + }, + exit (frame: LogFrameResult): void { + if (this.stopCollecting) { + return + } + this.calls.push({ + type: frame.getError() != null ? 'REVERT' : 'RETURN', + gasUsed: frame.getGasUsed(), + data: toHex(frame.getOutput()).slice(0, 4000) + }) + }, + + // increment the "key" in the list. if the key is not defined yet, then set it to "1" + countSlot (list: { [key: string]: number | undefined }, key: any) { + list[key] = (list[key] ?? 
0) + 1 + }, + + computeIfAbsent( + map: Record, + key: K, + getValue: () => V + ): V { + const value = map[key]; + if (value !== undefined) { + return value; + } + const newValue = getValue(); + map[key] = newValue; + return newValue; + }, + + + step (log: LogStep, db: LogDb): any { + if (this.stopCollecting) { + return + } + const opcode = log.op.toString() + + const stackSize = log.stack.length() + const stackTop3 = [] + for (let i = 0; i < 3 && i < stackSize; i++) { + stackTop3.push(log.stack.peek(i)) + } + this.lastThreeOpcodes.push({ opcode, stackTop3 }) + if (this.lastThreeOpcodes.length > 3) { + this.lastThreeOpcodes.shift() + } + // this.debug.push(this.lastOp + '-' + opcode + '-' + log.getDepth() + '-' + log.getGas() + '-' + log.getCost()) + if (log.getGas() < log.getCost() || ( + // special rule for SSTORE with gas metering + opcode === 'SSTORE' && log.getGas() < 2300) + ) { + this.currentLevel.oog = true + } + + if (opcode === 'REVERT' || opcode === 'RETURN') { + if (log.getDepth() === 1) { + // exit() is not called on top-level return/revent, so we reconstruct it + // from opcode + const ofs = parseInt(log.stack.peek(0).toString()) + const len = parseInt(log.stack.peek(1).toString()) + const data = toHex(log.memory.slice(ofs, ofs + len)).slice(0, 4000) + // this.debug.push(opcode + ' ' + data) + this.calls.push({ + type: opcode, + gasUsed: 0, + data + }) + } + // NOTE: flushing all history after RETURN + this.lastThreeOpcodes = [] + } + + if (log.getDepth() === 1) { + if (opcode === 'CALL' || opcode === 'STATICCALL') { + // stack.peek(0) - gas + const addr = toAddress(log.stack.peek(1).toString(16)) + const topLevelTargetAddress = toHex(addr) + // stack.peek(2) - value + const ofs = parseInt(log.stack.peek(3).toString()) + // stack.peek(4) - len + const topLevelMethodSig = toHex(log.memory.slice(ofs, ofs + 4)) + + this.currentLevel = this.callsFromEntryPoint[this.topLevelCallCounter] = { + topLevelMethodSig, + topLevelTargetAddress, + access: {}, + 
opcodes: {}, + extCodeAccessInfo: {}, + contractInfo: {} + } + this.topLevelCallCounter++ + } else if (opcode === 'LOG1') { + // ignore log data ofs, len + const topic = log.stack.peek(2).toString(16) + if (topic === this.stopCollectingTopic) { + this.stopCollecting = true + } + } + this.lastOp = '' + return + } + + const lastOpInfo = this.lastThreeOpcodes[this.lastThreeOpcodes.length - 2] + // store all addresses touched by EXTCODE* opcodes + if (lastOpInfo?.opcode?.match(/^(EXT.*)$/) != null) { + const addr = toAddress(lastOpInfo.stackTop3[0].toString(16)) + const addrHex = toHex(addr) + const last3opcodesString = this.lastThreeOpcodes.map(x => x.opcode).join(' ') + // only store the last EXTCODE* opcode per address - could even be a boolean for our current use-case + // [OP-051] + if (last3opcodesString.match(/^(\w+) EXTCODESIZE ISZERO$/) == null) { + this.currentLevel.extCodeAccessInfo[addrHex] = opcode + // this.debug.push(`potentially illegal EXTCODESIZE without ISZERO for ${addrHex}`) + } else { + // this.debug.push(`safe EXTCODESIZE with ISZERO for ${addrHex}`) + } + } + + // not using 'isPrecompiled' to only allow the ones defined by the ERC-4337 as stateless precompiles + // [OP-062] + const isAllowedPrecompiled: (address: any) => boolean = (address) => { + const addrHex = toHex(address) + const addressInt = parseInt(addrHex) + // this.debug.push(`isPrecompiled address=${addrHex} addressInt=${addressInt}`) + + // MODIFICATION: allow precompile RIP-7212 through - which is at 256 + return (addressInt > 0 && addressInt < 10) || addressInt == 256 + } + // [OP-041] + if (opcode.match(/^(EXT.*|CALL|CALLCODE|DELEGATECALL|STATICCALL)$/) != null) { + const idx = opcode.startsWith('EXT') ? 
0 : 1 + const addr = toAddress(log.stack.peek(idx).toString(16)) + const addrHex = toHex(addr) + // this.debug.push('op=' + opcode + ' last=' + this.lastOp + ' stacksize=' + log.stack.length() + ' addr=' + addrHex) + if (this.currentLevel.contractInfo[addrHex] == null && !isAllowedPrecompiled(addr)) { + this.currentLevel.contractInfo[addrHex] = { + length: db.getCode(addr).length, + opcode, + header: toHex(db.getCode(addr).subarray(0, 3)) + } + } + } + + // [OP-012] + if (this.lastOp === 'GAS' && !opcode.includes('CALL')) { + // count "GAS" opcode only if not followed by "CALL" + this.countSlot(this.currentLevel.opcodes, 'GAS') + } + if (opcode !== 'GAS') { + // ignore "unimportant" opcodes: + if (opcode.match(/^(DUP\d+|PUSH\d+|SWAP\d+|POP|ADD|SUB|MUL|DIV|EQ|LTE?|S?GTE?|SLT|SH[LR]|AND|OR|NOT|ISZERO)$/) == null) { + this.countSlot(this.currentLevel.opcodes, opcode) + } + } + this.lastOp = opcode + + // MODIFICATION: [OP-070] - Treat TLOAD and TSTORE as SLOAD and SSTORE + if (opcode === 'SLOAD' || opcode === 'SSTORE' || opcode === 'TLOAD' || opcode === 'TSTORE') { + const slot = toWord(log.stack.peek(0).toString(16)) + const slotHex = toHex(slot) + const addr = log.contract.getAddress() + const addrHex = toHex(addr) + let access = this.currentLevel.access[addrHex] + + let initialValuesBySlot = this.computeIfAbsent( + this.allStorageAccesses, + addrHex, + (): Record => ({}) + ); + + if (access == null) { + access = { + reads: {}, + writes: {} + } + this.currentLevel.access[addrHex] = access + } + if (opcode === 'SLOAD' || opcode === 'TLOAD') { + // read slot values before this UserOp was created + // (so saving it if it was written before the first read) + if (access.reads[slotHex] == null && access.writes[slotHex] == null) { + access.reads[slotHex] = toHex(db.getState(addr, slot)) + } + + if (!(slotHex in initialValuesBySlot)) { + initialValuesBySlot[slotHex] = toHex(db.getState(addr, slot)); + } + } else { + this.countSlot(access.writes, slotHex) + + if (!(slotHex 
in initialValuesBySlot)) { + initialValuesBySlot[slotHex] = null; + } + } + } + + if (opcode === 'KECCAK256') { + // collect keccak on 64-byte blocks + const ofs = parseInt(log.stack.peek(0).toString()) + const len = parseInt(log.stack.peek(1).toString()) + // currently, solidity uses only 2-word (6-byte) for a key. this might change.. + // still, no need to return too much + if (len > 20 && len < 512) { + // if (len === 64) { + this.keccak.push(toHex(log.memory.slice(ofs, ofs + len))) + } + } else if (opcode.startsWith('LOG')) { + const count = parseInt(opcode.substring(3)) + const ofs = parseInt(log.stack.peek(0).toString()) + const len = parseInt(log.stack.peek(1).toString()) + const topics = [] + for (let i = 0; i < count; i++) { + // eslint-disable-next-line @typescript-eslint/restrict-plus-operands + topics.push('0x' + log.stack.peek(2 + i).toString(16)) + } + const data = toHex(log.memory.slice(ofs, ofs + len)) + this.logs.push({ + topics, + data + }) + } + } + } +})(); \ No newline at end of file diff --git a/crates/task/src/grpc/metrics.rs b/crates/task/src/grpc/metrics.rs index f2e13f89..75b05fd6 100644 --- a/crates/task/src/grpc/metrics.rs +++ b/crates/task/src/grpc/metrics.rs @@ -132,26 +132,26 @@ struct GrpcMetricsRecorder; impl GrpcMetricsRecorder { // Increment the number of requests for a given method and service. fn increment_num_requests(method_name: &str, scope: &str) { - metrics::increment_counter!("grpc_num_requests", "method_name" => method_name.to_string(), "service" => scope.to_string()) + metrics::counter!("grpc_num_requests", "method_name" => method_name.to_string(), "service" => scope.to_string()).increment(1) } // Increment the number of open requests for a given method and service. 
fn increment_open_requests(method_name: &str, scope: &str) { - metrics::increment_gauge!("grpc_open_requests", 1_f64, "method_name" => method_name.to_string(), "service" => scope.to_string()) + metrics::gauge!("grpc_open_requests", "method_name" => method_name.to_string(), "service" => scope.to_string()).increment(1_f64) } // Decrement the number of open requests for a given method and service. fn decrement_open_requests(method_name: &str, scope: &str) { - metrics::decrement_gauge!("grpc_open_requests", 1_f64, "method_name" => method_name.to_string(), "service" => scope.to_string()) + metrics::gauge!("grpc_open_requests", "method_name" => method_name.to_string(), "service" => scope.to_string()).decrement(1_f64) } // Increment the number of gRPC errors for a given method and service. fn increment_rpc_error_count(method_name: &str, scope: &str) { - metrics::increment_counter!("grpc_error_count", "method_name" => method_name.to_string(), "service" => scope.to_string()) + metrics::counter!("grpc_error_count", "method_name" => method_name.to_string(), "service" => scope.to_string()).increment(1) } // Record the latency of a request for a given method and service. fn record_request_latency(method_name: &str, scope: &str, latency: Duration) { - metrics::histogram!("grpc_request_latency", latency, "method_name" => method_name.to_string(), "service" => scope.to_string()) + metrics::histogram!("grpc_request_latency", "method_name" => method_name.to_string(), "service" => scope.to_string()).record(latency) } } diff --git a/crates/task/src/grpc/protos.rs b/crates/task/src/grpc/protos.rs index 97b8a059..bb7966f2 100644 --- a/crates/task/src/grpc/protos.rs +++ b/crates/task/src/grpc/protos.rs @@ -13,7 +13,7 @@ //! Protobuf utilities -use ethers::types::{Address, H256, U256}; +use ethers::types::{Address, Bytes, H256, U128, U256}; /// Error type for conversions from protobuf types to Ethers/local types. 
#[derive(Debug, thiserror::Error)] @@ -27,13 +27,9 @@ pub enum ConversionError { /// Invalid enum value, does not map to a valid enum variant #[error("Invalid enum value {0}")] InvalidEnumValue(i32), -} - -/// Convert an Ethers U256 to little endian bytes for packing into a proto struct. -pub fn to_le_bytes(n: U256) -> Vec { - let mut vec = vec![0_u8; 32]; - n.to_little_endian(&mut vec); - vec + /// Other error + #[error(transparent)] + Other(#[from] anyhow::Error), } /// Convert proto bytes into a type that implements `FromProtoBytes`. @@ -79,6 +75,14 @@ impl FromFixedLengthProtoBytes for Address { } } +impl FromFixedLengthProtoBytes for U128 { + const LEN: usize = 16; + + fn from_fixed_length_bytes(bytes: &[u8]) -> Self { + Self::from_little_endian(bytes) + } +} + impl FromFixedLengthProtoBytes for U256 { const LEN: usize = 32; @@ -94,3 +98,43 @@ impl FromFixedLengthProtoBytes for H256 { Self::from_slice(bytes) } } + +/// Trait for a type that can be converted to protobuf bytes. +pub trait ToProtoBytes { + /// Convert to protobuf bytes. 
+ fn to_proto_bytes(&self) -> Vec; +} + +impl ToProtoBytes for Address { + fn to_proto_bytes(&self) -> Vec { + self.as_bytes().to_vec() + } +} + +impl ToProtoBytes for U128 { + fn to_proto_bytes(&self) -> Vec { + let mut vec = vec![0_u8; 16]; + self.to_little_endian(&mut vec); + vec + } +} + +impl ToProtoBytes for U256 { + fn to_proto_bytes(&self) -> Vec { + let mut vec = vec![0_u8; 32]; + self.to_little_endian(&mut vec); + vec + } +} + +impl ToProtoBytes for H256 { + fn to_proto_bytes(&self) -> Vec { + self.as_bytes().to_vec() + } +} + +impl ToProtoBytes for Bytes { + fn to_proto_bytes(&self) -> Vec { + self.to_vec() + } +} diff --git a/crates/types/.gitignore b/crates/types/.gitignore index 1eb78168..8e68a776 100644 --- a/crates/types/.gitignore +++ b/crates/types/.gitignore @@ -1,2 +1,6 @@ # Generated code -/src/contracts +/src/contracts/v0_6 +/src/contracts/v0_7 +/src/contracts/arbitrum +/src/contracts/optimism +/src/contracts/utils diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml index 47d190d4..909e4c59 100644 --- a/crates/types/Cargo.toml +++ b/crates/types/Cargo.toml @@ -11,12 +11,27 @@ rundler-utils = { path = "../utils" } once_cell = "1.19.0" anyhow.workspace = true +async-trait.workspace = true chrono = "0.4.24" +constcat = "0.4.1" +const-hex = "1.11.3" ethers.workspace = true -parse-display = "0.8.0" +futures-util.workspace = true +num_enum = "0.7.2" +parse-display.workspace = true +rand.workspace = true serde.workspace = true serde_json.workspace = true strum.workspace = true +thiserror.workspace = true + +mockall = {workspace = true, optional = true } [build-dependencies] ethers.workspace = true + +[dev-dependencies] +rundler-types = { path = ".", features = ["test-utils"] } + +[features] +test-utils = [ "mockall" ] diff --git a/crates/types/build.rs b/crates/types/build.rs index e1c81d47..ab32c291 100644 --- a/crates/types/build.rs +++ b/crates/types/build.rs @@ -19,59 +19,133 @@ fn main() -> Result<(), Box> { 
println!("cargo:rerun-if-changed=contracts/lib"); println!("cargo:rerun-if-changed=contracts/src"); println!("cargo:rerun-if-changed=contracts/foundry.toml"); - update_submodules()?; - generate_contract_bindings()?; + generate_v0_6_bindings()?; + generate_v0_7_bindings()?; + generate_utils_bindings()?; + generate_arbitrum_bindings()?; + generate_optimism_bindings()?; Ok(()) } -fn generate_contract_bindings() -> Result<(), Box> { - generate_abis()?; +fn generate_v0_6_bindings() -> Result<(), Box> { + run_command( + forge_build("v0_6") + .arg("--remappings") + .arg("@openzeppelin/=lib/openzeppelin-contracts-versions/v4_9"), + "https://getfoundry.sh/", + "generate ABIs", + )?; + MultiAbigen::from_abigens([ - abigen_of("IEntryPoint")?, - abigen_of("EntryPoint")?, - abigen_of("IAggregator")?, - abigen_of("IStakeManager")?, - abigen_of("GetCodeHashes")?, - abigen_of("PaymasterHelper")?, - abigen_of("GetGasUsed")?, - abigen_of("CallGasEstimationProxy")?, - abigen_of("SimpleAccount")?, - abigen_of("SimpleAccountFactory")?, - abigen_of("VerifyingPaymaster")?, - abigen_of("NodeInterface")?, - abigen_of("GasPriceOracle")?, - abigen_of("INonceManager")?, - abigen_of("HCHelper")?, + abigen_of("v0_6", "IEntryPoint")?, + abigen_of("v0_6", "IAggregator")?, + abigen_of("v0_6", "IStakeManager")?, + abigen_of("v0_6", "GetBalances")?, + abigen_of("v0_6", "SimpleAccount")?, + abigen_of("v0_6", "SimpleAccountFactory")?, + abigen_of("v0_6", "VerifyingPaymaster")?, + abigen_of("v0_6", "CallGasEstimationProxy")?, + // hybrid compute + abigen_of("v0_6", "INonceManager")?, + abigen_of("", "HCHelper")?, ]) .build()? - .write_to_module("src/contracts", false)?; + .write_to_module("src/contracts/v0_6", false)?; + Ok(()) } -fn abigen_of(contract: &str) -> Result> { - Ok(Abigen::new( - contract, - format!("contracts/out/{contract}.sol/{contract}.json"), - )?) 
+fn generate_v0_7_bindings() -> Result<(), Box> { + run_command( + forge_build("v0_7") + .arg("--remappings") + .arg("@openzeppelin/=lib/openzeppelin-contracts-versions/v5_0"), + "https://getfoundry.sh/", + "generate ABIs", + )?; + + MultiAbigen::from_abigens([ + abigen_of("v0_7", "IEntryPoint")?, + abigen_of("v0_7", "IAccount")?, + abigen_of("v0_7", "IPaymaster")?, + abigen_of("v0_7", "IAggregator")?, + abigen_of("v0_7", "IStakeManager")?, + abigen_of("v0_7", "GetBalances")?, + abigen_of("v0_7", "EntryPointSimulations")?, + abigen_of("v0_7", "CallGasEstimationProxy")?, + abigen_of("v0_7", "SenderCreator")?, + ]) + .build()? + .write_to_module("src/contracts/v0_7", false)?; + + Ok(()) } -fn generate_abis() -> Result<(), Box> { +fn generate_utils_bindings() -> Result<(), Box> { run_command( - Command::new("forge") - .arg("build") - .arg("--root") - .arg("./contracts"), + &mut forge_build("utils"), "https://getfoundry.sh/", "generate ABIs", - ) + )?; + + MultiAbigen::from_abigens([ + abigen_of("utils", "GetCodeHashes")?, + abigen_of("utils", "GetGasUsed")?, + abigen_of("utils", "StorageLoader")?, + ]) + .build()? + .write_to_module("src/contracts/utils", false)?; + + Ok(()) } -fn update_submodules() -> Result<(), Box> { +fn generate_arbitrum_bindings() -> Result<(), Box> { run_command( - Command::new("git").arg("submodule").arg("update"), - "https://github.com/git-guides/install-git", - "update submodules", - ) + &mut forge_build("arbitrum"), + "https://getfoundry.sh/", + "generate ABIs", + )?; + + MultiAbigen::from_abigens([abigen_of("arbitrum", "NodeInterface")?]) + .build()? + .write_to_module("src/contracts/arbitrum", false)?; + + Ok(()) +} + +fn generate_optimism_bindings() -> Result<(), Box> { + run_command( + &mut forge_build("optimism"), + "https://getfoundry.sh/", + "generate ABIs", + )?; + + MultiAbigen::from_abigens([abigen_of("optimism", "GasPriceOracle")?]) + .build()? 
+ .write_to_module("src/contracts/optimism", false)?; + + Ok(()) +} + +fn forge_build(src: &str) -> Command { + let mut cmd = Command::new("forge"); + + cmd.arg("build") + .arg("--root") + .arg("./contracts") + .arg("--contracts") + .arg(format!("src/{src}")) + .arg("--out") + .arg(format!("out/{src}")); + + cmd +} + +fn abigen_of(extra_path: &str, contract: &str) -> Result> { + Ok(Abigen::new( + contract, + format!("contracts/out/{extra_path}/{contract}.sol/{contract}.json"), + )?) } fn run_command( diff --git a/crates/types/contracts/bytecode/entrypoint/0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789_deployed.txt b/crates/types/contracts/bytecode/entrypoint/0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789_deployed.txt new file mode 100644 index 00000000..baa9e5c8 --- /dev/null +++ b/crates/types/contracts/bytecode/entrypoint/0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789_deployed.txt @@ -0,0 +1 @@ +0x60806040526004361015610023575b361561001957600080fd5b610021615531565b005b60003560e01c80630396cb60146101b35780630bd28e3b146101aa5780631b2e01b8146101a15780631d732756146101985780631fad948c1461018f578063205c28781461018657806335567e1a1461017d5780634b1d7cf5146101745780635287ce121461016b57806370a08231146101625780638f41ec5a14610159578063957122ab146101505780639b249f6914610147578063a61935311461013e578063b760faf914610135578063bb9fe6bf1461012c578063c23a5cea14610123578063d6383f941461011a578063ee219423146101115763fc7e286d0361000e5761010c611bcd565b61000e565b5061010c6119b5565b5061010c61184d565b5061010c6116b4565b5061010c611536565b5061010c6114f7565b5061010c6114d6565b5061010c611337565b5061010c611164565b5061010c611129565b5061010c6110a4565b5061010c610f54565b5061010c610bf8565b5061010c610b33565b5061010c610994565b5061010c6108ba565b5061010c6106e7565b5061010c610467565b5061010c610385565b5060207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc3601126103595760043563ffffffff8116808203610359576103547fa5ae833d0bb1dcd632d98a8b70973e8516812898e19bf27b70071ebc8dc52c01916102716102413373fffffffffffffffff
fffffffffffffffffffffff166000526000602052604060002090565b9161024d811515615697565b61026a610261600185015463ffffffff1690565b63ffffffff1690565b11156156fc565b54926103366dffffffffffffffffffffffffffff946102f461029834888460781c166121d5565b966102a4881515615761565b6102b0818911156157c6565b6102d4816102bc6105ec565b941684906dffffffffffffffffffffffffffff169052565b6001602084015287166dffffffffffffffffffffffffffff166040830152565b63ffffffff83166060820152600060808201526103313373ffffffffffffffffffffffffffffffffffffffff166000526000602052604060002090565b61582b565b6040805194855263ffffffff90911660208501523393918291820190565b0390a2005b600080fd5b6024359077ffffffffffffffffffffffffffffffffffffffffffffffff8216820361035957565b50346103595760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc3601126103595760043577ffffffffffffffffffffffffffffffffffffffffffffffff81168103610359576104149033600052600160205260406000209077ffffffffffffffffffffffffffffffffffffffffffffffff16600052602052604060002090565b61041e8154612491565b9055005b73ffffffffffffffffffffffffffffffffffffffff81160361035957565b6024359061044d82610422565b565b60c4359061044d82610422565b359061044d82610422565b50346103595760407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc3601126103595760206104fc6004356104a881610422565b73ffffffffffffffffffffffffffffffffffffffff6104c561035e565b91166000526001835260406000209077ffffffffffffffffffffffffffffffffffffffffffffffff16600052602052604060002090565b54604051908152f35b507f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60a0810190811067ffffffffffffffff82111761055157604052565b610559610505565b604052565b610100810190811067ffffffffffffffff82111761055157604052565b67ffffffffffffffff811161055157604052565b6060810190811067ffffffffffffffff82111761055157604052565b90601f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0910116810190811067ffffffffffffffff82111761055157604052565b6040519061044d82610535565b6040519060c0820182811067fffffffffffffff
f82111761055157604052565b604051906040820182811067ffffffffffffffff82111761055157604052565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f60209267ffffffffffffffff8111610675575b01160190565b61067d610505565b61066f565b92919261068e82610639565b9161069c60405193846105ab565b829481845281830111610359578281602093846000960137010152565b9181601f840112156103595782359167ffffffffffffffff8311610359576020838186019501011161035957565b5034610359576101c07ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc3601126103595767ffffffffffffffff60043581811161035957366023820112156103595761074a903690602481600401359101610682565b907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffdc36016101808112610359576101006040519161078783610535565b12610359576040516107988161055e565b6107a0610440565b815260443560208201526064356040820152608435606082015260a43560808201526107ca61044f565b60a082015260e43560c08201526101043560e082015281526101243560208201526101443560408201526101643560608201526101843560808201526101a4359182116103595761083e9261082661082e9336906004016106b9565b9290916128b1565b6040519081529081906020820190565b0390f35b9060407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc8301126103595760043567ffffffffffffffff9283821161035957806023830112156103595781600401359384116103595760248460051b830101116103595760240191906024356108b781610422565b90565b5034610359576108c936610842565b6108d4929192611e3a565b6108dd83611d2d565b60005b84811061095d57506000927fbb47ee3e183a558b1a2ff0874b079f3fc5478b7454eacf2bfc5af2ff5878f9728480a183915b85831061092d576109238585611ed7565b6100216001600255565b909193600190610953610941878987611dec565b61094b8886611dca565b51908861233f565b0194019190610912565b8061098b610984610972600194869896611dca565b5161097e848a88611dec565b84613448565b9083612f30565b019290926108e0565b50346103595760407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc360112610359576004356109d081610422565b6024359060009133835282602052604083206dffffffffffffffffffffffffffff8154169
2838311610ad557848373ffffffffffffffffffffffffffffffffffffffff829593610a788496610a3f610a2c8798610ad29c6121c0565b6dffffffffffffffffffffffffffff1690565b6dffffffffffffffffffffffffffff167fffffffffffffffffffffffffffffffffffff0000000000000000000000000000825416179055565b6040805173ffffffffffffffffffffffffffffffffffffffff831681526020810185905233917fd1c19fbcd4551a5edfb66d43d2e337c04837afda3482b42bdf569a8fccdae5fb91a2165af1610acc611ea7565b50615ba2565b80f35b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f576974686472617720616d6f756e7420746f6f206c61726765000000000000006044820152fd5b50346103595760407ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc360112610359576020600435610b7181610422565b73ffffffffffffffffffffffffffffffffffffffff610b8e61035e565b911660005260018252610bc98160406000209077ffffffffffffffffffffffffffffffffffffffffffffffff16600052602052604060002090565b547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000006040519260401b16178152f35b503461035957610c0736610842565b610c0f611e3a565b6000805b838210610df657610c249150611d2d565b7fbb47ee3e183a558b1a2ff0874b079f3fc5478b7454eacf2bfc5af2ff5878f972600080a16000805b848110610d5c57505060008093815b818110610c9357610923868660007f575ff3acadd5ab348fe1855e217e0f3678f8d767d7494c9f9fefbee2e17cca4d8180a2611ed7565b610cf7610ca182848a6124cb565b610ccc610cb3610cb36020840161256d565b73ffffffffffffffffffffffffffffffffffffffff1690565b7f575ff3acadd5ab348fe1855e217e0f3678f8d767d7494c9f9fefbee2e17cca4d600080a280612519565b906000915b808310610d1457505050610d0f90612491565b610c5c565b90919497610d4f610d49610d5592610d438c8b610d3c82610d368e8b8d611dec565b92611dca565b519161233f565b906121d5565b99612491565b95612491565b9190610cfc565b610d678186886124cb565b6020610d7f610d768380612519565b9290930161256d565b9173ffffffffffffffffffffffffffffffffffffffff60009316905b828410610db45750505050610daf90612491565b610c4d565b90919294610d4f81610de985610de2610dd0610dee968d611dca565b51610ddc8c8b8a611dec565b8
5613448565b908b613148565b612491565b929190610d9b565b610e018285876124cb565b90610e0c8280612519565b92610e1c610cb36020830161256d565b9173ffffffffffffffffffffffffffffffffffffffff8316610e416001821415612577565b610e62575b505050610e5c91610e56916121d5565b91612491565b90610c13565b909592610e7b6040999693999895989788810190611fc8565b92908a3b156103595789938b918a5193849283927fe3563a4f00000000000000000000000000000000000000000000000000000000845260049e8f850193610ec294612711565b03815a93600094fa9081610f3b575b50610f255786517f86a9f75000000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8a16818a0190815281906020010390fd5b0390fd5b9497509295509093509181610e56610e5c610e46565b80610f48610f4e9261057b565b8061111e565b38610ed1565b50346103595760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc3601126103595761083e73ffffffffffffffffffffffffffffffffffffffff600435610fa881610422565b608060409283928351610fba81610535565b60009381858093528260208201528287820152826060820152015216815280602052209061104965ffffffffffff6001835194610ff686610535565b80546dffffffffffffffffffffffffffff8082168852607082901c60ff161515602089015260789190911c1685870152015463ffffffff8116606086015260201c16608084019065ffffffffffff169052565b5191829182919091608065ffffffffffff8160a08401956dffffffffffffffffffffffffffff808251168652602082015115156020870152604082015116604086015263ffffffff6060820151166060860152015116910152565b50346103595760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc3601126103595773ffffffffffffffffffffffffffffffffffffffff6004356110f581610422565b16600052600060205260206dffffffffffffffffffffffffffff60406000205416604051908152f35b600091031261035957565b50346103595760007ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261035957602060405160018152f35b50346103595760607ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc36011261035957600467ffffffffffffffff8135818111610359576111b590369084016106b9565b9050602435916111c483610422565b6
04435908111610359576111db90369085016106b9565b92909115908161132d575b506112c6576014821015611236575b610f21836040519182917f08c379a0000000000000000000000000000000000000000000000000000000008352820160409060208152600060208201520190565b6112466112529261124c92612b88565b90612b96565b60601c90565b3b1561125f5738806111f5565b610f21906040519182917f08c379a0000000000000000000000000000000000000000000000000000000008352820160609060208152601b60208201527f41413330207061796d6173746572206e6f74206465706c6f796564000000000060408201520190565b610f21836040519182917f08c379a0000000000000000000000000000000000000000000000000000000008352820160609060208152601960208201527f41413230206163636f756e74206e6f74206465706c6f7965640000000000000060408201520190565b90503b15386111e6565b50346103595760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc3601126103595760043567ffffffffffffffff81116103595761138960249136906004016106b9565b906113bf6040519283927f570e1a3600000000000000000000000000000000000000000000000000000000845260048401612d2c565b0360208273ffffffffffffffffffffffffffffffffffffffff92816000857f0000000000000000000000007fc98430eaedbb6070b35b39d798725049088348165af1918215611471575b600092611441575b50604051917f6ca7b806000000000000000000000000000000000000000000000000000000008352166004820152fd5b61146391925060203d811161146a575b61145b81836105ab565b810190612d17565b9038611411565b503d611451565b611479612183565b611409565b90816101609103126103595790565b60207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc820112610359576004359067ffffffffffffffff8211610359576108b79160040161147e565b50346103595760206114ef6114ea3661148d565b612a0c565b604051908152f35b5060207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc3601126103595761002160043561153181610422565b61562b565b5034610359576000807ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc3601126116b1573381528060205260408120600181019063ffffffff825416908115611653576115f06115b5611618936115a76115a2855460ff9060701c1690565b61598f565b65fffffffff
fff42166159f4565b84547fffffffffffffffffffffffffffffffffffffffffffff000000000000ffffffff16602082901b69ffffffffffff000000001617909455565b7fffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffff8154169055565b60405165ffffffffffff91909116815233907ffa9b3c14cc825c412c9ed81b3ba365a5b459439403f18829e572ed53a4180f0a90602090a280f35b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600a60248201527f6e6f74207374616b6564000000000000000000000000000000000000000000006044820152fd5b80fd5b50346103595760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc360112610359576004356116f081610422565b610ad273ffffffffffffffffffffffffffffffffffffffff6117323373ffffffffffffffffffffffffffffffffffffffff166000526000602052604060002090565b926117ea611755610a2c86546dffffffffffffffffffffffffffff9060781c1690565b94611761861515615a0e565b6117c26001820161179a65ffffffffffff611786835465ffffffffffff9060201c1690565b16611792811515615a73565b421015615ad8565b80547fffffffffffffffffffffffffffffffffffffffffffff00000000000000000000169055565b7fffffff0000000000000000000000000000ffffffffffffffffffffffffffffff8154169055565b6040805173ffffffffffffffffffffffffffffffffffffffff831681526020810186905233917fb7c918e0e249f999e965cafeb6c664271b3f4317d296461500e71da39f0cbda391a2600080809581948294165af1611847611ea7565b50615b3d565b50346103595760607ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc3601126103595767ffffffffffffffff6004358181116103595761189e90369060040161147e565b602435916118ab83610422565b604435908111610359576118c6610f219136906004016106b9565b6118ce611caa565b6118d785612e2b565b6118ea6118e48287613240565b906153ba565b946118fa826000924384526121e2565b96438252819360609573ffffffffffffffffffffffffffffffffffffffff8316611981575b50505050608001519361194e6040611940602084015165ffffffffffff1690565b92015165ffffffffffff1690565b906040519687967f8b7ac980000000000000000000000000000000000000000000000000000000008852600488016127e1565b8395508394965061199b6040949293945180948
1936127d3565b03925af19060806119aa611ea7565b92919038808061191f565b5034610359576119c43661148d565b6119cc611caa565b6119d582612e2b565b6119df8183613240565b825160a00151919391611a0c9073ffffffffffffffffffffffffffffffffffffffff166154dc565b6154dc565b90611a30611a07855173ffffffffffffffffffffffffffffffffffffffff90511690565b94611a39612b50565b50611a68611a4c60409586810190611fc8565b90600060148310611bc55750611246611a079261124c92612b88565b91611a72916153ba565b805173ffffffffffffffffffffffffffffffffffffffff169073ffffffffffffffffffffffffffffffffffffffff821660018114916080880151978781015191886020820151611ac79065ffffffffffff1690565b91015165ffffffffffff16916060015192611ae06105f9565b9a8b5260208b0152841515898b015265ffffffffffff1660608a015265ffffffffffff16608089015260a088015215159081611bbc575b50611b515750610f2192519485947fe0cff05f00000000000000000000000000000000000000000000000000000000865260048601612cbd565b9190610f2193611b60846154dc565b611b87611b6b610619565b73ffffffffffffffffffffffffffffffffffffffff9096168652565b6020850152519586957ffaecb4e400000000000000000000000000000000000000000000000000000000875260048701612c2b565b90501538611b17565b9150506154dc565b50346103595760207ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc3601126103595773ffffffffffffffffffffffffffffffffffffffff600435611c1e81610422565b16600052600060205260a0604060002065ffffffffffff60018254920154604051926dffffffffffffffffffffffffffff90818116855260ff8160701c161515602086015260781c16604084015263ffffffff8116606084015260201c166080820152f35b60209067ffffffffffffffff8111611c9d575b60051b0190565b611ca5610505565b611c96565b60405190611cb782610535565b604051608083610100830167ffffffffffffffff811184821017611d20575b60405260009283815283602082015283604082015283606082015283838201528360a08201528360c08201528360e082015281528260208201528260408201528260608201520152565b611d28610505565b611cd6565b90611d3782611c83565b611d4460405191826105ab565b8281527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0611d728294611c83565b019060005b828110611d8
357505050565b602090611d8e611caa565b82828501015201611d77565b507f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b6020918151811015611ddf575b60051b010190565b611de7611d9a565b611dd7565b9190811015611e2d575b60051b810135907ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffea181360301821215610359570190565b611e35611d9a565b611df6565b6002805414611e495760028055565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f5265656e7472616e637947756172643a207265656e7472616e742063616c6c006044820152fd5b3d15611ed2573d90611eb882610639565b91611ec660405193846105ab565b82523d6000602084013e565b606090565b73ffffffffffffffffffffffffffffffffffffffff168015611f6a57600080809381935af1611f04611ea7565b5015611f0c57565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f41413931206661696c65642073656e6420746f2062656e6566696369617279006044820152fd5b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f4141393020696e76616c69642062656e656669636961727900000000000000006044820152fd5b9035907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe181360301821215610359570180359067ffffffffffffffff82116103595760200191813603831361035957565b90816020910312610359575190565b601f82602094937fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0938186528686013760008582860101520116010190565b60005b83811061207a5750506000910152565b818101518382015260200161206a565b907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f6020936120c681518092818752878088019101612067565b0116010190565b906120e76080916108b796946101c0808652850191612028565b9360e0815173ffffffffffffffffffffffffffffffffffffffff80825116602087015260208201516040870152604082015160608701526060820151858701528482015160a087015260a08201511660c086015260c08101518286015201516101008401526020810151610120840152604081015161014084015260608
1015161016084015201516101808201526101a081840391015261208a565b506040513d6000823e3d90fd5b507f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b919082039182116121cd57565b61044d612190565b919082018092116121cd57565b905a918160206121fb6060830151936060810190611fc8565b906122348560405195869485947f1d732756000000000000000000000000000000000000000000000000000000008652600486016120cd565b03816000305af16000918161230f575b50612308575060206000803e7fdeaddead000000000000000000000000000000000000000000000000000000006000511461229b5761229561228a6108b7945a906121c0565b6080840151906121d5565b91614afc565b6040517f220266b600000000000000000000000000000000000000000000000000000000815280610f21600482016080906000815260406020820152600f60408201527f41413935206f7574206f6620676173000000000000000000000000000000000060608201520190565b9250505090565b61233191925060203d8111612338575b61232981836105ab565b810190612019565b9038612244565b503d61231f565b909291925a9380602061235b6060830151946060810190611fc8565b906123948660405195869485947f1d732756000000000000000000000000000000000000000000000000000000008652600486016120cd565b03816000305af160009181612471575b5061246a575060206000803e7fdeaddead00000000000000000000000000000000000000000000000000000000600051146123fc576123f66123eb6108b795965a906121c0565b6080830151906121d5565b92614ddf565b610f21836040519182917f220266b600000000000000000000000000000000000000000000000000000000835260048301608091815260406020820152600f60408201527f41413935206f7574206f6620676173000000000000000000000000000000000060608201520190565b9450505050565b61248a91925060203d81116123385761232981836105ab565b90386123a4565b6001907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff81146124bf570190565b6124c7612190565b0190565b919081101561250c575b60051b810135907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa181360301821215610359570190565b612514611d9a565b6124d5565b9035907fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18136030182121561035957018035906
7ffffffffffffffff821161035957602001918160051b3603831361035957565b356108b781610422565b1561257e57565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f4141393620696e76616c69642061676772656761746f720000000000000000006044820152fd5b90357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18236030181121561035957016020813591019167ffffffffffffffff821161035957813603831361035957565b6108b7916126578161263d8461045c565b73ffffffffffffffffffffffffffffffffffffffff169052565b602082013560208201526126f26126a361268861267760408601866125dc565b610160806040880152860191612028565b61269560608601866125dc565b908583036060870152612028565b6080840135608084015260a084013560a084015260c084013560c084015260e084013560e084015261010080850135908401526101206126e5818601866125dc565b9185840390860152612028565b9161270361014091828101906125dc565b929091818503910152612028565b949391929083604087016040885252606086019360608160051b8801019482600090815b848310612754575050505050508460206108b795968503910152612028565b9091929394977fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa08b820301855288357ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffea1843603018112156127cf57600191846127bd920161262c565b98602090810196950193019190612735565b8280fd5b908092918237016000815290565b9290936108b796959260c0958552602085015265ffffffffffff8092166040850152166060830152151560808201528160a0820152019061208a565b1561282457565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601760248201527f4141393220696e7465726e616c2063616c6c206f6e6c790000000000000000006044820152fd5b9060406108b79260008152816020820152019061208a565b6040906108b793928152816020820152019061208a565b909291925a936128c230331461281d565b8151946040860151955a6113886060830151890101116129e2576108b7966000958051612909575b50505090612903915a9003608084015101943691610682565b91615047565b612938916129349161292f855173ffffffffffffffffffffffffffffffffffffffff1690565b615c12565b1
590565b612944575b80806128ea565b61290392919450612953615c24565b908151612967575b5050600193909161293d565b7f1c4fada7374c0a9ee8841fc38afe82932dc0f8e69012e927f061a8bae611a20173ffffffffffffffffffffffffffffffffffffffff6020870151926129d860206129c6835173ffffffffffffffffffffffffffffffffffffffff1690565b9201519560405193849316968361289a565b0390a3388061295b565b7fdeaddead0000000000000000000000000000000000000000000000000000000060005260206000fd5b612a22612a1c6040830183611fc8565b90615c07565b90612a33612a1c6060830183611fc8565b90612ae9612a48612a1c610120840184611fc8565b60405194859360208501956101008201359260e08301359260c08101359260a08201359260808301359273ffffffffffffffffffffffffffffffffffffffff60208201359135168c9693909a9998959261012098959273ffffffffffffffffffffffffffffffffffffffff6101408a019d168952602089015260408801526060870152608086015260a085015260c084015260e08301526101008201520152565b0391612b1b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0938481018352826105ab565b51902060408051602081019283523091810191909152466060820152608092830181529091612b4a90826105ab565b51902090565b604051906040820182811067ffffffffffffffff821117612b7b575b60405260006020838281520152565b612b83610505565b612b6c565b906014116103595790601490565b7fffffffffffffffffffffffffffffffffffffffff0000000000000000000000009035818116939260148110612bcb57505050565b60140360031b82901b16169150565b9060c060a06108b793805184526020810151602085015260408101511515604085015265ffffffffffff80606083015116606086015260808201511660808501520151918160a0820152019061208a565b9294612c8c61044d95612c7a610100959998612c68612c54602097610140808c528b0190612bda565b9b878a019060208091805184520151910152565b80516060890152602001516080880152565b805160a08701526020015160c0860152565b73ffffffffffffffffffffffffffffffffffffffff81511660e0850152015191019060208091805184520151910152565b612d0661044d94612cf4612cdf60a0959998969960e0865260e0860190612bda565b98602085019060208091805184520151910152565b80516060840152602001516080830152565b019060208091805184520151910152565b90816020910
31261035957516108b781610422565b9160206108b7938181520191612028565b90612d6c73ffffffffffffffffffffffffffffffffffffffff916108b797959694606085526060850191612028565b941660208201526040818503910152612028565b60009060033d11612d8d57565b905060046000803e60005160e01c90565b600060443d106108b7576040517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc91823d016004833e815167ffffffffffffffff918282113d602484011117612e1a57818401948551938411612e22573d85010160208487010111612e1a57506108b7929101602001906105ab565b949350505050565b50949350505050565b612e386040820182611fc8565b612e50612e448461256d565b93610120810190611fc8565b9290303b1561035957600093612e949160405196879586957f957122ab00000000000000000000000000000000000000000000000000000000875260048701612d3d565b0381305afa9081612f1d575b5061044d576001612eaf612d80565b6308c379a014612ec8575b612ec057565b61044d612183565b612ed0612d9e565b80612edc575b50612eba565b80516000925015612ed657610f21906040519182917f220266b600000000000000000000000000000000000000000000000000000000835260048301612882565b80610f48612f2a9261057b565b38612ea0565b9190612f3b9061317f565b73ffffffffffffffffffffffffffffffffffffffff929183166130da5761306c57612f659061317f565b9116612ffe57612f725750565b604080517f220266b600000000000000000000000000000000000000000000000000000000815260048101929092526024820152602160448201527f41413332207061796d61737465722065787069726564206f72206e6f7420647560648201527f6500000000000000000000000000000000000000000000000000000000000000608482015260a490fd5b610f21826040519182917f220266b600000000000000000000000000000000000000000000000000000000835260048301608091815260406020820152601460408201527f41413334207369676e6174757265206572726f7200000000000000000000000060608201520190565b610f21836040519182917f220266b600000000000000000000000000000000000000000000000000000000835260048301608091815260406020820152601760408201527f414132322065787069726564206f72206e6f742064756500000000000000000060608201520190565b610f21846040519182917f220266b60000000000000000000000000000000000000000000000000000000
0835260048301608091815260406020820152601460408201527f41413234207369676e6174757265206572726f7200000000000000000000000060608201520190565b9291906131549061317f565b909273ffffffffffffffffffffffffffffffffffffffff808095169116036130da5761306c57612f65905b80156131d25761318e9061535f565b73ffffffffffffffffffffffffffffffffffffffff65ffffffffffff8060408401511642119081156131c2575b5091511691565b90506020830151164210386131bb565b50600090600090565b156131e257565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f41413934206761732076616c756573206f766572666c6f7700000000000000006044820152fd5b916000915a9381519061325382826136b3565b61325c81612a0c565b602084015261329a6effffffffffffffffffffffffffffff60808401516060850151176040850151176101008401359060e0850135171711156131db565b6132a382613775565b6132ae818584613836565b97906132df6129346132d4875173ffffffffffffffffffffffffffffffffffffffff1690565b60208801519061546c565b6133db576132ec43600052565b73ffffffffffffffffffffffffffffffffffffffff61332460a0606097015173ffffffffffffffffffffffffffffffffffffffff1690565b166133c1575b505a810360a0840135106133545760809360c092604087015260608601525a900391013501910152565b6040517f220266b600000000000000000000000000000000000000000000000000000000815280610f21600482016080906000815260406020820152601e60408201527f41413430206f76657220766572696669636174696f6e4761734c696d6974000060608201520190565b909350816133d2929750858461455c565b9590923861332a565b6040517f220266b600000000000000000000000000000000000000000000000000000000815280610f21600482016080906000815260406020820152601a60408201527f4141323520696e76616c6964206163636f756e74206e6f6e636500000000000060608201520190565b9290916000925a825161345b81846136b3565b61346483612a0c565b60208501526134a26effffffffffffffffffffffffffffff60808301516060840151176040840151176101008601359060e0870135171711156131db565b6134ab81613775565b6134b78186868b613ba2565b98906134e86129346134dd865173ffffffffffffffffffffffffffffffffffffffff1690565b60208701519061546c565b6135e0576
134f543600052565b73ffffffffffffffffffffffffffffffffffffffff61352d60a0606096015173ffffffffffffffffffffffffffffffffffffffff1690565b166135c5575b505a840360a08601351061355f5750604085015260608401526080919060c0905a900391013501910152565b604080517f220266b600000000000000000000000000000000000000000000000000000000815260048101929092526024820152601e60448201527f41413430206f76657220766572696669636174696f6e4761734c696d697400006064820152608490fd5b909250816135d79298508686856147ef565b96909138613533565b610f21826040519182917f220266b600000000000000000000000000000000000000000000000000000000835260048301608091815260406020820152601a60408201527f4141323520696e76616c6964206163636f756e74206e6f6e636500000000000060608201520190565b1561365557565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f4141393320696e76616c6964207061796d6173746572416e64446174610000006044820152fd5b613725906136dd6136c38261256d565b73ffffffffffffffffffffffffffffffffffffffff168452565b602081013560208401526080810135604084015260a0810135606084015260c0810135608084015260e081013560c084015261010081013560e0840152610120810190611fc8565b90811561376a5761374f61124c6112468460a09461374a601461044d9998101561364e565b612b88565b73ffffffffffffffffffffffffffffffffffffffff16910152565b505060a06000910152565b60a081015173ffffffffffffffffffffffffffffffffffffffff16156137b75760c060035b60ff60408401519116606084015102016080830151019101510290565b60c0600161379a565b6137d86040929594939560608352606083019061262c565b9460208201520152565b9061044d602f60405180947f414132332072657665727465643a20000000000000000000000000000000000060208301526138268151809260208686019101612067565b810103600f8101855201836105ab565b916000926000925a936139046020835193613865855173ffffffffffffffffffffffffffffffffffffffff1690565b9561387d6138766040830183611fc8565b9084613e0d565b60a086015173ffffffffffffffffffffffffffffffffffffffff16906138a243600052565b85809373ffffffffffffffffffffffffffffffffffffffff809416159889613b3a575b60600151908601516040517f3a871cd
d0000000000000000000000000000000000000000000000000000000081529788968795869390600485016137c0565b03938a1690f1829181613b1a575b50613b115750600190613923612d80565b6308c379a014613abd575b50613a50575b613941575b50505a900391565b61396b9073ffffffffffffffffffffffffffffffffffffffff166000526000602052604060002090565b613986610a2c82546dffffffffffffffffffffffffffff1690565b8083116139e3576139dc926dffffffffffffffffffffffffffff9103166dffffffffffffffffffffffffffff167fffffffffffffffffffffffffffffffffffff0000000000000000000000000000825416179055565b3880613939565b6040517f220266b600000000000000000000000000000000000000000000000000000000815280610f21600482016080906000815260406020820152601760408201527f41413231206469646e2774207061792070726566756e6400000000000000000060608201520190565b6040517f220266b600000000000000000000000000000000000000000000000000000000815280610f21600482016080906000815260406020820152601660408201527f4141323320726576657274656420286f72204f4f47290000000000000000000060608201520190565b613ac5612d9e565b9081613ad1575061392e565b610f2191613adf91506137e2565b6040519182917f220266b600000000000000000000000000000000000000000000000000000000835260048301612882565b95506139349050565b613b3391925060203d81116123385761232981836105ab565b9038613912565b9450613b80610a2c613b6c8c73ffffffffffffffffffffffffffffffffffffffff166000526000602052604060002090565b546dffffffffffffffffffffffffffff1690565b8b811115613b975750856060835b969150506138c5565b606087918d03613b8e565b90926000936000935a94613beb6020835193613bd2855173ffffffffffffffffffffffffffffffffffffffff1690565b9561387d613be36040830183611fc8565b90848c61412b565b03938a1690f1829181613ded575b50613de45750600190613c0a612d80565b6308c379a014613d8e575b50613d20575b613c29575b5050505a900391565b613c539073ffffffffffffffffffffffffffffffffffffffff166000526000602052604060002090565b91613c6f610a2c84546dffffffffffffffffffffffffffff1690565b90818311613cba575082547fffffffffffffffffffffffffffffffffffff0000000000000000000000000000169190036dffffffffffffffffffffffffffff16179055388080613c20565b60408
0517f220266b600000000000000000000000000000000000000000000000000000000815260048101929092526024820152601760448201527f41413231206469646e2774207061792070726566756e640000000000000000006064820152608490fd5b610f21846040519182917f220266b600000000000000000000000000000000000000000000000000000000835260048301608091815260406020820152601660408201527f4141323320726576657274656420286f72204f4f47290000000000000000000060608201520190565b613d96612d9e565b9081613da25750613c15565b8691613dae91506137e2565b90610f216040519283927f220266b60000000000000000000000000000000000000000000000000000000084526004840161289a565b9650613c1b9050565b613e0691925060203d81116123385761232981836105ab565b9038613bf9565b909180613e1957505050565b81515173ffffffffffffffffffffffffffffffffffffffff1692833b6140be57606083510151604051907f570e1a3600000000000000000000000000000000000000000000000000000000825260208280613e78878760048401612d2c565b0381600073ffffffffffffffffffffffffffffffffffffffff95867f0000000000000000000000007fc98430eaedbb6070b35b39d7987250490883481690f19182156140b1575b600092614091575b508082169586156140245716809503613fb7573b15613f4a5761124c6112467fd51a9c61267aa6196961883ecf5ff2da6619c37dac0fa92122513fb32c032d2d93613f1193612b88565b602083810151935160a001516040805173ffffffffffffffffffffffffffffffffffffffff9485168152939091169183019190915290a3565b6040517f220266b600000000000000000000000000000000000000000000000000000000815280610f21600482016080906000815260406020820152602060408201527f4141313520696e6974436f6465206d757374206372656174652073656e64657260608201520190565b6040517f220266b600000000000000000000000000000000000000000000000000000000815280610f21600482016080906000815260406020820152602060408201527f4141313420696e6974436f6465206d7573742072657475726e2073656e64657260608201520190565b6040517f220266b600000000000000000000000000000000000000000000000000000000815280610f21600482016080906000815260406020820152601b60408201527f4141313320696e6974436f6465206661696c6564206f72204f4f47000000000060608201520190565b6140aa91925060203d811161146a5761145b818
36105ab565b9038613ec7565b6140b9612183565b613ebf565b6040517f220266b600000000000000000000000000000000000000000000000000000000815280610f21600482016080906000815260406020820152601f60408201527f414131302073656e64657220616c726561647920636f6e73747275637465640060608201520190565b9290918161413a575b50505050565b82515173ffffffffffffffffffffffffffffffffffffffff1693843b6143e257606084510151604051907f570e1a3600000000000000000000000000000000000000000000000000000000825260208280614199888860048401612d2c565b0381600073ffffffffffffffffffffffffffffffffffffffff95867f0000000000000000000000007fc98430eaedbb6070b35b39d7987250490883481690f19182156143d5575b6000926143b5575b5080821696871561434757168096036142d9573b15614273575061124c6112467fd51a9c61267aa6196961883ecf5ff2da6619c37dac0fa92122513fb32c032d2d9361423393612b88565b602083810151935160a001516040805173ffffffffffffffffffffffffffffffffffffffff9485168152939091169183019190915290a338808080614134565b604080517f220266b600000000000000000000000000000000000000000000000000000000815260048101929092526024820152602060448201527f4141313520696e6974436f6465206d757374206372656174652073656e6465726064820152608490fd5b610f21826040519182917f220266b600000000000000000000000000000000000000000000000000000000835260048301608091815260406020820152602060408201527f4141313420696e6974436f6465206d7573742072657475726e2073656e64657260608201520190565b610f21846040519182917f220266b600000000000000000000000000000000000000000000000000000000835260048301608091815260406020820152601b60408201527f4141313320696e6974436f6465206661696c6564206f72204f4f47000000000060608201520190565b6143ce91925060203d811161146a5761145b81836105ab565b90386141e8565b6143dd612183565b6141e0565b604080517f220266b600000000000000000000000000000000000000000000000000000000815260048101929092526024820152601f60448201527f414131302073656e64657220616c726561647920636f6e7374727563746564006064820152608490fd5b1561444f57565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f4141343120746
f6f206c6974746c6520766572696669636174696f6e476173006044820152fd5b919060408382031261035957825167ffffffffffffffff81116103595783019080601f83011215610359578151916144e483610639565b916144f260405193846105ab565b838352602084830101116103595760209261451291848085019101612067565b92015190565b9061044d602f60405180947f414133332072657665727465643a20000000000000000000000000000000000060208301526138268151809260208686019101612067565b93919260609460009460009380519261459b60a08a86015195614580888811614448565b015173ffffffffffffffffffffffffffffffffffffffff1690565b916145c68373ffffffffffffffffffffffffffffffffffffffff166000526000602052604060002090565b946145e2610a2c87546dffffffffffffffffffffffffffff1690565b968588106147825773ffffffffffffffffffffffffffffffffffffffff60208a98946146588a966dffffffffffffffffffffffffffff8b6146919e03166dffffffffffffffffffffffffffff167fffffffffffffffffffffffffffffffffffff0000000000000000000000000000825416179055565b015194604051998a98899788937ff465c77e000000000000000000000000000000000000000000000000000000008552600485016137c0565b0395169103f190818391849361475c575b506147555750506001906146b4612d80565b6308c379a014614733575b506146c657565b6040517f220266b600000000000000000000000000000000000000000000000000000000815280610f21600482016080906000815260406020820152601660408201527f4141333320726576657274656420286f72204f4f47290000000000000000000060608201520190565b61473b612d9e565b908161474757506146bf565b610f2191613adf9150614518565b9450925050565b90925061477b91503d8085833e61477381836105ab565b8101906144ad565b91386146a2565b6040517f220266b600000000000000000000000000000000000000000000000000000000815280610f21600482016080906000815260406020820152601e60408201527f41413331207061796d6173746572206465706f73697420746f6f206c6f77000060608201520190565b91949293909360609560009560009382519061481660a08b84015193614580848611614448565b936148418573ffffffffffffffffffffffffffffffffffffffff166000526000602052604060002090565b61485c610a2c82546dffffffffffffffffffffffffffff1690565b8781106149b7579273fffffffffffffffffffffffffffffff
fffffffff60208a989693946146588a966dffffffffffffffffffffffffffff8d6148d69e9c9a03166dffffffffffffffffffffffffffff167fffffffffffffffffffffffffffffffffffff0000000000000000000000000000825416179055565b0395169103f1908183918493614999575b506149915750506001906148f9612d80565b6308c379a014614972575b5061490c5750565b604080517f220266b600000000000000000000000000000000000000000000000000000000815260048101929092526024820152601660448201527f4141333320726576657274656420286f72204f4f4729000000000000000000006064820152608490fd5b61497a612d9e565b90816149865750614904565b613dae925050614518565b955093505050565b9092506149b091503d8085833e61477381836105ab565b91386148e7565b610f218a6040519182917f220266b600000000000000000000000000000000000000000000000000000000835260048301608091815260406020820152601e60408201527f41413331207061796d6173746572206465706f73697420746f6f206c6f77000060608201520190565b60031115614a2f57565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b929190614a7c6040916002865260606020870152606086019061208a565b930152565b939291906003811015614a2f57604091614a7c91865260606020870152606086019061208a565b9061044d603660405180947f4141353020706f73744f702072657665727465643a20000000000000000000006020830152614aec8151809260208686019101612067565b81010360168101855201836105ab565b929190925a93600091805191614b1183615318565b9260a0810195614b35875173ffffffffffffffffffffffffffffffffffffffff1690565b73ffffffffffffffffffffffffffffffffffffffff93908481169081614ca457505050614b76825173ffffffffffffffffffffffffffffffffffffffff1690565b985b5a90030193840297604084019089825110614c37577f49628fd1471006c1482da88028e9ce4dbb080b815c9b0344d39e5a8e6ec1419f94614bc26020928c614c329551039061553a565b015194896020614c04614be9865173ffffffffffffffffffffffffffffffffffffffff1690565b9a5173ffffffffffffffffffffffffffffffffffffffff1690565b9401519785604051968796169a16988590949392606092608083019683521515602083015260408201520152565b0390a4565b6040517f220266b600000000000000000000000000000000000000000000000000000000815
280610f21600482016080906000815260406020820152602060408201527f414135312070726566756e642062656c6f772061637475616c476173436f737460608201520190565b9a918051614cb4575b5050614b78565b6060850151600099509091803b15614ddb579189918983614d07956040518097819682957fa9a234090000000000000000000000000000000000000000000000000000000084528c029060048401614a5e565b0393f19081614dc8575b50614dc3576001614d20612d80565b6308c379a014614da4575b614d37575b3880614cad565b6040517f220266b600000000000000000000000000000000000000000000000000000000815280610f21600482016080906000815260406020820152601260408201527f4141353020706f73744f7020726576657274000000000000000000000000000060608201520190565b614dac612d9e565b80614db75750614d2b565b613adf610f2191614aa8565b614d30565b80610f48614dd59261057b565b38614d11565b8980fd5b9392915a90600092805190614df382615318565b9360a0830196614e17885173ffffffffffffffffffffffffffffffffffffffff1690565b73ffffffffffffffffffffffffffffffffffffffff95908681169081614f0d57505050614e58845173ffffffffffffffffffffffffffffffffffffffff1690565b915b5a9003019485029860408301908a825110614ea757507f49628fd1471006c1482da88028e9ce4dbb080b815c9b0344d39e5a8e6ec1419f949392614bc2614c32938c60209451039061553a565b604080517f220266b600000000000000000000000000000000000000000000000000000000815260048101929092526024820152602060448201527f414135312070726566756e642062656c6f772061637475616c476173436f73746064820152608490fd5b93918051614f1d575b5050614e5a565b606087015160009a509091803b1561504357918a918a83614f70956040518097819682957fa9a234090000000000000000000000000000000000000000000000000000000084528c029060048401614a5e565b0393f19081615030575b5061502b576001614f89612d80565b6308c379a01461500e575b614fa0575b3880614f16565b610f218b6040519182917f220266b600000000000000000000000000000000000000000000000000000000835260048301608091815260406020820152601260408201527f4141353020706f73744f7020726576657274000000000000000000000000000060608201520190565b615016612d9e565b806150215750614f94565b613dae8d91614aa8565b614f99565b80610f4861503d9261057b565b38614f7a565b8a8
0fd5b909392915a9480519161505983615318565b9260a081019561507d875173ffffffffffffffffffffffffffffffffffffffff1690565b73ffffffffffffffffffffffffffffffffffffffff938185169182615165575050506150bd825173ffffffffffffffffffffffffffffffffffffffff1690565b985b5a90030193840297604084019089825110614c37577f49628fd1471006c1482da88028e9ce4dbb080b815c9b0344d39e5a8e6ec1419f946151096020928c614c329551039061553a565b61511288614a25565b015194896020615139614be9865173ffffffffffffffffffffffffffffffffffffffff1690565b940151604080519182529815602082015297880152606087015290821695909116939081906080820190565b9a918151615175575b50506150bf565b8784026151818a614a25565b60028a1461520c576060860151823b15610359576151d493600080948d604051978896879586937fa9a2340900000000000000000000000000000000000000000000000000000000855260048501614a81565b0393f180156151ff575b6151ec575b505b388061516e565b80610f486151f99261057b565b386151e3565b615207612183565b6151de565b6060860151823b156103595761525793600080948d604051978896879586937fa9a2340900000000000000000000000000000000000000000000000000000000855260048501614a81565b0393f19081615305575b50615300576001615270612d80565b6308c379a0146152ed575b156151e5576040517f220266b600000000000000000000000000000000000000000000000000000000815280610f21600482016080906000815260406020820152601260408201527f4141353020706f73744f7020726576657274000000000000000000000000000060608201520190565b6152f5612d9e565b80614db7575061527b565b6151e5565b80610f486153129261057b565b38615261565b60e060c082015191015180821461533c57480180821015615337575090565b905090565b5090565b6040519061534d8261058f565b60006040838281528260208201520152565b615367615340565b5065ffffffffffff808260a01c1680156153b3575b604051926153898461058f565b73ffffffffffffffffffffffffffffffffffffffff8116845260d01c602084015216604082015290565b508061537c565b6153cf6153d5916153c9615340565b5061535f565b9161535f565b9073ffffffffffffffffffffffffffffffffffffffff9182825116928315615461575b65ffffffffffff928391826040816020850151169301511693836040816020840151169201511690808410615459575b508085116
15451575b506040519561543f8761058f565b16855216602084015216604082015290565b935038615431565b925038615428565b8151811693506153f8565b73ffffffffffffffffffffffffffffffffffffffff16600052600160205267ffffffffffffffff6154c88260401c60406000209077ffffffffffffffffffffffffffffffffffffffffffffffff16600052602052604060002090565b918254926154d584612491565b9055161490565b9073ffffffffffffffffffffffffffffffffffffffff6154fa612b50565b9216600052600060205263ffffffff600160406000206dffffffffffffffffffffffffffff815460781c1685520154166020830152565b61044d3361562b565b73ffffffffffffffffffffffffffffffffffffffff16600052600060205260406000206dffffffffffffffffffffffffffff8082541692830180931161561e575b8083116155c05761044d92166dffffffffffffffffffffffffffff167fffffffffffffffffffffffffffffffffffff0000000000000000000000000000825416179055565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601060248201527f6465706f736974206f766572666c6f77000000000000000000000000000000006044820152fd5b615626612190565b61557b565b73ffffffffffffffffffffffffffffffffffffffff9061564b348261553a565b168060005260006020527f2da466a7b24304f47e87fa2e1e5a81b9831ce54fec19055ce277ca2f39ba42c460206dffffffffffffffffffffffffffff60406000205416604051908152a2565b1561569e57565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601a60248201527f6d757374207370656369667920756e7374616b652064656c61790000000000006044820152fd5b1561570357565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f63616e6e6f7420646563726561736520756e7374616b652074696d65000000006044820152fd5b1561576857565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6e6f207374616b652073706563696669656400000000000000000000000000006044820152fd5b156157cd57565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152600e60248201527f7374616b65206f766572666c6f7700000
00000000000000000000000000000006044820152fd5b9065ffffffffffff6080600161044d9461588b6dffffffffffffffffffffffffffff86511682906dffffffffffffffffffffffffffff167fffffffffffffffffffffffffffffffffffff0000000000000000000000000000825416179055565b602085015115156eff000000000000000000000000000082549160701b16807fffffffffffffffffffffffffffffffffff00ffffffffffffffffffffffffffff83161783557fffffff000000000000000000000000000000ffffffffffffffffffffffffffff7cffffffffffffffffffffffffffff000000000000000000000000000000604089015160781b16921617178155019263ffffffff6060820151167fffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000008554161784550151167fffffffffffffffffffffffffffffffffffffffffffff000000000000ffffffff69ffffffffffff0000000083549260201b169116179055565b1561599657565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f616c726561647920756e7374616b696e670000000000000000000000000000006044820152fd5b91909165ffffffffffff808094169116019182116121cd57565b15615a1557565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601460248201527f4e6f207374616b6520746f2077697468647261770000000000000000000000006044820152fd5b15615a7a57565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f6d7573742063616c6c20756e6c6f636b5374616b6528292066697273740000006044820152fd5b15615adf57565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601b60248201527f5374616b65207769746864726177616c206973206e6f742064756500000000006044820152fd5b15615b4457565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f6661696c656420746f207769746864726177207374616b6500000000000000006044820152fd5b15615ba957565b60646040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601260248201527f6661696c656420746f207769746864726177000000000000000000000
00000006044820152fd5b816040519182372090565b9060009283809360208451940192f190565b3d610800808211615c4b575b50604051906020818301016040528082526000602083013e90565b905038615c3056fea2646970667358221220a706d8b02d7086d80e9330811f5af84b2614abdc5e9a1f2260126070a31d7cee64736f6c63430008110033 \ No newline at end of file diff --git a/crates/types/contracts/foundry.toml b/crates/types/contracts/foundry.toml index baea869e..52cafd5d 100644 --- a/crates/types/contracts/foundry.toml +++ b/crates/types/contracts/foundry.toml @@ -4,16 +4,12 @@ out = 'out' libs = ['lib'] test = 'test' cache_path = 'cache' +solc_version = '0.8.26' cbor_metadata = true remappings = [ 'forge-std/=lib/forge-std/src', 'ds-test/=lib/forge-std/lib/ds-test/src/', - 'account-abstraction/=lib/account-abstraction/contracts/', - '@openzeppelin/=lib/openzeppelin-contracts/' + 'account-abstraction/v0_6=lib/account-abstraction-versions/v0_6/contracts/', + 'account-abstraction/v0_7=lib/account-abstraction-versions/v0_7/contracts/', ] - -[etherscan] -unknown_chain = { key = "Zh6Ck2RK9cewB2bOIXBAvXdpkVhLUjYz", chain=28882, url="https://api.tenderly.co/api/v1/account/mmontour1306/project/aa-hc-sepolia/etherscan/verify/network/28882/public" } -#unknown_chain = { key = "Zh6Ck2RK9cewB2bOIXBAvXdpkVhLUjYz", chain=28882, url="https://boba-sepolia.gateway.tenderly.co/27jDOLrTf7Wu4ubQGL3tY" } - diff --git a/crates/types/contracts/hc_scripts/DeployHybridAccount.sol b/crates/types/contracts/hc_scripts/DeployHybridAccount.sol index 45e3cb04..e96df8cd 100644 --- a/crates/types/contracts/hc_scripts/DeployHybridAccount.sol +++ b/crates/types/contracts/hc_scripts/DeployHybridAccount.sol @@ -2,10 +2,10 @@ pragma solidity ^0.8.13; import "forge-std/Script.sol"; -import "lib/account-abstraction/contracts/core/EntryPoint.sol"; -import "lib/account-abstraction/contracts/core/HCHelper.sol"; -import "lib/account-abstraction/contracts/samples/HybridAccountFactory.sol"; -import 
"lib/account-abstraction/contracts/samples/SimpleAccountFactory.sol"; +import "lib/account-abstraction-versions/v0_6/contracts/core/EntryPoint.sol"; +import "lib/account-abstraction-versions/v0_6/contracts/core/HCHelper.sol"; +import "lib/account-abstraction-versions/v0_6/contracts/samples/HybridAccountFactory.sol"; +import "lib/account-abstraction-versions/v0_6/contracts/samples/SimpleAccountFactory.sol"; contract LocalDeploy is Script { function run() external diff --git a/crates/types/contracts/hc_scripts/ExampleDeploy.s.sol b/crates/types/contracts/hc_scripts/ExampleDeploy.s.sol index 4c2b7404..2bf652be 100644 --- a/crates/types/contracts/hc_scripts/ExampleDeploy.s.sol +++ b/crates/types/contracts/hc_scripts/ExampleDeploy.s.sol @@ -2,14 +2,14 @@ pragma solidity ^0.8.13; import "forge-std/Script.sol"; -import "lib/account-abstraction/contracts/samples/HybridAccount.sol"; -import "lib/account-abstraction/contracts/test/TestAuctionSystem.sol"; -import "lib/account-abstraction/contracts/test/TestCaptcha.sol"; -import "lib/account-abstraction/contracts/test/TestCounter.sol"; -import "lib/account-abstraction/contracts/test/TestRainfallInsurance.sol"; -import "lib/account-abstraction/contracts/test/TestSportsBetting.sol"; -import "lib/account-abstraction/contracts/test/TestKyc.sol"; -import "lib/account-abstraction/contracts/test/TestTokenPrice.sol"; +import "lib/account-abstraction-versions/v0_6/contracts/samples/HybridAccount.sol"; +import "lib/account-abstraction-versions/v0_6/contracts/test/TestAuctionSystem.sol"; +import "lib/account-abstraction-versions/v0_6/contracts/test/TestCaptcha.sol"; +import "lib/account-abstraction-versions/v0_6/contracts/test/TestCounter.sol"; +import "lib/account-abstraction-versions/v0_6/contracts/test/TestRainfallInsurance.sol"; +import "lib/account-abstraction-versions/v0_6/contracts/test/TestSportsBetting.sol"; +import "lib/account-abstraction-versions/v0_6/contracts/test/TestKyc.sol"; +import 
"lib/account-abstraction-versions/v0_6/contracts/test/TestTokenPrice.sol"; contract LocalDeploy is Script { function run() external diff --git a/crates/types/contracts/hc_scripts/LocalDeploy.s.sol b/crates/types/contracts/hc_scripts/LocalDeploy.s.sol index 6d7ea657..ae6c9b09 100644 --- a/crates/types/contracts/hc_scripts/LocalDeploy.s.sol +++ b/crates/types/contracts/hc_scripts/LocalDeploy.s.sol @@ -2,10 +2,10 @@ pragma solidity ^0.8.13; import "forge-std/Script.sol"; -import "lib/account-abstraction/contracts/core/EntryPoint.sol"; -import "lib/account-abstraction/contracts/core/HCHelper.sol"; -import "lib/account-abstraction/contracts/samples/HybridAccountFactory.sol"; -import "lib/account-abstraction/contracts/samples/SimpleAccountFactory.sol"; +import "lib/account-abstraction-versions/v0_6/contracts/core/EntryPoint.sol"; +import "lib/account-abstraction-versions/v0_6/contracts/core/HCHelper.sol"; +import "lib/account-abstraction-versions/v0_6/contracts/samples/HybridAccountFactory.sol"; +import "lib/account-abstraction-versions/v0_6/contracts/samples/SimpleAccountFactory.sol"; contract LocalDeploy is Script { function run() external diff --git a/crates/types/contracts/lib/account-abstraction b/crates/types/contracts/lib/account-abstraction-versions/v0_6 similarity index 100% rename from crates/types/contracts/lib/account-abstraction rename to crates/types/contracts/lib/account-abstraction-versions/v0_6 diff --git a/crates/types/contracts/lib/account-abstraction-versions/v0_7 b/crates/types/contracts/lib/account-abstraction-versions/v0_7 new file mode 160000 index 00000000..7af70c89 --- /dev/null +++ b/crates/types/contracts/lib/account-abstraction-versions/v0_7 @@ -0,0 +1 @@ +Subproject commit 7af70c8993a6f42973f520ae0752386a5032abe7 diff --git a/crates/types/contracts/lib/openzeppelin-contracts b/crates/types/contracts/lib/openzeppelin-contracts deleted file mode 160000 index 0a25c194..00000000 --- a/crates/types/contracts/lib/openzeppelin-contracts +++ 
/dev/null @@ -1 +0,0 @@ -Subproject commit 0a25c1940ca220686588c4af3ec526f725fe2582 diff --git a/crates/types/contracts/lib/openzeppelin-contracts-versions/v4_9 b/crates/types/contracts/lib/openzeppelin-contracts-versions/v4_9 new file mode 160000 index 00000000..dc44c9f1 --- /dev/null +++ b/crates/types/contracts/lib/openzeppelin-contracts-versions/v4_9 @@ -0,0 +1 @@ +Subproject commit dc44c9f1a4c3b10af99492eed84f83ed244203f6 diff --git a/crates/types/contracts/lib/openzeppelin-contracts-versions/v5_0 b/crates/types/contracts/lib/openzeppelin-contracts-versions/v5_0 new file mode 160000 index 00000000..dbb6104c --- /dev/null +++ b/crates/types/contracts/lib/openzeppelin-contracts-versions/v5_0 @@ -0,0 +1 @@ +Subproject commit dbb6104ce834628e473d2173bbc9d47f81a9eec3 diff --git a/crates/types/contracts/src/GetGasUsed.sol b/crates/types/contracts/src/GetGasUsed.sol deleted file mode 100644 index 74309768..00000000 --- a/crates/types/contracts/src/GetGasUsed.sol +++ /dev/null @@ -1,24 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0 -pragma solidity ^0.8.19; - -// Not intended to be deployed on-chain.. Instead, using a call to simulate -// deployment will revert with an error containing the desired result. 
- -contract GetGasUsed { - error GasUsedResult(uint256 gasUsed, bool success, bytes result); - - constructor(address target, uint256 value, bytes memory data) { - (uint256 gasUsed, bool success, bytes memory result) = getGas(target, value, data); - revert GasUsedResult(gasUsed, success, result); - } - - function getGas( - address target, - uint256 value, - bytes memory data - ) public returns (uint256, bool, bytes memory) { - uint256 preGas = gasleft(); - (bool success, bytes memory result) = target.call{value : value}(data); - return (preGas - gasleft(), success, result); - } -} diff --git a/crates/types/contracts/src/PaymasterHelper.sol b/crates/types/contracts/src/PaymasterHelper.sol deleted file mode 100644 index 58547045..00000000 --- a/crates/types/contracts/src/PaymasterHelper.sol +++ /dev/null @@ -1,26 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-only -pragma solidity ^0.8.12; - -import "account-abstraction/interfaces/IStakeManager.sol"; - -contract PaymasterHelper { - IStakeManager public stakeManager; - - constructor(address _stakeManagerAddress) { - stakeManager = IStakeManager(_stakeManagerAddress); - } - - function getBalances(address[] calldata addresses) external view returns (uint256[] memory) { - uint256[] memory balances = new uint256[](addresses.length); - - for (uint256 i = 0; i < addresses.length; i++) { - balances[i] = stakeManager.balanceOf(addresses[i]); - } - - return balances; - } - - function getDepositInfo(address account) external view returns (IStakeManager.DepositInfo memory info) { - return stakeManager.getDepositInfo(account); - } -} diff --git a/crates/types/contracts/src/imports.sol b/crates/types/contracts/src/imports.sol deleted file mode 100644 index 83f6ba59..00000000 --- a/crates/types/contracts/src/imports.sol +++ /dev/null @@ -1,12 +0,0 @@ -// SPDX-License-Identifier: UNLICENSED -pragma solidity ^0.8.13; - -// Simply importing a dependency is enough for Forge to include it in builds. 
- -import "account-abstraction/samples/SimpleAccount.sol"; -import "account-abstraction/samples/SimpleAccountFactory.sol"; -import "account-abstraction/samples/VerifyingPaymaster.sol"; -import "account-abstraction/core/EntryPoint.sol"; -import "account-abstraction/interfaces/IAggregator.sol"; -import "account-abstraction/interfaces/IStakeManager.sol"; -import "account-abstraction/core/HCHelper.sol"; diff --git a/crates/types/contracts/src/utils/CallGasEstimationProxyTypes.sol b/crates/types/contracts/src/utils/CallGasEstimationProxyTypes.sol new file mode 100644 index 00000000..5f466f8c --- /dev/null +++ b/crates/types/contracts/src/utils/CallGasEstimationProxyTypes.sol @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity ^0.8.13; + +error EstimateCallGasResult(uint256 gasEstimate, uint256 numRounds); + +error EstimateCallGasContinuation(uint256 minGas, uint256 maxGas, uint256 numRounds); + +error EstimateCallGasRevertAtMax(bytes revertData); + +error TestCallGasResult(bool success, uint256 gasUsed, bytes revertData); + +// keccak("CallGasEstimationProxy")[:20] +// Don't use an immutable constant. We want the "deployedBytecode" in +// the generated JSON to contain this constant. +address constant IMPLEMENTATION_ADDRESS_MARKER = 0xA13dB4eCfbce0586E57D1AeE224FbE64706E8cd3; diff --git a/crates/types/contracts/src/GetCodeHashes.sol b/crates/types/contracts/src/utils/GetCodeHashes.sol similarity index 100% rename from crates/types/contracts/src/GetCodeHashes.sol rename to crates/types/contracts/src/utils/GetCodeHashes.sol diff --git a/crates/types/contracts/src/utils/GetGasUsed.sol b/crates/types/contracts/src/utils/GetGasUsed.sol new file mode 100644 index 00000000..5585abbf --- /dev/null +++ b/crates/types/contracts/src/utils/GetGasUsed.sol @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-3.0 +pragma solidity ^0.8.19; + +// Not intended to be deployed on-chain.. Instead use state overrides to deploy the bytecode and call it. 
+ +contract GetGasUsed { + struct GasUsedResult { + uint256 gasUsed; + bool success; + bytes result; + } + + /** + * Contract should not be deployed + */ + constructor() { + require(block.number < 100, "should not be deployed"); + } + + function getGas( + address target, + uint256 value, + bytes memory data + ) public returns (GasUsedResult memory) { + uint256 preGas = gasleft(); + (bool success, bytes memory result) = target.call{value : value}(data); + return GasUsedResult({ + gasUsed: preGas - gasleft(), + success: success, + result: result + }); + } +} diff --git a/crates/types/contracts/src/utils/StorageLoader.sol b/crates/types/contracts/src/utils/StorageLoader.sol new file mode 100644 index 00000000..1cdab1d1 --- /dev/null +++ b/crates/types/contracts/src/utils/StorageLoader.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity ^0.8.25; + +contract StorageLoader { + fallback() external payable { + assembly { + let cursor := 0 + + for {} lt(cursor, calldatasize()) {cursor := add(cursor, 0x20)} { + let slot := calldataload(cursor) + mstore(cursor, sload(slot)) + } + + return(0, cursor) + } + } +} diff --git a/crates/types/contracts/src/CallGasEstimationProxy.sol b/crates/types/contracts/src/v0_6/CallGasEstimationProxy.sol similarity index 72% rename from crates/types/contracts/src/CallGasEstimationProxy.sol rename to crates/types/contracts/src/v0_6/CallGasEstimationProxy.sol index 331c3173..d28765de 100644 --- a/crates/types/contracts/src/CallGasEstimationProxy.sol +++ b/crates/types/contracts/src/v0_6/CallGasEstimationProxy.sol @@ -1,9 +1,10 @@ // SPDX-License-Identifier: UNLICENSED pragma solidity ^0.8.13; -import "account-abstraction/interfaces/IEntryPoint.sol"; -import "@openzeppelin/contracts/proxy/Proxy.sol"; -import "@openzeppelin/contracts/utils/math/Math.sol"; +import "openzeppelin-contracts-versions/v5_0/contracts/proxy/Proxy.sol"; +import "openzeppelin-contracts-versions/v5_0/contracts/utils/math/Math.sol"; + +import 
"../utils/CallGasEstimationProxyTypes.sol"; /** * Contract used in `eth_call`'s "overrides" parameter in order to estimate the @@ -36,17 +37,8 @@ import "@openzeppelin/contracts/utils/math/Math.sol"; contract CallGasEstimationProxy is Proxy { using Math for uint256; - function _implementation() - internal - pure - virtual - override - returns (address) - { - // keccak("CallGasEstimationProxy")[:20] - // Don't use an immutable constant. We want the "deployedBytecode" in - // the generated JSON to contain this constant. - return 0xA13dB4eCfbce0586E57D1AeE224FbE64706E8cd3; + function _implementation() internal pure virtual override returns (address) { + return IMPLEMENTATION_ADDRESS_MARKER; } struct EstimateCallGasArgs { @@ -58,12 +50,6 @@ contract CallGasEstimationProxy is Proxy { bool isContinuation; } - error EstimateCallGasResult(uint256 gasEstimate, uint256 numRounds); - - error EstimateCallGasContinuation(uint256 minGas, uint256 maxGas, uint256 numRounds); - - error EstimateCallGasRevertAtMax(bytes revertData); - /** * Runs a binary search to find the smallest amount of gas at which the call * succeeds. @@ -94,21 +80,14 @@ contract CallGasEstimationProxy is Proxy { uint256 scaledGuess = 0; if (!args.isContinuation) { // Make one call at full gas to make sure success is even possible. 
- ( - bool success, - uint256 gasUsed, - bytes memory revertData - ) = innerCall(args.sender, args.callData, args.maxGas); + (bool success, uint256 gasUsed, bytes memory revertData) = + innerCall(args.sender, args.callData, args.maxGas); if (!success) { revert EstimateCallGasRevertAtMax(revertData); } scaledGuess = (gasUsed * 2) / args.rounding; } else { - scaledGuess = chooseGuess( - scaledMaxFailureGas, - scaledMinSuccessGas, - scaledGasUsedInSuccess - ); + scaledGuess = chooseGuess(scaledMaxFailureGas, scaledMinSuccessGas, scaledGasUsedInSuccess); } uint256 numRounds = 0; while (scaledMaxFailureGas + 1 < scaledMinSuccessGas) { @@ -119,37 +98,32 @@ contract CallGasEstimationProxy is Proxy { uint256 nextMax = scaledMinSuccessGas * args.rounding; revert EstimateCallGasContinuation(nextMin, nextMax, numRounds); } - (bool success, uint256 gasUsed, ) = innerCall( - args.sender, - args.callData, - guess - ); + (bool success, uint256 gasUsed,) = innerCall(args.sender, args.callData, guess); if (success) { - scaledGasUsedInSuccess = scaledGasUsedInSuccess.min( - gasUsed.ceilDiv(args.rounding) - ); + scaledGasUsedInSuccess = scaledGasUsedInSuccess.min(gasUsed.ceilDiv(args.rounding)); scaledMinSuccessGas = scaledGuess; } else { scaledMaxFailureGas = scaledGuess; } - scaledGuess = chooseGuess( - scaledMaxFailureGas, - scaledMinSuccessGas, - scaledGasUsedInSuccess - ); + scaledGuess = chooseGuess(scaledMaxFailureGas, scaledMinSuccessGas, scaledGasUsedInSuccess); } - revert EstimateCallGasResult( - args.maxGas.min(scaledMinSuccessGas * args.rounding), - numRounds - ); + revert EstimateCallGasResult(args.maxGas.min(scaledMinSuccessGas * args.rounding), numRounds); } - function chooseGuess( - uint256 highestFailureGas, - uint256 lowestSuccessGas, - uint256 lowestGasUsedInSuccess - ) private pure returns (uint256) { + /** + * A helper function for testing execution at a given gas limit. 
+ */ + function testCallGas(address sender, bytes calldata callData, uint256 callGasLimit) external { + (bool success, uint256 gasUsed, bytes memory revertData) = innerCall(sender, callData, callGasLimit); + revert TestCallGasResult(success, gasUsed, revertData); + } + + function chooseGuess(uint256 highestFailureGas, uint256 lowestSuccessGas, uint256 lowestGasUsedInSuccess) + private + pure + returns (uint256) + { uint256 average = (highestFailureGas + lowestSuccessGas) / 2; if (lowestGasUsedInSuccess <= highestFailureGas) { // Handle pathological cases where the contract requires a lot of @@ -174,11 +148,10 @@ contract CallGasEstimationProxy is Proxy { error _InnerCallResult(bool success, uint256 gasUsed, bytes revertData); - function innerCall( - address sender, - bytes calldata callData, - uint256 gas - ) private returns (bool success, uint256 gasUsed, bytes memory revertData) { + function innerCall(address sender, bytes calldata callData, uint256 gas) + private + returns (bool success, uint256 gasUsed, bytes memory revertData) + { try this._innerCall(sender, callData, gas) { // Should never happen. _innerCall should always revert. revert(); @@ -187,21 +160,14 @@ contract CallGasEstimationProxy is Proxy { assembly { innerCallRevertData := add(innerCallRevertData, 0x04) } - (success, gasUsed, revertData) = abi.decode( - innerCallRevertData, - (bool, uint256, bytes) - ); + (success, gasUsed, revertData) = abi.decode(innerCallRevertData, (bool, uint256, bytes)); } } - function _innerCall( - address sender, - bytes calldata callData, - uint256 gas - ) external { + function _innerCall(address sender, bytes calldata callData, uint256 gas) external { uint256 preGas = gasleft(); (bool success, bytes memory data) = sender.call{gas: gas}(callData); - uint gasUsed = preGas - gasleft(); + uint256 gasUsed = preGas - gasleft(); bytes memory revertData = success ? 
bytes("") : data; revert _InnerCallResult(success, gasUsed, revertData); } diff --git a/crates/types/contracts/src/v0_6/GetBalances.sol b/crates/types/contracts/src/v0_6/GetBalances.sol new file mode 100644 index 00000000..e857bdb3 --- /dev/null +++ b/crates/types/contracts/src/v0_6/GetBalances.sol @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-3.0-only +pragma solidity ^0.8.12; + +import "account-abstraction/v0_6/interfaces/IStakeManager.sol"; + +contract GetBalances { + error GetBalancesResult(uint256[] balances); + + constructor(address stakeManager, address[] memory addresses) { + revert GetBalancesResult(getBalancesHelper(stakeManager, addresses)); + } + + function getBalancesHelper(address stakeManager, address[] memory addresses) public view returns (uint256[] memory) { + uint256[] memory balances = new uint256[](addresses.length); + IStakeManager istakeManager = IStakeManager(stakeManager); + + for (uint256 i = 0; i < addresses.length; i++) { + balances[i] = istakeManager.balanceOf(addresses[i]); + } + + return balances; + } +} diff --git a/crates/types/contracts/src/PrecompileAccount.sol b/crates/types/contracts/src/v0_6/PrecompileAccount.sol similarity index 94% rename from crates/types/contracts/src/PrecompileAccount.sol rename to crates/types/contracts/src/v0_6/PrecompileAccount.sol index 31165850..3c037204 100644 --- a/crates/types/contracts/src/PrecompileAccount.sol +++ b/crates/types/contracts/src/v0_6/PrecompileAccount.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-3.0 pragma solidity ^0.8.13; -import "account-abstraction/interfaces/IAccount.sol"; +import "account-abstraction/v0_6/interfaces/IAccount.sol"; contract PrecompileAccount is IAccount { address public precompile; diff --git a/crates/types/contracts/src/v0_6/imports.sol b/crates/types/contracts/src/v0_6/imports.sol new file mode 100644 index 00000000..49c96875 --- /dev/null +++ b/crates/types/contracts/src/v0_6/imports.sol @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: UNLICENSED 
+pragma solidity ^0.8.13; + +// Simply importing a dependency is enough for Forge to include it in builds. + +import "account-abstraction/v0_6/samples/SimpleAccount.sol"; +import "account-abstraction/v0_6/samples/SimpleAccountFactory.sol"; +import "account-abstraction/v0_6/samples/VerifyingPaymaster.sol"; +import "account-abstraction/v0_6/interfaces/IEntryPoint.sol"; +import "account-abstraction/v0_6/interfaces/IAggregator.sol"; +import "account-abstraction/v0_6/interfaces/IStakeManager.sol"; +import "account-abstraction/v0_6/core/HCHelper.sol"; diff --git a/crates/types/contracts/src/v0_7/CallGasEstimationProxy.sol b/crates/types/contracts/src/v0_7/CallGasEstimationProxy.sol new file mode 100644 index 00000000..22283f7d --- /dev/null +++ b/crates/types/contracts/src/v0_7/CallGasEstimationProxy.sol @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity ^0.8.13; + +import "openzeppelin-contracts-versions/v5_0/contracts/proxy/Proxy.sol"; +import "openzeppelin-contracts-versions/v5_0/contracts/utils/math/Math.sol"; +import "account-abstraction/v0_7/interfaces/IAccountExecute.sol"; +import "account-abstraction/v0_7/interfaces/PackedUserOperation.sol"; +import "account-abstraction/v0_7/interfaces/IEntryPoint.sol"; + +import "../utils/CallGasEstimationProxyTypes.sol"; + +/** + * Contract used in `eth_call`'s "overrides" parameter in order to estimate the + * required `callGasLimit` for a user operation. + * + * This contract is solving the problem that the entry point's + * `simulateHandleOp` doesn't return whether the op's call succeeded, thus + * making it impossible to use directly for trying call gas limits to see if + * they work. We could call the sender directly with its call data, but that + * fails because we do need to run the validation step first, as it may cause + * changes to the sender's state or even deploy the sender in the first place. 
+ * We can use `simulateHandleOp`s optional `target` and `targetData` parameters + * to run code after the validation step, but we need to watch out for the + * restriction that a typical sender will reject calls not coming from the + * entry point address. + * + * The solution is to create a proxy contract which delegates to the entry point + * but also exposes a method for estimating call gas by binary searching. + * We then call `simulateHandleOp` on this contract and use `target` and + * `targetData` to have this contract call itself to run a binary search to + * discover the call gas estimate. Thus when we call `simulateHandleOp`, we call + * it on this contract, using `eth_call`s overrides to move the original entry + * point code to a different address, then putting this contract's code at the + * original entry point address and having it's proxy target be the address to + * which we moved the entry point code. + * + * Note that this contract is never deployed. It is only used for its compiled + * bytecode, which is passed as an override in `eth_call`. + */ +contract CallGasEstimationProxy is Proxy { + using Math for uint256; + + function _implementation() internal pure virtual override returns (address) { + return IMPLEMENTATION_ADDRESS_MARKER; + } + + struct EstimateCallGasArgs { + PackedUserOperation userOp; + uint256 minGas; + uint256 maxGas; + uint256 rounding; + bool isContinuation; + } + + /** + * Runs a binary search to find the smallest amount of gas at which the call + * succeeds. + * + * Always reverts with its result, which is one of the following: + * + * - The successful gas estimate + * - That the call fails even with max gas + * - A new min and max gas to be used in a follow-up call, if we ran out of + * gas before completing the binary search. + * + * Takes a `rounding` parameter which rounds all guesses and the final + * result to a multiple of that parameter. 
+ * + * As an optimization, if a round of binary search just completed + * successfully and used N gas, then the next round will try 2N gas if it's + * lower than the next (low + high) / 2 guess. This helps us quickly narrow + * down the common case where the gas needed is much smaller than the + * initial upper bound. + */ + function estimateCallGas(EstimateCallGasArgs calldata args) external { + // Will only be violated if the op is doing shinanigans where it tries + // to call this method on the entry point to throw off gas estimates. + require(msg.sender == address(this)); + uint256 scaledMaxFailureGas = args.minGas / args.rounding; + uint256 scaledMinSuccessGas = args.maxGas.ceilDiv(args.rounding); + uint256 scaledGasUsedInSuccess = scaledMinSuccessGas; + uint256 scaledGuess = 0; + bytes32 userOpHash = _getUserOpHashInternal(args.userOp); + if (!args.isContinuation) { + // Make one call at full gas to make sure success is even possible. + (bool success, uint256 gasUsed, bytes memory revertData) = innerCall(args.userOp, userOpHash, args.maxGas); + if (!success) { + revert EstimateCallGasRevertAtMax(revertData); + } + scaledGuess = (gasUsed * 2) / args.rounding; + } else { + scaledGuess = chooseGuess(scaledMaxFailureGas, scaledMinSuccessGas, scaledGasUsedInSuccess); + } + uint256 numRounds = 0; + while (scaledMaxFailureGas + 1 < scaledMinSuccessGas) { + numRounds++; + uint256 guess = scaledGuess * args.rounding; + if (!isEnoughGasForGuess(guess)) { + uint256 nextMin = scaledMaxFailureGas * args.rounding; + uint256 nextMax = scaledMinSuccessGas * args.rounding; + revert EstimateCallGasContinuation(nextMin, nextMax, numRounds); + } + (bool success, uint256 gasUsed,) = innerCall(args.userOp, userOpHash, guess); + if (success) { + scaledGasUsedInSuccess = scaledGasUsedInSuccess.min(gasUsed.ceilDiv(args.rounding)); + scaledMinSuccessGas = scaledGuess; + } else { + scaledMaxFailureGas = scaledGuess; + } + + scaledGuess = chooseGuess(scaledMaxFailureGas, 
scaledMinSuccessGas, scaledGasUsedInSuccess); + } + revert EstimateCallGasResult(args.maxGas.min(scaledMinSuccessGas * args.rounding), numRounds); + } + + /** + * A helper function for testing execution at a given gas limit. + */ + function testCallGas(PackedUserOperation calldata userOp, uint256 callGasLimit) external { + bytes32 userOpHash = _getUserOpHashInternal(userOp); + (bool success, uint256 gasUsed, bytes memory revertData) = innerCall(userOp, userOpHash, callGasLimit); + revert TestCallGasResult(success, gasUsed, revertData); + } + + function chooseGuess(uint256 highestFailureGas, uint256 lowestSuccessGas, uint256 lowestGasUsedInSuccess) + private + pure + returns (uint256) + { + uint256 average = (highestFailureGas + lowestSuccessGas) / 2; + if (lowestGasUsedInSuccess <= highestFailureGas) { + // Handle pathological cases where the contract requires a lot of + // gas but uses very little, which without this branch could cause + // the guesses to inch up a tiny bit at a time. + return average; + } else { + return average.min(2 * lowestGasUsedInSuccess); + } + } + + function isEnoughGasForGuess(uint256 guess) private view returns (bool) { + // Because of the 1/64 rule and the fact that we need two levels of + // calls, we need + // + // guess < (63/64)^2 * (gas - some_overhead) + // + // We'll take the overhead to be 50000, which should leave plenty left + // over for us to hand the result back to the EntryPoint to return. 
+ return (64 * 64 * guess) / (63 * 63) + 50000 < gasleft(); + } + + error _InnerCallResult(bool success, uint256 gasUsed, bytes revertData); + + function innerCall(PackedUserOperation calldata userOp, bytes32 userOpHash, uint256 gas) + private + returns (bool success, uint256 gasUsed, bytes memory revertData) + { + bytes calldata callData = userOp.callData; + bytes4 methodSig; + assembly { + let len := callData.length + if gt(len, 3) { methodSig := calldataload(callData.offset) } + } + + bytes memory executeCall; + if (methodSig == IAccountExecute.executeUserOp.selector) { + executeCall = abi.encodeCall(IAccountExecute.executeUserOp, (userOp, userOpHash)); + } else { + executeCall = callData; + } + + try this._innerCall(userOp.sender, executeCall, gas) { + // Should never happen. _innerCall should always revert. + revert(); + } catch (bytes memory innerCallRevertData) { + require(bytes4(innerCallRevertData) == _InnerCallResult.selector); + assembly { + innerCallRevertData := add(innerCallRevertData, 0x04) + } + (success, gasUsed, revertData) = abi.decode(innerCallRevertData, (bool, uint256, bytes)); + } + } + + function _innerCall(address sender, bytes calldata callData, uint256 gas) external { + uint256 preGas = gasleft(); + (bool success, bytes memory data) = sender.call{gas: gas}(callData); + uint256 gasUsed = preGas - gasleft(); + bytes memory revertData = success ? 
bytes("") : data; + revert _InnerCallResult(success, gasUsed, revertData); + } + + function _getUserOpHashInternal(PackedUserOperation calldata userOp) internal returns (bytes32) { + (bool success, bytes memory data) = + address(this).call(abi.encodeWithSelector(IEntryPoint.getUserOpHash.selector, userOp)); + require(success, "Call to getUserOpHash failed"); + return abi.decode(data, (bytes32)); + } +} diff --git a/crates/types/contracts/src/v0_7/GetBalances.sol b/crates/types/contracts/src/v0_7/GetBalances.sol new file mode 100644 index 00000000..bda81add --- /dev/null +++ b/crates/types/contracts/src/v0_7/GetBalances.sol @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-3.0-only +pragma solidity ^0.8.12; + +import "account-abstraction/v0_7/interfaces/IStakeManager.sol"; + +contract GetBalances { + error GetBalancesResult(uint256[] balances); + + constructor(address stakeManager, address[] memory addresses) { + revert GetBalancesResult(getBalancesHelper(stakeManager, addresses)); + } + + function getBalancesHelper(address stakeManager, address[] memory addresses) public view returns (uint256[] memory) { + uint256[] memory balances = new uint256[](addresses.length); + IStakeManager istakeManager = IStakeManager(stakeManager); + + for (uint256 i = 0; i < addresses.length; i++) { + balances[i] = istakeManager.balanceOf(addresses[i]); + } + + return balances; + } +} diff --git a/crates/types/contracts/src/v0_7/imports.sol b/crates/types/contracts/src/v0_7/imports.sol new file mode 100644 index 00000000..8b094c63 --- /dev/null +++ b/crates/types/contracts/src/v0_7/imports.sol @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity ^0.8.13; + +// Simply importing a dependency is enough for Forge to include it in builds. 
+ +import "account-abstraction/v0_7/interfaces/IEntryPoint.sol"; +import "account-abstraction/v0_7/interfaces/IAccount.sol"; +import "account-abstraction/v0_7/interfaces/IPaymaster.sol"; +import "account-abstraction/v0_7/interfaces/IAggregator.sol"; +import "account-abstraction/v0_7/interfaces/IStakeManager.sol"; +import "account-abstraction/v0_7/core/EntryPointSimulations.sol"; +import "account-abstraction/v0_7/core/SenderCreator.sol"; diff --git a/crates/types/contracts/test/PrecompileAccountTest.sol b/crates/types/contracts/test/PrecompileAccountTest.sol deleted file mode 100644 index 8b59023b..00000000 --- a/crates/types/contracts/test/PrecompileAccountTest.sol +++ /dev/null @@ -1,33 +0,0 @@ -// SPDX-License-Identifier: UNLICENSED -pragma solidity ^0.8.14; - -import "../src/PrecompileAccount.sol"; -import "account-abstraction/interfaces/UserOperation.sol"; -import "forge-std/Test.sol"; - -contract PrecompileAccountTest is Test { - PrecompileAccount public account; - - function setUp() public { - account = new PrecompileAccount( - 0x000000000000000000000000000000000000006D - ); - } - - function testValidateUserOp() public view { - UserOperation memory userOp = UserOperation( - address(0), - 0, - "", - "", - 0, - 0, - 0, - 0, - 0, - "", - "" - ); - account.validateUserOp(userOp, 0, 0); - } -} diff --git a/crates/pool/src/server/error.rs b/crates/types/src/builder/error.rs similarity index 59% rename from crates/pool/src/server/error.rs rename to crates/types/src/builder/error.rs index fc07035a..bfe06d6e 100644 --- a/crates/pool/src/server/error.rs +++ b/crates/types/src/builder/error.rs @@ -11,27 +11,13 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. 
-use crate::mempool::MempoolError; - -/// Pool server error type +/// Builder server errors #[derive(Debug, thiserror::Error)] -pub enum PoolServerError { - /// Mempool error occurred - #[error(transparent)] - MempoolError(MempoolError), - /// Unexpected response from PoolServer - #[error("Unexpected response from PoolServer")] +pub enum BuilderError { + /// Builder returned an unexpected response type for the given request + #[error("Unexpected response from Builder")] UnexpectedResponse, - /// Internal error + /// Internal errors #[error(transparent)] Other(#[from] anyhow::Error), } - -impl From for PoolServerError { - fn from(error: MempoolError) -> Self { - match error { - MempoolError::Other(e) => Self::Other(e), - _ => Self::MempoolError(error), - } - } -} diff --git a/crates/types/src/builder/mod.rs b/crates/types/src/builder/mod.rs new file mode 100644 index 00000000..152a572e --- /dev/null +++ b/crates/types/src/builder/mod.rs @@ -0,0 +1,23 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +//! 
Rundler builder types + +mod error; +pub use error::*; + +mod traits; +pub use traits::*; + +mod types; +pub use types::*; diff --git a/crates/provider/src/traits/stake_manager.rs b/crates/types/src/builder/traits.rs similarity index 53% rename from crates/provider/src/traits/stake_manager.rs rename to crates/types/src/builder/traits.rs index 1459bbb8..208969a7 100644 --- a/crates/provider/src/traits/stake_manager.rs +++ b/crates/types/src/builder/traits.rs @@ -11,17 +11,27 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. -use ethers::types::Address; +use ethers::types::{Address, H256}; #[cfg(feature = "test-utils")] use mockall::automock; -use rundler_types::contracts::i_stake_manager::DepositInfo; -/// Trait for interacting with an stake manager contract. -/// Implemented for the v0.6 version of the stake manager contract. -/// [Contracts can be found here](https://github.com/eth-infinitism/account-abstraction/tree/v0.6.0). +use super::{error::BuilderError, types::BundlingMode}; + +/// Builder result +pub type BuilderResult = std::result::Result; + +/// Builder #[cfg_attr(feature = "test-utils", automock)] #[async_trait::async_trait] -pub trait StakeManager: Send + Sync + 'static { - /// Get the deposit info from address - async fn get_deposit_info(&self, address: Address) -> anyhow::Result; +pub trait Builder: Send + Sync + 'static { + /// Get the supported entry points of this builder + async fn get_supported_entry_points(&self) -> BuilderResult>; + + /// Trigger the builder to send a bundle now, used for debugging. 
+ /// + /// Bundling mode must be set to `Manual`, or this will error + async fn debug_send_bundle_now(&self) -> BuilderResult<(H256, u64)>; + + /// Set the bundling mode + async fn debug_set_bundling_mode(&self, mode: BundlingMode) -> BuilderResult<()>; } diff --git a/crates/provider/src/traits/paymaster_helper.rs b/crates/types/src/builder/types.rs similarity index 54% rename from crates/provider/src/traits/paymaster_helper.rs rename to crates/types/src/builder/types.rs index f18acd95..52a9e00e 100644 --- a/crates/provider/src/traits/paymaster_helper.rs +++ b/crates/types/src/builder/types.rs @@ -11,17 +11,20 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. -use ethers::types::{Address, U256}; -#[cfg(feature = "test-utils")] -use mockall::automock; -use rundler_types::DepositInfo; +use parse_display::Display; +use serde::{Deserialize, Serialize}; -/// Trait for interacting with the PaymasterHelper contract -#[cfg_attr(feature = "test-utils", automock)] -#[async_trait::async_trait] -pub trait PaymasterHelper: Send + Sync + 'static { - /// Get the deposit info from address - async fn get_balances(&self, addresses: Vec

) -> anyhow::Result>; - /// Get deposit info for paymaster - async fn get_deposit_info(&self, address: Address) -> anyhow::Result; +/// Builder bundling mode +#[derive(Display, Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize)] +#[display(style = "lowercase")] +#[serde(rename_all = "lowercase")] +pub enum BundlingMode { + /// Manual bundling mode for debugging. + /// + /// Bundles will only be sent when `debug_send_bundle_now` is called. + Manual, + /// Auto bundling mode for normal operation. + /// + /// Bundles will be sent automatically. + Auto, } diff --git a/crates/types/src/chain.rs b/crates/types/src/chain.rs index 1685c4ad..b1db68f2 100644 --- a/crates/types/src/chain.rs +++ b/crates/types/src/chain.rs @@ -11,44 +11,168 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. -//! Grouped/Labeled chain IDs for various networks - -use ethers::types::Chain; - -/// Known chain IDs that use the Optimism Bedrock stack -pub const OP_BEDROCK_CHAIN_IDS: &[u64] = &[ - Chain::Optimism as u64, - Chain::OptimismGoerli as u64, - 11155420, // OptimismSepolia - Chain::Base as u64, - Chain::BaseGoerli as u64, - 84532, // BaseSepolia - 288, // Boba Mainnet - 28882, // Boba Sepolia - 901, // Boba/Optimism local devnet -]; - -// TODO use chain from ethers types once my PR is merged into ethers -// https://github.com/gakonst/ethers-rs/pull/2657 -/// Known chain IDs for the Base ecosystem -pub const ARBITRUM_CHAIN_IDS: &[u64] = &[ - Chain::Arbitrum as u64, - Chain::ArbitrumGoerli as u64, - 421614, /* ArbitrumSepolia */ - Chain::ArbitrumNova as u64, -]; - -/// Known chain IDs for the Base ecosystem -pub const BASE_CHAIN_IDS: &[u64] = &[ - Chain::Base as u64, - Chain::BaseGoerli as u64, - 84532, /* BaseSepolia */ -]; - -/// Known chain IDs for the Polygon ecosystem -pub const POLYGON_CHAIN_IDS: &[u64] = &[Chain::Polygon as u64, Chain::PolygonMumbai as u64]; - -/// Return true if the chain ID 
has a dynamic preVerificationGas field -pub fn is_dynamic_pvg(chain_id: u64) -> bool { - ARBITRUM_CHAIN_IDS.contains(&chain_id) || OP_BEDROCK_CHAIN_IDS.contains(&chain_id) +//! Chain specification for Rundler + +use std::str::FromStr; + +use ethers::types::{Address, U256}; +use serde::{Deserialize, Serialize}; + +const ENTRY_POINT_ADDRESS_V6_0: &str = "0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789"; +const ENTRY_POINT_ADDRESS_V7_0: &str = "0x0000000071727De22E5E9d8BAf0edAc6f37da032"; + +/// Chain specification for Rundler +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct ChainSpec { + /* + * Chain constants + */ + /// name for logging purposes, e.g. "Ethereum", no logic is performed on this + pub name: String, + /// chain id + pub id: u64, + /// entry point address for v0_6 + pub entry_point_address_v0_6: Address, + /// entry point address for v0_7 + pub entry_point_address_v0_7: Address, + /// Overhead when preforming gas estimation to account for the deposit storage + /// and transfer overhead. + /// + /// NOTE: This must take into account when the storage slot was originally 0 + /// and is now non-zero, making the overhead slightly higher for most operations. 
+ pub deposit_transfer_overhead: U256, + /// The maximum size of a transaction in bytes + pub max_transaction_size_bytes: usize, + /// Intrinsic gas cost for a transaction + pub transaction_intrinsic_gas: U256, + /// Per user operation gas cost for v0.6 + pub per_user_op_v0_6_gas: U256, + /// Per user operation gas cost for v0.7 + pub per_user_op_v0_7_gas: U256, + /// Per user operation deploy gas cost overhead, to capture + /// deploy costs that are not metered by the entry point + pub per_user_op_deploy_overhead_gas: U256, + /// Gas cost for a user operation word in a bundle transaction + pub per_user_op_word_gas: U256, + /// Gas cost for a zero byte in calldata + pub calldata_zero_byte_gas: U256, + /// Gas cost for a non-zero byte in calldata + pub calldata_non_zero_byte_gas: U256, + + /* + * Gas estimation + */ + /// true if calldata is priced in preVerificationGas + pub calldata_pre_verification_gas: bool, + /// type of gas oracle contract for pricing calldata in preVerificationGas + /// If calldata_pre_verification_gas is true, this must not be None + pub l1_gas_oracle_contract_type: L1GasOracleContractType, + /// address of gas oracle contract for pricing calldata in preVerificationGas + pub l1_gas_oracle_contract_address: Address, + /// true if L1 calldata gas should be included in the gas limit + /// only applies when calldata_pre_verification_gas is true + pub include_l1_gas_in_gas_limit: bool, + + /* + * Fee estimation + */ + /// true if eip1559 is enabled, and thus priority fees are used + pub eip1559_enabled: bool, + /// Type of oracle for estimating priority fees + pub priority_fee_oracle_type: PriorityFeeOracleType, + /// Minimum max priority fee per gas for the network + pub min_max_priority_fee_per_gas: U256, + /// Maximum max priority fee per gas for the network + pub max_max_priority_fee_per_gas: U256, + /// Usage ratio of the chain that determines "congestion" + /// Some chains have artificially high block gas limits but + /// actually cap block 
gas usage at a lower value. + pub congestion_trigger_usage_ratio_threshold: f64, + + /* + * Bundle building + */ + /// The maximum amount of time to wait before sending a bundle. + /// + /// The bundle builder will always try to send a bundle when a new block is received. + /// This parameter is used to trigger the builder to send a bundle after a specified + /// amount of time, before a new block is not received. + pub bundle_max_send_interval_millis: u64, + + /* + * Senders + */ + /// True if the flashbots sender is enabled on this chain + pub flashbots_enabled: bool, + /// URL for the flashbots relay, must be set if flashbots is enabled + pub flashbots_relay_url: Option, + /// URL for the flashbots status, must be set if flashbots is enabled + pub flashbots_status_url: Option, + /// True if the bloxroute sender is enabled on this chain + pub bloxroute_enabled: bool, + + /* + * Pool + */ + /// Size of the chain history to keep to handle reorgs + pub chain_history_size: u64, +} + +/// Type of gas oracle contract for pricing calldata in preVerificationGas +#[derive(Clone, Copy, Debug, Deserialize, Default, Serialize)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +pub enum L1GasOracleContractType { + /// No gas oracle contract + #[default] + None, + /// Arbitrum Nitro type gas oracle contract + ArbitrumNitro, + /// Optimism Bedrock type gas oracle contract + OptimismBedrock, +} + +/// Type of oracle for estimating priority fees +#[derive(Clone, Debug, Deserialize, Default, Serialize)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +pub enum PriorityFeeOracleType { + /// Use eth_maxPriorityFeePerGas on the provider + #[default] + Provider, + /// Use the usage based oracle + UsageBased, +} + +impl Default for ChainSpec { + fn default() -> Self { + Self { + name: "Unknown".to_string(), + id: 0, + entry_point_address_v0_6: Address::from_str(ENTRY_POINT_ADDRESS_V6_0).unwrap(), + entry_point_address_v0_7: Address::from_str(ENTRY_POINT_ADDRESS_V7_0).unwrap(), + 
deposit_transfer_overhead: U256::from(30_000), + transaction_intrinsic_gas: U256::from(21_000), + per_user_op_v0_6_gas: U256::from(18_300), + per_user_op_v0_7_gas: U256::from(19_500), + per_user_op_deploy_overhead_gas: U256::from(0), + per_user_op_word_gas: U256::from(4), + calldata_zero_byte_gas: U256::from(4), + calldata_non_zero_byte_gas: U256::from(16), + eip1559_enabled: true, + calldata_pre_verification_gas: false, + l1_gas_oracle_contract_type: L1GasOracleContractType::default(), + l1_gas_oracle_contract_address: Address::zero(), + include_l1_gas_in_gas_limit: true, + priority_fee_oracle_type: PriorityFeeOracleType::default(), + min_max_priority_fee_per_gas: U256::zero(), + max_max_priority_fee_per_gas: U256::MAX, + congestion_trigger_usage_ratio_threshold: 0.75, + max_transaction_size_bytes: 131072, // 128 KiB + bundle_max_send_interval_millis: u64::MAX, + flashbots_enabled: false, + flashbots_relay_url: None, + flashbots_status_url: None, + bloxroute_enabled: false, + chain_history_size: 64, + } + } } diff --git a/crates/types/src/contracts/mod.rs b/crates/types/src/contracts/mod.rs new file mode 100644 index 00000000..c3e544b5 --- /dev/null +++ b/crates/types/src/contracts/mod.rs @@ -0,0 +1,39 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +//! 
Generated contract interfaces + +#![allow(non_snake_case)] +#![allow(clippy::all)] +#![allow(missing_docs)] + +use ethers::types::Bytes; + +pub mod arbitrum; +pub mod optimism; +pub mod utils; +pub mod v0_6; +pub mod v0_7; + +// https://etherscan.io/address/0x5ff137d4b0fdcd49dca30c7cf57e578a026d2789#code +const __ENTRY_POINT_V0_6_DEPLOYED_BYTECODE_HEX: &[u8] = include_bytes!( + "../../contracts/bytecode/entrypoint/0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789_deployed.txt" +); +const __ENTRY_POINT_V0_6_DEPLOYED_BYTECODE: [u8; 23689] = { + match const_hex::const_decode_to_array(__ENTRY_POINT_V0_6_DEPLOYED_BYTECODE_HEX) { + Ok(a) => a, + Err(_) => panic!("Failed to decode entrypoint hex"), + } +}; +pub static ENTRY_POINT_V0_6_DEPLOYED_BYTECODE: Bytes = + Bytes::from_static(&__ENTRY_POINT_V0_6_DEPLOYED_BYTECODE); diff --git a/crates/types/src/entity.rs b/crates/types/src/entity.rs index 8da03ebb..cc07456c 100644 --- a/crates/types/src/entity.rs +++ b/crates/types/src/entity.rs @@ -17,7 +17,7 @@ use anyhow::bail; use ethers::{types::Address, utils::to_checksum}; use parse_display::Display; use serde::{ser::SerializeStruct, Deserialize, Serialize}; -use strum::EnumIter; +use strum::{EnumIter, IntoEnumIterator}; /// The type of an entity #[derive( @@ -33,11 +33,13 @@ use strum::EnumIter; Deserialize, Hash, Serialize, + Default, )] #[display(style = "camelCase")] #[serde(rename_all = "camelCase")] pub enum EntityType { /// Account type + #[default] Account, /// Paymaster type Paymaster, @@ -74,7 +76,7 @@ impl FromStr for EntityType { } /// An entity associated with a user operation -#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Default)] pub struct Entity { /// The type of entity pub kind: EntityType, @@ -156,3 +158,128 @@ pub struct EntityUpdate { /// The kind of update to perform for the entity pub update_type: EntityUpdateType, } + +/// additional context about an entity 
+#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] +pub struct EntityInfo { + /// The entity + pub entity: Entity, + /// Whether the entity is staked or not + pub is_staked: bool, +} + +impl EntityInfo { + /// Create a new entity info + pub fn new(entity: Entity, is_staked: bool) -> Self { + Self { entity, is_staked } + } + + /// Get the entity address + pub fn address(self) -> Address { + self.entity.address + } + + /// Get the entity type + pub fn kind(self) -> EntityType { + self.entity.kind + } + + /// Check if the entity is staked + pub fn is_staked(self) -> bool { + self.is_staked + } +} + +/// additional context for all the entities used in an op +#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] +pub struct EntityInfos { + /// The entity info for the factory + pub factory: Option, + /// The entity info for the op sender + pub sender: EntityInfo, + /// The entity info for the paymaster + pub paymaster: Option, + /// The entity info for the aggregator + pub aggregator: Option, +} + +impl EntityInfos { + /// Get iterator over the entities + pub fn entities(&'_ self) -> impl Iterator + '_ { + EntityType::iter().filter_map(|t| self.get(t).map(|info| (t, info))) + } + + /// Get the EntityInfo of a specific entity + pub fn get(self, entity: EntityType) -> Option { + match entity { + EntityType::Factory => self.factory, + EntityType::Account => Some(self.sender), + EntityType::Paymaster => self.paymaster, + EntityType::Aggregator => self.aggregator, + } + } + + /// Get the type of an entity from its address, if any + pub fn type_from_address(self, address: Address) -> Option { + if address.eq(&self.sender.entity.address) { + return Some(EntityType::Account); + } + + if let Some(factory) = self.factory { + if address.eq(&factory.entity.address) { + return Some(EntityType::Factory); + } + } + + if let Some(paymaster) = self.paymaster { + if address.eq(&paymaster.entity.address) { + return Some(EntityType::Paymaster); + } + } + + if let Some(aggregator) = 
self.aggregator { + if address.eq(&aggregator.entity.address) { + return Some(EntityType::Aggregator); + } + } + + None + } + + /// Get the sender address + pub fn sender_address(self) -> Address { + self.sender.entity.address + } + + /// Set the sender info + pub fn set_sender(&mut self, addr: Address, is_staked: bool) { + self.sender = EntityInfo { + entity: Entity::account(addr), + is_staked, + }; + } + + /// Set the factory info + pub fn set_factory(&mut self, addr: Address, is_staked: bool) { + self.factory = Some(EntityInfo { + entity: Entity::factory(addr), + is_staked, + }); + } + + /// Set the paymaster info + pub fn set_paymaster(&mut self, addr: Address, is_staked: bool) { + self.paymaster = Some(EntityInfo { + entity: Entity::paymaster(addr), + is_staked, + }); + } + + /// Set the aggregator info + pub fn set_aggregator(&mut self, addr: Address, is_staked: bool) { + self.aggregator = Some(EntityInfo { + entity: Entity::aggregator(addr), + is_staked, + }); + } +} diff --git a/crates/types/src/hybrid_compute.rs b/crates/types/src/hybrid_compute.rs index 63b819cd..2ab23b30 100644 --- a/crates/types/src/hybrid_compute.rs +++ b/crates/types/src/hybrid_compute.rs @@ -22,7 +22,8 @@ use ethers::{ signers::{LocalWallet, Signer}, }; -use crate::contracts::shared_types::UserOperation; +use crate::v0_6::UserOperation as UserOperationV0_6; +use crate::user_operation::{UserOperation}; use std::{sync::Mutex, collections::HashMap, str::FromStr}; @@ -48,7 +49,7 @@ pub struct HcEntry { /// Extracted calldata //pub call_data: Bytes, /// Full operation - pub user_op: UserOperation, + pub user_op: UserOperationV0_6, /// Creation timestamp, used to prune expired entries pub ts: SystemTime, /// The total computed offchain gas (all 3 phases) @@ -235,7 +236,7 @@ fn make_external_op( sig_hex: String, oo_nonce: U256, cfg: &HcCfg, -) -> UserOperation { +) -> UserOperationV0_6 { let tmp_bytes:Bytes = Bytes::from(response_payload.to_vec()); @@ -247,7 +248,7 @@ fn make_external_op( 
println!("HC external_op call_data len {:?} {:?} gas {:?} {:?}", response_payload.len(), call_data.len(), call_gas, call_data); - let mut new_op:UserOperation = UserOperation{ + let mut new_op:UserOperationV0_6 = UserOperationV0_6{ sender: ep_addr, nonce: oo_nonce.into(), init_code: Bytes::new(), @@ -284,7 +285,7 @@ pub async fn external_op( ) -> HcErr { let mut new_op = make_external_op(src_addr,nonce,op_success,response_payload,sub_key,ep_addr,sig_hex.clone(),oo_nonce,cfg); - let check_hash = new_op.op_hash(cfg.entry_point, cfg.chain_id); + let check_hash = new_op.hash(cfg.entry_point, cfg.chain_id); let check_sig: ethers::types::Signature = ethers::types::Signature::from_str(&sig_hex).expect("Signature decode"); let check_msg: ethers::types::RecoveryMessage = Data(check_hash.to_fixed_bytes().to_vec()); @@ -309,14 +310,14 @@ fn make_err_op( nn: U256, oo_nonce: U256, cfg: &HcCfg, -) -> UserOperation { +) -> UserOperationV0_6 { let response_payload:Bytes = AbiEncode::encode((src_addr, nn, err_hc.code, err_hc.message)).into(); let call_data = make_err_calldata(cfg.helper_addr, sub_key, Bytes::from(response_payload.to_vec())); println!("HC err_op call_data {:?}", call_data); - let new_op:UserOperation = UserOperation{ + let new_op:UserOperationV0_6 = UserOperationV0_6{ sender: cfg.sys_account, nonce: oo_nonce.into(), init_code: Bytes::new(), @@ -351,7 +352,7 @@ pub async fn err_op( let key_bytes: Bytes = cfg.sys_privkey.as_fixed_bytes().into(); let wallet = LocalWallet::from_bytes(&key_bytes).unwrap(); - let hh = new_op.op_hash(entry_point, cfg.chain_id); + let hh = new_op.hash(entry_point, cfg.chain_id); let signature = wallet.sign_message(hh).await; new_op.signature = signature.as_ref().unwrap().to_vec().into(); @@ -366,11 +367,11 @@ pub async fn rr_op( cfg: &HcCfg, oo_nonce: U256, keys: Vec, -) -> UserOperation { +) -> UserOperationV0_6 { let call_data = make_rr_calldata(keys); println!("HC rr_op call_data {:?}", call_data); - let mut new_op:UserOperation = 
UserOperation{ + let mut new_op:UserOperationV0_6 = UserOperationV0_6{ sender: cfg.sys_account, nonce: oo_nonce.into(), init_code: Bytes::new(), @@ -387,7 +388,7 @@ pub async fn rr_op( let key_bytes: Bytes = cfg.sys_privkey.as_fixed_bytes().into(); let wallet = LocalWallet::from_bytes(&key_bytes).unwrap(); - let hh = new_op.op_hash(cfg.entry_point, cfg.chain_id); + let hh = new_op.hash(cfg.entry_point, cfg.chain_id); println!("HC pre_sign hash {:?}", hh); let signature = wallet.sign_message(hh).await; @@ -558,7 +559,7 @@ mod test { ); let e_calldata = "0xb61d27f60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000124dfc98ae82222222222222222222222222222222222222222222222222222222222222222000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000010000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000064000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000".parse::().unwrap(); - let expected:UserOperation = UserOperation{ + let expected:UserOperationV0_6 = UserOperationV0_6{ sender: "0x2000000000000000000000000000000000000002".parse::
().unwrap(), nonce: U256::from(222), init_code: Bytes::new(), @@ -597,12 +598,12 @@ mod test { ); let e_calldata = "0xb61d27f60000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000124fde89b642222222222222222222222222222222222222222222222222222222222222222000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000020000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000064000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000009756e69742074657374000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".parse::().unwrap(); - let expected:UserOperation = UserOperation{ + let expected:UserOperationV0_6 = UserOperationV0_6{ sender: "0x0000000000000000000000000000000000000002".parse::
().unwrap(), nonce: U256::from(222), init_code: Bytes::new(), call_data: e_calldata, - call_gas_limit: U256::from(196608), + call_gas_limit: U256::from(262144), verification_gas_limit: U256::from(65536), pre_verification_gas: U256::from(65536), max_fee_per_gas: U256::from(0), diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs index 07538f3d..8e818fee 100644 --- a/crates/types/src/lib.rs +++ b/crates/types/src/lib.rs @@ -20,29 +20,37 @@ //! Rundler common types +pub mod builder; + pub mod chain; -/// Generated contracts module -#[allow(non_snake_case)] #[rustfmt::skip] -#[allow(clippy::all)] -#[allow(missing_docs)] pub mod contracts; -pub use contracts::shared_types::{DepositInfo, UserOperation, UserOpsPerAggregator}; mod entity; -pub use entity::{Entity, EntityType, EntityUpdate, EntityUpdateType}; +pub use entity::{Entity, EntityInfo, EntityInfos, EntityType, EntityUpdate, EntityUpdateType}; + +mod opcode; +pub use opcode::{Opcode, ViolationOpCode}; mod gas; pub use gas::GasFees; +pub mod pool; + mod timestamp; pub use timestamp::{Timestamp, ValidTimeRange}; mod user_operation; -pub use user_operation::UserOperationId; +pub use user_operation::*; mod storage; pub use storage::StorageSlot; +mod validation_results; +pub use validation_results::{ + parse_validation_data, AggregatorInfo, StakeInfo, ValidationError, ValidationOutput, + ValidationReturnInfo, ValidationRevert, +}; + pub mod hybrid_compute; diff --git a/crates/types/src/opcode.rs b/crates/types/src/opcode.rs new file mode 100644 index 00000000..fd47b256 --- /dev/null +++ b/crates/types/src/opcode.rs @@ -0,0 +1,426 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. 
+// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +use num_enum::TryFromPrimitive; +use serde::{Deserialize, Serialize}; +use strum::{AsRefStr, Display, EnumCount, EnumIter, EnumString, VariantNames}; + +/// A wrapper around Opcode that implements extra traits +#[derive(Debug, PartialEq, Clone, parse_display::Display, Eq)] +#[display("{0:?}")] +pub struct ViolationOpCode(pub Opcode); + +impl PartialOrd for ViolationOpCode { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for ViolationOpCode { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + let left = self.0 as i32; + let right = other.0 as i32; + + left.cmp(&right) + } +} + +// Credit for this section goes to ethers-rs +// https://github.com/gakonst/ethers-rs/blob/51fe937f6515689b17a3a83b74a05984ad3a7f11/ethers-core/src/types/opcode.rs +// TODO(danc): remove this once the PR is merged and released + +/// An [EVM Opcode](https://evm.codes). +#[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + AsRefStr, + Display, + EnumString, + VariantNames, + EnumIter, + EnumCount, + TryFromPrimitive, + Serialize, + Deserialize, +)] +#[repr(u8)] +pub enum Opcode { + // 0x0 range - arithmetic ops. 
+ /// Opcode 0x0 - Halts execution + STOP = 0x00, + /// Opcode 0x1 - Addition operation + ADD, + /// Opcode 0x2 - Multiplication operation + MUL, + /// Opcode 0x3 - Subtraction operation + SUB, + /// Opcode 0x4 - Integer division operation + DIV, + /// Opcode 0x5 - Signed integer division operation (truncated) + SDIV, + /// Opcode 0x6 - Modulo remainder operation + MOD, + /// Opcode 0x7 - Signed modulo remainder operation + SMOD, + /// Opcode 0x8 - Modulo addition operation + ADDMOD, + /// Opcode 0x9 - Modulo multiplication operation + MULMOD, + /// Opcode 0xA - Exponential operation + EXP, + /// Opcode 0xB - Extend length of two’s complement signed integer + SIGNEXTEND, + + // 0x0C - 0x0F are invalid + + // 0x10 range - comparison ops. + /// Opcode 0x10 - Less-than comparison + LT = 0x10, + /// Opcode 0x11 - Greater-than comparison + GT, + /// Opcode 0x12 - Signed less-than comparison + SLT, + /// Opcode 0x13 - Signed greater-than comparison + SGT, + /// Opcode 0x14 - Equality comparison + EQ, + /// Opcode 0x15 - Simple not operator + ISZERO, + /// Opcode 0x16 - Bitwise AND operation + AND, + /// Opcode 0x17 - Bitwise OR operation + OR, + /// Opcode 0x18 - Bitwise XOR operation + XOR, + /// Opcode 0x19 - Bitwise NOT operation + NOT, + /// Opcode 0x1A - Retrieve single byte from word + BYTE, + /// Opcode 0x1B - Left shift operation + SHL, + /// Opcode 0x1C - Logical right shift operation + SHR, + /// Opcode 0x1D - Arithmetic (signed) right shift operation + SAR, + + // 0x1E - 0x1F are invalid + + // 0x20 range - crypto. + /// Opcode 0x20 - Compute Keccak-256 hash + #[serde(alias = "KECCAK256")] + SHA3 = 0x20, + + // 0x21 - 0x2F are invalid + + // 0x30 range - closure state. 
+ /// Opcode 0x30 - Get address of currently executing account + ADDRESS = 0x30, + /// Opcode 0x31 - Get address of currently executing account + BALANCE, + /// Opcode 0x32 - Get execution origination address + ORIGIN, + /// Opcode 0x33 - Get caller address + CALLER, + /// Opcode 0x34 - Get deposited value by the instruction/transaction responsible for this + /// execution + CALLVALUE, + /// Opcode 0x35 - Get input data of current environment + CALLDATALOAD, + /// Opcode 0x36 - Get size of input data in current environment + CALLDATASIZE, + /// Opcode 0x37 - Copy input data in current environment to memory + CALLDATACOPY, + /// Opcode 0x38 - Get size of code running in current environment + CODESIZE, + /// Opcode 0x39 - Copy code running in current environment to memory + CODECOPY, + /// Opcode 0x3A - Get price of gas in current environment + GASPRICE, + /// Opcode 0x3B - Get size of an account’s code + EXTCODESIZE, + /// Opcode 0x3C - Copy an account’s code to memory + EXTCODECOPY, + /// Opcode 0x3D - Get size of output data from the previous call from the current environment + RETURNDATASIZE, + /// Opcode 0x3E - Copy output data from the previous call to memory + RETURNDATACOPY, + /// Opcode 0x3F - Get hash of an account’s code + EXTCODEHASH, + + // 0x40 range - block operations. 
+ /// Opcode 0x40 - Get the hash of one of the 256 most recent complete blocks + BLOCKHASH = 0x40, + /// Opcode 0x41 - Get the block’s beneficiary address + COINBASE, + /// Opcode 0x42 - Get the block’s timestamp + TIMESTAMP, + /// Opcode 0x43 - Get the block’s number + NUMBER, + /// Opcode 0x44 - Get the block’s difficulty + #[serde(alias = "PREVRANDAO", alias = "RANDOM")] + #[strum( + to_string = "DIFFICULTY", + serialize = "PREVRANDAO", + serialize = "RANDOM" + )] + DIFFICULTY, + /// Opcode 0x45 - Get the block’s gas limit + GASLIMIT, + /// Opcode 0x46 - Get the chain ID + CHAINID, + /// Opcode 0x47 - Get balance of currently executing account + SELFBALANCE, + /// Opcode 0x48 - Get the base fee + BASEFEE, + /// Opcode 0x49 - Get versioned hashes + BLOBHASH, + /// Opcode 0x4A - Returns the value of the blob base-fee of the current block + BLOBBASEFEE, + + // 0x4B - 0x4F are invalid + + // 0x50 range - 'storage' and execution. + /// Opcode 0x50 - Remove item from stack + POP = 0x50, + /// Opcode 0x51 - Load word from memory + MLOAD, + /// Opcode 0x52 - Save word to memory + MSTORE, + /// Opcode 0x53 - Save byte to memory + MSTORE8, + /// Opcode 0x54 - Load word from storage + SLOAD, + /// Opcode 0x55 - Save word to storage + SSTORE, + /// Opcode 0x56 - Alter the program counter + JUMP, + /// Opcode 0x57 - Conditionally alter the program counter + JUMPI, + /// Opcode 0x58 - Get the value of the program counter prior to the increment corresponding to + /// this instruction + PC, + /// Opcode 0x59 - Get the size of active memory in bytes + MSIZE, + /// Opcode 0x5A - Get the amount of available gas, including the corresponding reduction for + /// the cost of this instruction + GAS, + /// Opcode 0x5B - Mark a valid destination for jumps + JUMPDEST, + /// Opcode 0x5C - Load word from transient storage + TLOAD, + /// Opcode 0x5D - Save word to transient storage + TSTORE, + /// Opcode 0x5E - Copy memory areas + MCOPY, + + // 0x5F range - pushes. 
+ /// Opcode 0x5F - Place the constant value 0 on stack + PUSH0 = 0x5f, + /// Opcode 0x60 - Place 1 byte item on stack + PUSH1 = 0x60, + /// Opcode 0x61 - Place 2 byte item on stack + PUSH2, + /// Opcode 0x62 - Place 3 byte item on stack + PUSH3, + /// Opcode 0x63 - Place 4 byte item on stack + PUSH4, + /// Opcode 0x64 - Place 5 byte item on stack + PUSH5, + /// Opcode 0x65 - Place 6 byte item on stack + PUSH6, + /// Opcode 0x66 - Place 7 byte item on stack + PUSH7, + /// Opcode 0x67 - Place 8 byte item on stack + PUSH8, + /// Opcode 0x68 - Place 9 byte item on stack + PUSH9, + /// Opcode 0x69 - Place 10 byte item on stack + PUSH10, + /// Opcode 0x6A - Place 11 byte item on stack + PUSH11, + /// Opcode 0x6B - Place 12 byte item on stack + PUSH12, + /// Opcode 0x6C - Place 13 byte item on stack + PUSH13, + /// Opcode 0x6D - Place 14 byte item on stack + PUSH14, + /// Opcode 0x6E - Place 15 byte item on stack + PUSH15, + /// Opcode 0x6F - Place 16 byte item on stack + PUSH16, + /// Opcode 0x70 - Place 17 byte item on stack + PUSH17, + /// Opcode 0x71 - Place 18 byte item on stack + PUSH18, + /// Opcode 0x72 - Place 19 byte item on stack + PUSH19, + /// Opcode 0x73 - Place 20 byte item on stack + PUSH20, + /// Opcode 0x74 - Place 21 byte item on stack + PUSH21, + /// Opcode 0x75 - Place 22 byte item on stack + PUSH22, + /// Opcode 0x76 - Place 23 byte item on stack + PUSH23, + /// Opcode 0x77 - Place 24 byte item on stack + PUSH24, + /// Opcode 0x78 - Place 25 byte item on stack + PUSH25, + /// Opcode 0x79 - Place 26 byte item on stack + PUSH26, + /// Opcode 0x7A - Place 27 byte item on stack + PUSH27, + /// Opcode 0x7B - Place 28 byte item on stack + PUSH28, + /// Opcode 0x7C - Place 29 byte item on stack + PUSH29, + /// Opcode 0x7D - Place 30 byte item on stack + PUSH30, + /// Opcode 0x7E - Place 31 byte item on stack + PUSH31, + /// Opcode 0x7F - Place 32 byte item on stack + PUSH32, + + // 0x80 range - dups. 
+ /// Opcode 0x80 - Duplicate 1st stack item + DUP1 = 0x80, + /// Opcode 0x81 - Duplicate 2nd stack item + DUP2, + /// Opcode 0x82 - Duplicate 3rd stack item + DUP3, + /// Opcode 0x83 - Duplicate 4th stack item + DUP4, + /// Opcode 0x84 - Duplicate 5th stack item + DUP5, + /// Opcode 0x85 - Duplicate 6th stack item + DUP6, + /// Opcode 0x86 - Duplicate 7th stack item + DUP7, + /// Opcode 0x87 - Duplicate 8th stack item + DUP8, + /// Opcode 0x88 - Duplicate 9th stack item + DUP9, + /// Opcode 0x89 - Duplicate 10th stack item + DUP10, + /// Opcode 0x8A - Duplicate 11th stack item + DUP11, + /// Opcode 0x8B - Duplicate 12th stack item + DUP12, + /// Opcode 0x8C - Duplicate 13th stack item + DUP13, + /// Opcode 0x8D - Duplicate 14th stack item + DUP14, + /// Opcode 0x8E - Duplicate 15th stack item + DUP15, + /// Opcode 0x8F - Duplicate 16th stack item + DUP16, + + // 0x90 range - swaps. + /// Opcode 0x90 - Exchange 1st and 2nd stack items + SWAP1 = 0x90, + /// Opcode 0x91 - Exchange 1st and 3rd stack items + SWAP2, + /// Opcode 0x92 - Exchange 1st and 4th stack items + SWAP3, + /// Opcode 0x93 - Exchange 1st and 5th stack items + SWAP4, + /// Opcode 0x94 - Exchange 1st and 6th stack items + SWAP5, + /// Opcode 0x95 - Exchange 1st and 7th stack items + SWAP6, + /// Opcode 0x96 - Exchange 1st and 8th stack items + SWAP7, + /// Opcode 0x97 - Exchange 1st and 9th stack items + SWAP8, + /// Opcode 0x98 - Exchange 1st and 10th stack items + SWAP9, + /// Opcode 0x99 - Exchange 1st and 11th stack items + SWAP10, + /// Opcode 0x9A - Exchange 1st and 12th stack items + SWAP11, + /// Opcode 0x9B - Exchange 1st and 13th stack items + SWAP12, + /// Opcode 0x9C - Exchange 1st and 14th stack items + SWAP13, + /// Opcode 0x9D - Exchange 1st and 15th stack items + SWAP14, + /// Opcode 0x9E - Exchange 1st and 16th stack items + SWAP15, + /// Opcode 0x9F - Exchange 1st and 17th stack items + SWAP16, + + // 0xA0 range - logging ops. 
+ /// Opcode 0xA0 - Append log record with one topic + LOG0 = 0xa0, + /// Opcode 0xA1 - Append log record with two topics + LOG1, + /// Opcode 0xA2 - Append log record with three topics + LOG2, + /// Opcode 0xA3 - Append log record with four topics + LOG3, + /// Opcode 0xA4 - Append log record with five topics + LOG4, + + // 0xA5 - 0xEF are invalid + + // 0xF0 range - closures. + /// Opcode 0xF0 - Create a new account with associated code + CREATE = 0xf0, + /// Opcode 0xF1 - Message-call into an account + CALL, + /// Opcode 0xF2 - Message-call into this account with alternative account’s code + CALLCODE, + /// Opcode 0xF3 - Halt execution returning output data + RETURN, + /// Opcode 0xF4 - Message-call into this account with an alternative account’s code, but + /// persisting the current values for sender and value + DELEGATECALL, + /// Opcode 0xF5 - Create a new account with associated code at a predictable address + CREATE2, + + // 0xF6 - 0xF9 are invalid + + // 0xFA range - closures + /// Opcode 0xFA - Static message-call into an account + STATICCALL = 0xfa, + + // 0xFB - 0xFC are invalid + + // 0xfd range - closures + /// Opcode 0xFD - Halt execution reverting state changes but returning data and remaining gas + REVERT = 0xfd, + /// Opcode 0xFE - Designated invalid instruction + INVALID = 0xfe, + /// Opcode 0xFF - Halt execution and register account for later deletion + SELFDESTRUCT = 0xff, +} + +// See comment in ./chain.rs +#[allow(clippy::derivable_impls)] +impl Default for Opcode { + fn default() -> Self { + Opcode::INVALID + } +} + +impl From for u8 { + fn from(value: Opcode) -> Self { + value as u8 + } +} diff --git a/crates/types/src/pool/error.rs b/crates/types/src/pool/error.rs new file mode 100644 index 00000000..1defbf47 --- /dev/null +++ b/crates/types/src/pool/error.rs @@ -0,0 +1,250 @@ +// This file is part of Rundler. 
+// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +use ethers::types::{Address, U256}; + +use crate::{ + validation_results::ValidationRevert, Entity, EntityType, StorageSlot, Timestamp, + ViolationOpCode, +}; + +/// Pool server error type +#[derive(Debug, thiserror::Error)] +pub enum PoolError { + /// Mempool error occurred + #[error(transparent)] + MempoolError(MempoolError), + /// Unexpected response from PoolServer + #[error("Unexpected response from PoolServer")] + UnexpectedResponse, + /// Internal error + #[error(transparent)] + Other(#[from] anyhow::Error), +} + +impl From for PoolError { + fn from(error: MempoolError) -> Self { + match error { + MempoolError::Other(e) => Self::Other(e), + _ => Self::MempoolError(error), + } + } +} + +/// Mempool error type. +#[derive(Debug, thiserror::Error)] +pub enum MempoolError { + /// Some other error occurred + #[error(transparent)] + Other(#[from] anyhow::Error), + /// Operation with the same hash already in pool + #[error("Operation already known")] + OperationAlreadyKnown, + /// Operation with same sender/nonce already in pool + /// and the replacement operation has lower gas price. + #[error("Replacement operation underpriced. Existing priority fee: {0}. 
Existing fee: {1}")] + ReplacementUnderpriced(U256, U256), + /// Max operations reached for unstaked sender [UREP-010] or unstaked non-sender entity [UREP-020] + #[error("Max operations ({0}) reached for entity {1}")] + MaxOperationsReached(usize, Entity), + /// Multiple roles violation + /// Spec rule: STO-040 + #[error("A {} at {} in this UserOperation is used as a sender entity in another UserOperation currently in mempool.", .0.kind, .0.address)] + MultipleRolesViolation(Entity), + /// An associated storage slot that is accessed in the UserOperation is being used as a sender by another UserOperation in the mempool. + /// Spec rule: STO-041 + #[error("An associated storage slot that is accessed in the UserOperation is being used as a sender by another UserOperation in the mempool")] + AssociatedStorageIsAlternateSender, + /// Sender address used as different entity in another UserOperation currently in the mempool. + /// Spec rule: STO-040 + #[error("The sender address {0} is used as a different entity in another UserOperation currently in mempool")] + SenderAddressUsedAsAlternateEntity(Address), + /// An entity associated with the operation is throttled/banned. + #[error("Entity {0} is throttled/banned")] + EntityThrottled(Entity), + /// Operation was discarded on inserting due to size limit + #[error("Operation was discarded on inserting")] + DiscardedOnInsert, + /// Paymaster balance too low + /// Spec rule: EREP-010 + #[error("Paymaster balance too low. Required balance: {0}. 
Current balance {1}")] + PaymasterBalanceTooLow(U256, U256), + /// Operation was rejected due to a precheck violation + #[error("Operation violation during precheck {0}")] + PrecheckViolation(PrecheckViolation), + /// Operation was rejected due to a simulation violation + #[error("Operation violation during simulation {0}")] + SimulationViolation(SimulationViolation), + /// Operation was rejected because it used an unsupported aggregator + #[error("Unsupported aggregator {0}")] + UnsupportedAggregator(Address), + /// An unknown entry point was specified + #[error("Unknown entry point {0}")] + UnknownEntryPoint(Address), + /// The operation drop attempt too soon after being added to the pool + #[error("Operation drop attempt too soon after being added to the pool. Added at {0}, attempted to drop at {1}, must wait {2} blocks.")] + OperationDropTooSoon(u64, u64, u64), +} + +/// Precheck violation enumeration +/// +/// All possible errors that can be returned from a precheck. +#[derive(Clone, Debug, parse_display::Display, Eq, PartialEq, Ord, PartialOrd)] +pub enum PrecheckViolation { + /// The sender is not deployed, and no init code is provided. + #[display("sender {0:?} is not a contract and initCode is empty")] + SenderIsNotContractAndNoInitCode(Address), + /// The sender is already deployed, and an init code is provided. + #[display("sender {0:?} is an existing contract, but initCode is nonempty")] + ExistingSenderWithInitCode(Address), + /// An init code contains a factory address that is not deployed. + #[display("initCode indicates factory with no code: {0:?}")] + FactoryIsNotContract(Address), + /// The total gas limit of the user operation is too high. + /// See `gas::user_operation_execution_gas_limit` for calculation. + #[display("total gas limit is {0} but must be at most {1}")] + TotalGasLimitTooHigh(U256, U256), + /// The verification gas limit of the user operation is too high. 
+ #[display("verificationGasLimit is {0} but must be at most {1}")] + VerificationGasLimitTooHigh(U256, U256), + /// The pre-verification gas of the user operation is too low. + #[display("preVerificationGas is {0} but must be at least {1}")] + PreVerificationGasTooLow(U256, U256), + /// A paymaster is provided, but the address is not deployed. + #[display("paymasterAndData indicates paymaster with no code: {0:?}")] + PaymasterIsNotContract(Address), + /// The paymaster deposit is too low to pay for the user operation's maximum cost. + #[display("paymaster deposit is {0} but must be at least {1} to pay for this operation")] + PaymasterDepositTooLow(U256, U256), + /// The sender balance is too low to pay for the user operation's maximum cost. + /// (when not using a paymaster) + #[display("sender balance and deposit together is {0} but must be at least {1} to pay for this operation")] + SenderFundsTooLow(U256, U256), + /// The provided max priority fee per gas is too low based on the current network rate. + #[display("maxPriorityFeePerGas is {0} but must be at least {1}")] + MaxPriorityFeePerGasTooLow(U256, U256), + /// The provided max fee per gas is too low based on the current network rate. + #[display("maxFeePerGas is {0} but must be at least {1}")] + MaxFeePerGasTooLow(U256, U256), + /// The call gas limit is too low to account for any possible call. 
+ #[display("callGasLimit is {0} but must be at least {1}")] + CallGasLimitTooLow(U256, U256), +} + +/// All possible simulation violations +#[derive(Clone, Debug, parse_display::Display, Ord, Eq, PartialOrd, PartialEq)] +pub enum SimulationViolation { + // Make sure to maintain the order here based on the importance + // of the violation for converting to an JSON RPC error + /// The signature is invalid for either the account or paymaster + /// This is used in v0.6 where the error is not attributable + #[display("invalid signature")] + InvalidSignature, + /// The signature is invalid for the account + #[display("invalid account signature")] + InvalidAccountSignature, + /// The user operation has an invalid time range based on the `valid_until` and `valid_after` fields + #[display( + "User Operation expired or has an invalid time range. validUntil: {0}, validAfter: {1}" + )] + InvalidTimeRange(Timestamp, Timestamp), + /// The signature is invalid for the paymaster + #[display("invalid paymaster signature")] + InvalidPaymasterSignature, + /// The user operation used an opcode that is not allowed + #[display("{0.kind} uses banned opcode: {2} in contract {1:?}")] + UsedForbiddenOpcode(Entity, Address, ViolationOpCode), + /// The user operation used a precompile that is not allowed + #[display("{0.kind} uses banned precompile: {2:?} in contract {1:?}")] + UsedForbiddenPrecompile(Entity, Address, Address), + /// The user operation accessed a contract that has not been deployed + #[display( + "{0.kind} tried to access code at {1} during validation, but that address is not a contract" + )] + AccessedUndeployedContract(Entity, Address), + /// The user operation factory entity called CREATE2 more than once during initialization + #[display("factory may only call CREATE2 once during initialization")] + FactoryCalledCreate2Twice(Address), + /// The user operation accessed a storage slot that is not allowed + #[display("{0.kind} accessed forbidden storage at address {1:?} 
during validation")] + InvalidStorageAccess(Entity, StorageSlot), + /// The user operation accessed a storage slot on the sender while being deployed + /// and the accessing entity or the factory is not staked + #[display("Sender storage at slot {1:?} accessed during deployment. Factory or accessing entity ({0:?}) must be staked")] + AssociatedStorageDuringDeploy(Option, StorageSlot), + /// The user operation called an entry point method that is not allowed + #[display("{0.kind} called entry point method other than depositTo")] + CalledBannedEntryPointMethod(Entity), + /// The user operation made a call that contained value to a contract other than the entrypoint + /// during validation + #[display("{0.kind} must not send ETH during validation (except from account to entry point)")] + CallHadValue(Entity), + /// The code hash of accessed contracts changed on the second simulation + #[display("code accessed by validation has changed since the last time validation was run")] + CodeHashChanged, + /// The user operation contained an entity that accessed storage without being staked + #[display("{0.needs_stake} needs to be staked: {0.accessing_entity} accessed storage at {0.accessed_address} slot {0.slot} (associated with {0.accessed_entity:?})")] + NotStaked(Box), + /// The user operation uses a paymaster that returns a context while being unstaked + #[display("Unstaked paymaster must not return context")] + UnstakedPaymasterContext, + /// The user operation uses an aggregator entity and it is not staked + #[display("An aggregator must be staked, regardless of storager usage")] + UnstakedAggregator, + /// Simulation reverted with an unintended reason, containing a message + #[display("reverted while simulating {0} validation: {1}")] + UnintendedRevertWithMessage(EntityType, String, Option
<Address>), + /// Simulation reverted with an unintended reason + #[display("reverted while simulating {0} validation")] + UnintendedRevert(EntityType, Option<Address>
), + /// Validation revert (only used for unsafe sim) + #[display("validation revert: {0}")] + ValidationRevert(ValidationRevert), + /// Simulation did not revert, a revert is always expected + #[display("simulateValidation did not revert. Make sure your EntryPoint is valid")] + DidNotRevert, + /// Simulation had the wrong number of phases + #[display("simulateValidation should have 3 parts but had {0} instead. Make sure your EntryPoint is valid")] + WrongNumberOfPhases(u32), + /// The user operation ran out of gas during validation + #[display("ran out of gas during {0.kind} validation")] + OutOfGas(Entity), + /// The user operation aggregator signature validation failed + #[display("aggregator signature validation failed")] + AggregatorValidationFailed, + /// Verification gas limit doesn't have the required buffer on the measured gas + #[display("verification gas limit doesn't have the required buffer on the measured gas, limit: {0}, needed: {1}")] + VerificationGasLimitBufferTooLow(U256, U256), + /// Unsupported contract type + #[display("accessed unsupported contract type: {0:?} at {1:?}. 
Address must be whitelisted")] + AccessedUnsupportedContractType(String, Address), +} + +/// Information about a storage violation based on stake status +#[derive(Debug, PartialEq, Clone, PartialOrd, Eq, Ord)] +pub struct NeedsStakeInformation { + /// Entity needing stake info + pub needs_stake: Entity, + /// The entity that accessed the storage requiring stake + pub accessing_entity: EntityType, + /// Type of accessed entity, if it is a known entity + pub accessed_entity: Option, + /// Address that was accessed while unstaked + pub accessed_address: Address, + /// The accessed slot number + pub slot: U256, + /// Minumum stake + pub min_stake: U256, + /// Minumum delay after an unstake event + pub min_unstake_delay: U256, +} diff --git a/crates/types/src/pool/mod.rs b/crates/types/src/pool/mod.rs new file mode 100644 index 00000000..d17f5bfd --- /dev/null +++ b/crates/types/src/pool/mod.rs @@ -0,0 +1,36 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. 
+// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +//! Rundler pool types + +mod error; +pub use error::*; + +mod traits; +pub use traits::*; + +mod types; +pub use types::*; diff --git a/crates/types/src/pool/traits.rs b/crates/types/src/pool/traits.rs new file mode 100644 index 00000000..8b9db710 --- /dev/null +++ b/crates/types/src/pool/traits.rs @@ -0,0 +1,124 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +use std::pin::Pin; + +use ethers::types::{Address, H256}; +use futures_util::Stream; +#[cfg(feature = "test-utils")] +use mockall::automock; + +use super::{ + error::PoolError, + types::{NewHead, PaymasterMetadata, PoolOperation, Reputation, ReputationStatus, StakeStatus}, +}; +use crate::{EntityUpdate, UserOperationId, UserOperationVariant}; + +/// Result type for pool server operations. 
+pub type PoolResult<T> = std::result::Result<T, PoolError>; + +/// Pool server trait +#[cfg_attr(feature = "test-utils", automock)] +#[async_trait::async_trait] +pub trait Pool: Send + Sync + 'static { + /// Get the supported entry points of the pool + async fn get_supported_entry_points(&self) -> PoolResult<Vec<Address>>; + + /// Add an operation to the pool + async fn add_op(&self, entry_point: Address, op: UserOperationVariant) -> PoolResult<H256>; + + /// Get operations from the pool + async fn get_ops( + &self, + entry_point: Address, + max_ops: u64, + shard_index: u64, + ) -> PoolResult<Vec<PoolOperation>>; + + /// Get an operation from the pool by hash + /// Checks each entry point in order until the operation is found + /// Returns None if the operation is not found + async fn get_op_by_hash(&self, hash: H256) -> PoolResult<Option<PoolOperation>>; + + /// Remove operations from the pool by hash + async fn remove_ops(&self, entry_point: Address, ops: Vec<H256>) -> PoolResult<()>; + + /// Remove an operation from the pool by id + async fn remove_op_by_id( + &self, + entry_point: Address, + id: UserOperationId, + ) -> PoolResult<Option<H256>>; + + /// Update operations associated with entities from the pool + async fn update_entities( + &self, + entry_point: Address, + entities: Vec<EntityUpdate>, + ) -> PoolResult<()>; + + /// Subscribe to new chain heads from the pool. + /// + /// The pool will notify the subscriber when a new chain head is received, and the pool + /// has processed all operations up to that head. 
+ async fn subscribe_new_heads(&self) -> PoolResult + Send>>>; + + /// Get reputation status given entrypoint and address + async fn get_reputation_status( + &self, + entry_point: Address, + address: Address, + ) -> PoolResult; + + /// Get stake status given entrypoint and address + async fn get_stake_status( + &self, + entry_point: Address, + address: Address, + ) -> PoolResult; + + /// Clear the pool state, used for debug methods + async fn debug_clear_state( + &self, + clear_mempool: bool, + clear_paymaster: bool, + clear_reputation: bool, + ) -> PoolResult<()>; + + /// Dump all operations in the pool, used for debug methods + async fn debug_dump_mempool(&self, entry_point: Address) -> PoolResult>; + + /// Set reputations for entities, used for debug methods + async fn debug_set_reputations( + &self, + entry_point: Address, + reputations: Vec, + ) -> PoolResult<()>; + + /// Dump reputations for entities, used for debug methods + async fn debug_dump_reputation(&self, entry_point: Address) -> PoolResult>; + + /// Dump paymaster balances, used for debug methods + async fn debug_dump_paymaster_balances( + &self, + entry_point: Address, + ) -> PoolResult>; + + /// Controls whether or not the certain tracking data structures are used to block user operations + async fn admin_set_tracking( + &self, + entry_point: Address, + paymaster: bool, + reputation: bool, + ) -> PoolResult<()>; +} diff --git a/crates/types/src/pool/types.rs b/crates/types/src/pool/types.rs new file mode 100644 index 00000000..70f92805 --- /dev/null +++ b/crates/types/src/pool/types.rs @@ -0,0 +1,156 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. 
+// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +use ethers::types::{Address, H256, U256}; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; + +use crate::{ + entity::EntityInfos, Entity, StakeInfo, UserOperation, UserOperationVariant, ValidTimeRange, +}; + +/// The new head of the chain, as viewed by the pool +#[derive(Clone, Debug, Default)] +pub struct NewHead { + /// The hash of the new head + pub block_hash: H256, + /// The number of the new head + pub block_number: u64, +} + +/// The reputation of an entity +#[derive(Debug, Clone)] +pub struct Reputation { + /// The entity's address + pub address: Address, + /// Number of ops seen in the current interval + pub ops_seen: u64, + /// Number of ops included in the current interval + pub ops_included: u64, +} + +/// Reputation status for an entity +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum ReputationStatus { + /// Entity is not throttled or banned + Ok, + /// Entity is throttled + Throttled, + /// Entity is banned + Banned, +} + +impl Serialize for ReputationStatus { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match self { + ReputationStatus::Ok => serializer.serialize_str("ok"), + ReputationStatus::Throttled => serializer.serialize_str("throttled"), + ReputationStatus::Banned => serializer.serialize_str("banned"), + } + } +} + +impl<'de> Deserialize<'de> for ReputationStatus { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + match s.as_str() { + "ok" => Ok(ReputationStatus::Ok), + "throttled" => Ok(ReputationStatus::Throttled), + "banned" 
=> Ok(ReputationStatus::Banned), + _ => Err(de::Error::custom(format!("Invalid reputation status {s}"))), + } + } +} + +/// Stake status structure +#[derive(Debug, Clone, Copy)] +pub struct StakeStatus { + /// Address is staked + pub is_staked: bool, + /// Stake information about address + pub stake_info: StakeInfo, +} + +/// The metadata for a paymaster +#[derive(Debug, Default, Clone, Eq, PartialEq, Copy)] +pub struct PaymasterMetadata { + /// Paymaster address + pub address: Address, + /// The on-chain balance of the paymaster + pub confirmed_balance: U256, + /// The pending balance is the confirm balance subtracted by + /// the max cost of all the pending user operations that use the paymaster + pub pending_balance: U256, +} + +/// A user operation with additional metadata from validation. +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct PoolOperation { + /// The user operation stored in the pool + pub uo: UserOperationVariant, + /// The entry point address for this operation + pub entry_point: Address, + /// The aggregator address for this operation, if any. + pub aggregator: Option
<Address>, + /// The valid time range for this operation. + pub valid_time_range: ValidTimeRange, + /// The expected code hash for all contracts accessed during validation for this operation. + pub expected_code_hash: H256, + /// The block hash simulation was completed at + pub sim_block_hash: H256, + /// The block number simulation was completed at + pub sim_block_number: u64, + /// Whether the account is staked. + pub account_is_staked: bool, + /// Staking information about all the entities. + pub entity_infos: EntityInfos, +} + +impl PoolOperation { + /// Returns true if the operation contains the given entity. + pub fn contains_entity(&self, entity: &Entity) -> bool { + if let Some(ei) = self.entity_infos.get(entity.kind) { + ei.entity.address == entity.address + } else { + false + } + } + + /// Returns an iterator over all entities that are included in this operation. + pub fn entities(&'_ self) -> impl Iterator<Item = Entity> + '_ { + self.entity_infos + .entities() + .map(|(t, ei)| Entity::new(t, ei.entity.address)) + } + + /// Return all the unstaked entities that are used in this operation. + pub fn unstaked_entities(&'_ self) -> impl Iterator<Item = Entity> + '_ { + self.entity_infos.entities().filter_map(|(t, ei)| { + if ei.is_staked { + None + } else { + Entity::new(t, ei.entity.address).into() + } + }) + } + + /// Compute the amount of heap memory the PoolOperation takes up. 
+ pub fn mem_size(&self) -> usize { + std::mem::size_of::() + self.uo.heap_size() + } +} diff --git a/crates/types/src/timestamp.rs b/crates/types/src/timestamp.rs index 78d6bb14..5e5187d2 100644 --- a/crates/types/src/timestamp.rs +++ b/crates/types/src/timestamp.rs @@ -192,6 +192,14 @@ impl ValidTimeRange { pub fn contains(self, timestamp: Timestamp, buffer: Duration) -> bool { self.valid_after <= timestamp && (timestamp + buffer) <= self.valid_until } + + /// Intersect two time ranges into a single time range that is valid whenever both are valid + pub fn intersect(self, other: Self) -> Self { + Self { + valid_after: self.valid_after.max(other.valid_after), + valid_until: self.valid_until.min(other.valid_until), + } + } } #[cfg(test)] @@ -286,6 +294,15 @@ mod test { assert_eq!(json, "\"0x64\""); } + #[test] + fn test_merge_time_ranges() { + let range1 = ValidTimeRange::new(Timestamp::new(100), Timestamp::new(200)); + let range2 = ValidTimeRange::new(Timestamp::new(150), Timestamp::new(250)); + let intersect = range1.intersect(range2); + assert_eq!(intersect.valid_after, Timestamp::new(150)); + assert_eq!(intersect.valid_until, Timestamp::new(200)); + } + fn get_timestamp_out_of_bounds_for_datetime() -> Timestamp { // This is just a bit further in the future than the maximum allowed // DateTime, which is just before the start of year 2^18 = 262144. diff --git a/crates/types/src/user_operation.rs b/crates/types/src/user_operation.rs deleted file mode 100644 index fb3d68bc..00000000 --- a/crates/types/src/user_operation.rs +++ /dev/null @@ -1,332 +0,0 @@ -// This file is part of Rundler. -// -// Rundler is free software: you can redistribute it and/or modify it under the -// terms of the GNU Lesser General Public License as published by the Free Software -// Foundation, either version 3 of the License, or (at your option) any later version. 
-// -// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; -// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License along with Rundler. -// If not, see https://www.gnu.org/licenses/. - -use ethers::{ - abi::{encode, Token}, - types::{Address, Bytes, H256, U256}, - utils::keccak256, -}; -use strum::IntoEnumIterator; - -use crate::{ - entity::{Entity, EntityType}, - UserOperation, -}; - -/// Number of bytes in the fixed size portion of an ABI encoded user operation -const PACKED_USER_OPERATION_FIXED_LEN: usize = 480; - -/// Unique identifier for a user operation from a given sender -#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] -pub struct UserOperationId { - /// sender of user operation - pub sender: Address, - /// nonce of user operation - pub nonce: U256, -} - -impl UserOperation { - /// Hash a user operation with the given entry point and chain ID. - /// - /// The hash is used to uniquely identify a user operation in the entry point. - /// It does not include the signature field. 
- pub fn op_hash(&self, entry_point: Address, chain_id: u64) -> H256 { - keccak256(encode(&[ - Token::FixedBytes(keccak256(self.pack_for_hash()).to_vec()), - Token::Address(entry_point), - Token::Uint(chain_id.into()), - ])) - .into() - } - - /// Hash only the fields needed for HybridCompute key, excluding gas limits and fees - pub fn op_hc_hash(&self) -> H256 { - keccak256(encode(&[ - Token::FixedBytes(keccak256(self.pack_for_hc_hash()).to_vec()), - ])) - .into() - } - - /// Get the unique identifier for this user operation from its sender - pub fn id(&self) -> UserOperationId { - UserOperationId { - sender: self.sender, - nonce: self.nonce, - } - } - - /// Get the address of the factory entity associated with this user operation, if any - pub fn factory(&self) -> Option
<Address> { - Self::get_address_from_field(&self.init_code) - } - - /// Returns the maximum cost, in wei, of this user operation - pub fn max_gas_cost(&self) -> U256 { - let mul = if self.paymaster().is_some() { 3 } else { 1 }; - self.max_fee_per_gas - * (self.pre_verification_gas + self.call_gas_limit + self.verification_gas_limit * mul) - } - - /// Get the address of the paymaster entity associated with this user operation, if any - pub fn paymaster(&self) -> Option
<Address> { - Self::get_address_from_field(&self.paymaster_and_data) - } - - /// Extracts an address from the beginning of a data field - /// Useful to extract the paymaster address from paymaster_and_data - /// and the factory address from init_code - pub fn get_address_from_field(data: &Bytes) -> Option<Address>
{ - if data.len() < 20 { - None - } else { - Some(Address::from_slice(&data[..20])) - } - } - - /// Efficient calculation of the size of a packed user operation - pub fn abi_encoded_size(&self) -> usize { - PACKED_USER_OPERATION_FIXED_LEN - + pad_len(&self.init_code) - + pad_len(&self.call_data) - + pad_len(&self.paymaster_and_data) - + pad_len(&self.signature) - } - - /// Compute the amount of heap memory the UserOperation takes up. - pub fn heap_size(&self) -> usize { - self.init_code.len() - + self.call_data.len() - + self.paymaster_and_data.len() - + self.signature.len() - } - - /// Gets the byte array representation of the user operation to be used in the signature - pub fn pack_for_hash(&self) -> Bytes { - let hash_init_code = keccak256(self.init_code.clone()); - let hash_call_data = keccak256(self.call_data.clone()); - let hash_paymaster_and_data = keccak256(self.paymaster_and_data.clone()); - - encode(&[ - Token::Address(self.sender), - Token::Uint(self.nonce), - Token::FixedBytes(hash_init_code.to_vec()), - Token::FixedBytes(hash_call_data.to_vec()), - Token::Uint(self.call_gas_limit), - Token::Uint(self.verification_gas_limit), - Token::Uint(self.pre_verification_gas), - Token::Uint(self.max_fee_per_gas), - Token::Uint(self.max_priority_fee_per_gas), - Token::FixedBytes(hash_paymaster_and_data.to_vec()), - ]) - .into() - } - - /// Gets the byte array representation of the user operation to be used as HC key - pub fn pack_for_hc_hash(&self) -> Bytes { - let hash_init_code = keccak256(self.init_code.clone()); - let hash_call_data = keccak256(self.call_data.clone()); - let hash_paymaster_and_data = keccak256(self.paymaster_and_data.clone()); - - encode(&[ - Token::Address(self.sender), - Token::Uint(self.nonce), - Token::FixedBytes(hash_init_code.to_vec()), - Token::FixedBytes(hash_call_data.to_vec()), - Token::FixedBytes(hash_paymaster_and_data.to_vec()), // ??? 
- ]) - .into() - } - - /// Gets an iterator on all entities associated with this user operation - pub fn entities(&'_ self) -> impl Iterator + '_ { - EntityType::iter().filter_map(|entity| { - self.entity_address(entity) - .map(|address| Entity::new(entity, address)) - }) - } - - /// Gets the address of the entity of the given type associated with this user operation, if any - fn entity_address(&self, entity: EntityType) -> Option
{ - match entity { - EntityType::Account => Some(self.sender), - EntityType::Paymaster => self.paymaster(), - EntityType::Factory => self.factory(), - EntityType::Aggregator => None, - } - } -} - -/// Calculates the size a byte array padded to the next largest multiple of 32 -fn pad_len(b: &Bytes) -> usize { - (b.len() + 31) & !31 -} - -#[cfg(test)] -mod tests { - use std::str::FromStr; - - use ethers::{ - abi::AbiEncode, - types::{Bytes, U256}, - }; - - use super::*; - - #[test] - fn test_hash_zeroed() { - // Testing a user operation hash against the hash generated by the - // entrypoint contract getUserOpHash() function with entrypoint address - // at 0x66a15edcc3b50a663e72f1457ffd49b9ae284ddc and chain ID 1337. - // - // UserOperation = { - // sender: '0x0000000000000000000000000000000000000000', - // nonce: 0, - // initCode: '0x', - // callData: '0x', - // callGasLimit: 0, - // verificationGasLimit: 0, - // preVerificationGas: 0, - // maxFeePerGas: 0, - // maxPriorityFeePerGas: 0, - // paymasterAndData: '0x', - // signature: '0x', - // } - // - // Hash: 0xdca97c3b49558ab360659f6ead939773be8bf26631e61bb17045bb70dc983b2d - let operation = UserOperation { - sender: "0x0000000000000000000000000000000000000000" - .parse() - .unwrap(), - nonce: U256::zero(), - init_code: Bytes::default(), - call_data: Bytes::default(), - call_gas_limit: U256::zero(), - verification_gas_limit: U256::zero(), - pre_verification_gas: U256::zero(), - max_fee_per_gas: U256::zero(), - max_priority_fee_per_gas: U256::zero(), - paymaster_and_data: Bytes::default(), - signature: Bytes::default(), - }; - let entry_point = "0x66a15edcc3b50a663e72f1457ffd49b9ae284ddc" - .parse() - .unwrap(); - let chain_id = 1337; - let hash = operation.op_hash(entry_point, chain_id); - assert_eq!( - hash, - "0xdca97c3b49558ab360659f6ead939773be8bf26631e61bb17045bb70dc983b2d" - .parse() - .unwrap() - ); - } - - #[test] - fn test_hash() { - // Testing a user operation hash against the hash generated by the - // 
entrypoint contract getUserOpHash() function with entrypoint address - // at 0x66a15edcc3b50a663e72f1457ffd49b9ae284ddc and chain ID 1337. - // - // UserOperation = { - // sender: '0x1306b01bc3e4ad202612d3843387e94737673f53', - // nonce: 8942, - // initCode: '0x6942069420694206942069420694206942069420', - // callData: '0x0000000000000000000000000000000000000000080085', - // callGasLimit: 10000, - // verificationGasLimit: 100000, - // preVerificationGas: 100, - // maxFeePerGas: 99999, - // maxPriorityFeePerGas: 9999999, - // paymasterAndData: - // '0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef', - // signature: - // '0xda0929f527cded8d0a1eaf2e8861d7f7e2d8160b7b13942f99dd367df4473a', - // } - // - // Hash: 0x484add9e4d8c3172d11b5feb6a3cc712280e176d278027cfa02ee396eb28afa1 - let operation = UserOperation { - sender: "0x1306b01bc3e4ad202612d3843387e94737673f53" - .parse() - .unwrap(), - nonce: 8942.into(), - init_code: "0x6942069420694206942069420694206942069420" - .parse() - .unwrap(), - call_data: "0x0000000000000000000000000000000000000000080085" - .parse() - .unwrap(), - call_gas_limit: 10000.into(), - verification_gas_limit: 100000.into(), - pre_verification_gas: 100.into(), - max_fee_per_gas: 99999.into(), - max_priority_fee_per_gas: 9999999.into(), - paymaster_and_data: - "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - .parse() - .unwrap(), - signature: "0xda0929f527cded8d0a1eaf2e8861d7f7e2d8160b7b13942f99dd367df4473a" - .parse() - .unwrap(), - }; - let entry_point = "0x66a15edcc3b50a663e72f1457ffd49b9ae284ddc" - .parse() - .unwrap(); - let chain_id = 1337; - let hash = operation.op_hash(entry_point, chain_id); - assert_eq!( - hash, - "0x484add9e4d8c3172d11b5feb6a3cc712280e176d278027cfa02ee396eb28afa1" - .parse() - .unwrap() - ); - } - - #[test] - fn test_get_address_from_field() { - let paymaster_and_data: Bytes = - 
"0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" - .parse() - .unwrap(); - let address = UserOperation::get_address_from_field(&paymaster_and_data).unwrap(); - assert_eq!( - address, - "0x0123456789abcdef0123456789abcdef01234567" - .parse() - .unwrap() - ); - } - - #[test] - fn test_abi_encoded_size() { - let user_operation = UserOperation { - sender: "0xe29a7223a7e040d70b5cd460ef2f4ac6a6ab304d" - .parse() - .unwrap(), - nonce: U256::from_dec_str("3937668929043450082210854285941660524781292117276598730779").unwrap(), - init_code: Bytes::default(), - call_data: Bytes::from_str("0x5194544700000000000000000000000058440a3e78b190e5bd07905a08a60e30bb78cb5b000000000000000000000000000000000000000000000000000009184e72a000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap(), - call_gas_limit: 40_960.into(), - verification_gas_limit: 75_099.into(), - pre_verification_gas: 46_330.into(), - max_fee_per_gas: 105_000_000.into(), - max_priority_fee_per_gas: 105_000_000.into(), - paymaster_and_data: Bytes::from_str("0xc03aac639bb21233e0139381970328db8bceeb6700006508996f000065089a9b0000000000000000000000000000000000000000ca7517be4e51ca2cde69bc44c4c3ce00ff7f501ce4ee1b3c6b2a742f579247292e4f9a672522b15abee8eaaf1e1487b8e3121d61d42ba07a47f5ccc927aa7eb61b").unwrap(), - signature: Bytes::from_str("0x00000000f8a0655423f2dfbb104e0ff906b7b4c64cfc12db0ac5ef0fb1944076650ce92a1a736518e5b6cd46c6ff6ece7041f2dae199fb4c8e7531704fbd629490b712dc1b").unwrap(), - }; - - assert_eq!( - user_operation.clone().encode().len(), - user_operation.abi_encoded_size() - ); - } -} diff --git a/crates/types/src/user_operation/mod.rs b/crates/types/src/user_operation/mod.rs new file mode 100644 index 00000000..5e1c0ea4 --- /dev/null +++ b/crates/types/src/user_operation/mod.rs @@ -0,0 +1,460 @@ +// This file is part of Rundler. 
+// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +use std::{fmt::Debug, time::Duration}; + +use ethers::{ + abi::AbiEncode, + types::{Address, Bytes, H256, U256}, +}; + +/// User Operation types for Entry Point v0.6 +pub mod v0_6; +/// User Operation types for Entry Point v0.7 +pub mod v0_7; + +use crate::{chain::ChainSpec, Entity}; + +/// A user op must be valid for at least this long into the future to be included. 
+pub const TIME_RANGE_BUFFER: Duration = Duration::from_secs(60); + +/// Overhead for bytes required for each bundle +/// 4 bytes for function signature +/// 32 bytes for user op array offset +/// 32 bytes for beneficiary +/// 32 bytes for array count +/// Ontop of this offset there needs to be another 32 bytes for each +/// user operation in the bundle to store its offset within the array +pub const BUNDLE_BYTE_OVERHEAD: usize = 4 + 32 + 32 + 32; + +/// Size of word that stores offset of user op location +/// within handleOps `ops` array +pub const USER_OP_OFFSET_WORD_SIZE: usize = 32; + +/// ERC-4337 Entry point version +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum EntryPointVersion { + /// Unspecified version + Unspecified, + /// Version 0.6 + V0_6, + /// Version 0.7 + V0_7, +} + +/// Unique identifier for a user operation from a given sender +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct UserOperationId { + /// sender of user operation + pub sender: Address, + /// nonce of user operation + pub nonce: U256, +} + +/// User operation trait +pub trait UserOperation: Debug + Clone + Send + Sync + 'static { + /// Optional gas type + /// + /// Associated type for the version of a user operation that has optional gas and fee fields + type OptionalGas; + + /// Get the entry point version for this UO + fn entry_point_version() -> EntryPointVersion; + + /* + * Getters + */ + + /// Get the user operation sender address + fn sender(&self) -> Address; + + /// Get the user operation nonce + fn nonce(&self) -> U256; + + /// Get the user operation paymaster address, if any + fn paymaster(&self) -> Option
; + + /// Get the user operation factory address, if any + fn factory(&self) -> Option
; + + /// Get the user operation calldata + fn call_data(&self) -> &Bytes; + + /// Returns the call gas limit + fn call_gas_limit(&self) -> U256; + + /// Returns the verification gas limit + fn verification_gas_limit(&self) -> U256; + + /// Returns the max fee per gas + fn max_fee_per_gas(&self) -> U256; + + /// Returns the max priority fee per gas + fn max_priority_fee_per_gas(&self) -> U256; + + /// Returns the maximum cost, in wei, of this user operation + fn max_gas_cost(&self) -> U256; + + /* + * Enhanced functions + */ + + /// Hash a user operation with the given entry point and chain ID. + /// + /// The hash is used to uniquely identify a user operation in the entry point. + /// It does not include the signature field. + fn hash(&self, entry_point: Address, chain_id: u64) -> H256; + + /// Identifies a user operation for Hybrid Compute purposes + fn hc_hash(&self) -> H256; + + /// Get the user operation id + fn id(&self) -> UserOperationId; + + /// Gets an iterator on all entities associated with this user operation + fn entities(&'_ self) -> Vec; + + /// Returns the heap size of the user operation + fn heap_size(&self) -> usize; + + /// Returns the total verification gas limit + fn total_verification_gas_limit(&self) -> U256; + + /// Returns the required pre-execution buffer + /// + /// This should capture all of the gas that is needed to execute the user operation, + /// minus the call gas limit. The entry point will check for this buffer before + /// executing the user operation. 
+ fn required_pre_execution_buffer(&self) -> U256; + + /// Returns the pre-verification gas + fn pre_verification_gas(&self) -> U256; + + /// Calculate the static portion of the pre-verification gas for this user operation + fn calc_static_pre_verification_gas( + &self, + chain_spec: &ChainSpec, + include_fixed_gas_overhead: bool, + ) -> U256; + + /// Clear the signature field of the user op + /// + /// Used when a user op is using a signature aggregator prior to being submitted + fn clear_signature(&mut self); + + /// Abi encode size of the user operation + fn abi_encoded_size(&self) -> usize; + + /// Calculate the size of the user operation in single UO bundle in bytes + fn single_uo_bundle_size_bytes(&self) -> usize { + self.abi_encoded_size() + BUNDLE_BYTE_OVERHEAD + USER_OP_OFFSET_WORD_SIZE + } +} + +/// User operation enum +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum UserOperationVariant { + /// User operation version 0.6 + V0_6(v0_6::UserOperation), + /// User operation version 0.7 + V0_7(v0_7::UserOperation), +} + +impl UserOperation for UserOperationVariant { + type OptionalGas = UserOperationOptionalGas; + + fn entry_point_version() -> EntryPointVersion { + EntryPointVersion::Unspecified + } + + fn hash(&self, entry_point: Address, chain_id: u64) -> H256 { + match self { + UserOperationVariant::V0_6(op) => op.hash(entry_point, chain_id), + UserOperationVariant::V0_7(op) => op.hash(entry_point, chain_id), + } + } + + fn hc_hash(&self) -> H256 { + match self { + UserOperationVariant::V0_6(op) => op.hc_hash(), + UserOperationVariant::V0_7(op) => op.hc_hash(), + } + } + + fn id(&self) -> UserOperationId { + match self { + UserOperationVariant::V0_6(op) => op.id(), + UserOperationVariant::V0_7(op) => op.id(), + } + } + + fn sender(&self) -> Address { + match self { + UserOperationVariant::V0_6(op) => op.sender(), + UserOperationVariant::V0_7(op) => op.sender(), + } + } + + fn nonce(&self) -> U256 { + match self { + UserOperationVariant::V0_6(op) => 
op.nonce(), + UserOperationVariant::V0_7(op) => op.nonce(), + } + } + + fn paymaster(&self) -> Option
{ + match self { + UserOperationVariant::V0_6(op) => op.paymaster(), + UserOperationVariant::V0_7(op) => op.paymaster(), + } + } + + fn factory(&self) -> Option
{ + match self { + UserOperationVariant::V0_6(op) => op.factory(), + UserOperationVariant::V0_7(op) => op.factory(), + } + } + + fn call_data(&self) -> &Bytes { + match self { + UserOperationVariant::V0_6(op) => op.call_data(), + UserOperationVariant::V0_7(op) => op.call_data(), + } + } + + fn max_gas_cost(&self) -> U256 { + match self { + UserOperationVariant::V0_6(op) => op.max_gas_cost(), + UserOperationVariant::V0_7(op) => op.max_gas_cost(), + } + } + + fn entities(&'_ self) -> Vec { + match self { + UserOperationVariant::V0_6(op) => op.entities(), + UserOperationVariant::V0_7(op) => op.entities(), + } + } + + fn heap_size(&self) -> usize { + match self { + UserOperationVariant::V0_6(op) => op.heap_size(), + UserOperationVariant::V0_7(op) => op.heap_size(), + } + } + + fn call_gas_limit(&self) -> U256 { + match self { + UserOperationVariant::V0_6(op) => op.call_gas_limit(), + UserOperationVariant::V0_7(op) => op.call_gas_limit(), + } + } + + fn verification_gas_limit(&self) -> U256 { + match self { + UserOperationVariant::V0_6(op) => op.verification_gas_limit(), + UserOperationVariant::V0_7(op) => op.verification_gas_limit(), + } + } + + fn total_verification_gas_limit(&self) -> U256 { + match self { + UserOperationVariant::V0_6(op) => op.total_verification_gas_limit(), + UserOperationVariant::V0_7(op) => op.total_verification_gas_limit(), + } + } + + fn required_pre_execution_buffer(&self) -> U256 { + match self { + UserOperationVariant::V0_6(op) => op.required_pre_execution_buffer(), + UserOperationVariant::V0_7(op) => op.required_pre_execution_buffer(), + } + } + + fn pre_verification_gas(&self) -> U256 { + match self { + UserOperationVariant::V0_6(op) => op.pre_verification_gas(), + UserOperationVariant::V0_7(op) => op.pre_verification_gas(), + } + } + + fn calc_static_pre_verification_gas( + &self, + chain_spec: &ChainSpec, + include_fixed_gas_overhead: bool, + ) -> U256 { + match self { + UserOperationVariant::V0_6(op) => { + 
op.calc_static_pre_verification_gas(chain_spec, include_fixed_gas_overhead) + } + UserOperationVariant::V0_7(op) => { + op.calc_static_pre_verification_gas(chain_spec, include_fixed_gas_overhead) + } + } + } + + fn max_fee_per_gas(&self) -> U256 { + match self { + UserOperationVariant::V0_6(op) => op.max_fee_per_gas(), + UserOperationVariant::V0_7(op) => op.max_fee_per_gas(), + } + } + + fn max_priority_fee_per_gas(&self) -> U256 { + match self { + UserOperationVariant::V0_6(op) => op.max_priority_fee_per_gas(), + UserOperationVariant::V0_7(op) => op.max_priority_fee_per_gas(), + } + } + + fn clear_signature(&mut self) { + match self { + UserOperationVariant::V0_6(op) => op.clear_signature(), + UserOperationVariant::V0_7(op) => op.clear_signature(), + } + } + + fn abi_encoded_size(&self) -> usize { + match self { + UserOperationVariant::V0_6(op) => op.abi_encoded_size(), + UserOperationVariant::V0_7(op) => op.abi_encoded_size(), + } + } +} + +impl UserOperationVariant { + fn into_v0_6(self) -> Option { + match self { + UserOperationVariant::V0_6(op) => Some(op), + _ => None, + } + } + + fn into_v0_7(self) -> Option { + match self { + UserOperationVariant::V0_7(op) => Some(op), + _ => None, + } + } + + /// Returns the user operation type + pub fn uo_type(&self) -> EntryPointVersion { + match self { + UserOperationVariant::V0_6(_) => EntryPointVersion::V0_6, + UserOperationVariant::V0_7(_) => EntryPointVersion::V0_7, + } + } +} + +/// User operation optional gas enum +#[derive(Debug, Clone)] +pub enum UserOperationOptionalGas { + /// User operation optional gas for version 0.6 + V0_6(v0_6::UserOperationOptionalGas), + /// User operation optional gas for version 0.7 + V0_7(v0_7::UserOperationOptionalGas), +} + +impl UserOperationOptionalGas { + /// Returns the user operation type + pub fn single_uo_bundle_size_bytes(&self) -> usize { + let abi_size = match self { + UserOperationOptionalGas::V0_6(op) => op.abi_encoded_size(), + UserOperationOptionalGas::V0_7(op) => 
op.abi_encoded_size(), + }; + abi_size + BUNDLE_BYTE_OVERHEAD + USER_OP_OFFSET_WORD_SIZE + } +} + +/// Gas estimate +#[derive(Debug, Clone)] +pub struct GasEstimate { + /// Pre verification gas + pub pre_verification_gas: U256, + /// Call gas limit + pub call_gas_limit: U256, + /// Verification gas limit + pub verification_gas_limit: U256, + /// Paymaster verification gas limit + /// + /// v0.6: unused + /// + /// v0.7: populated only if the user operation has a paymaster + pub paymaster_verification_gas_limit: Option, +} + +/// User operations per aggregator +#[derive(Debug, Eq, PartialEq, Clone, Default)] +pub struct UserOpsPerAggregator { + /// User operations + pub user_ops: Vec, + /// Aggregator address, zero if no aggregator is used + pub aggregator: Address, + /// Aggregator signature, empty if no aggregator is used + pub signature: Bytes, +} + +pub(crate) fn op_calldata_gas_cost( + uo: UO, + zero_byte_cost: U256, + non_zero_byte_cost: U256, + per_word_cost: U256, +) -> U256 { + let encoded_op = uo.encode(); + let length_in_words = (encoded_op.len() + 31) >> 5; // ceil(encoded_op.len() / 32) + let call_data_cost: U256 = encoded_op + .iter() + .map(|&x| { + if x == 0 { + zero_byte_cost + } else { + non_zero_byte_cost + } + }) + .reduce(|a, b| a + b) + .unwrap_or_default(); + + call_data_cost + per_word_cost * length_in_words +} + +/// Calculates the size a byte array padded to the next largest multiple of 32 +pub(crate) fn byte_array_abi_len(b: &Bytes) -> usize { + (b.len() + 31) & !31 +} + +/// Returns the default value if the option is None or the value is equal to the equal value +pub(crate) fn default_if_none_or_equal( + v: Option, + default: V, + equal: V, +) -> V { + v.filter(|v| v != &equal).unwrap_or(default) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_byte_array_abi_len() { + let b = Bytes::from(vec![0u8; 32]); + assert_eq!(byte_array_abi_len(&b), 32); + + let b = Bytes::from(vec![0u8; 31]); + 
assert_eq!(byte_array_abi_len(&b), 32); + + let b = Bytes::from(vec![0u8; 33]); + assert_eq!(byte_array_abi_len(&b), 64); + } +} diff --git a/crates/types/src/user_operation/v0_6.rs b/crates/types/src/user_operation/v0_6.rs new file mode 100644 index 00000000..0a8cee1a --- /dev/null +++ b/crates/types/src/user_operation/v0_6.rs @@ -0,0 +1,549 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. 
+ +use ethers::{ + abi::{encode, Token}, + types::{Address, Bytes, H256, U256}, + utils::keccak256, +}; +use rand::{self, RngCore}; +use serde::{Deserialize, Serialize}; +use strum::IntoEnumIterator; + +use super::{UserOperation as UserOperationTrait, UserOperationId, UserOperationVariant}; +pub use crate::contracts::v0_6::i_entry_point::{UserOperation, UserOpsPerAggregator}; +use crate::{ + chain::ChainSpec, + entity::{Entity, EntityType}, + EntryPointVersion, +}; + +/// Gas overhead required by the entry point contract for the inner call +pub const ENTRY_POINT_INNER_GAS_OVERHEAD: U256 = U256([5_000, 0, 0, 0]); + +/// Number of bytes in the fixed size portion of an ABI encoded user operation +/// sender = 32 bytes +/// nonce = 32 bytes +/// init_code = 32 bytes + 32 bytes num elems + var 32 +/// call_data = 32 bytes + 32 bytes num elems + var 32 +/// call_gas_limit = 32 bytes +/// verification_gas_limit = 32 bytes +/// pre_verification_gas = 32 bytes +/// max_fee_per_gas = 32 bytes +/// max_priority_fee_per_gas = 32 bytes +/// paymaster_and_data = 32 bytes + 32 bytes num elems + var 32 +/// signature = 32 bytes + 32 bytes num elems + var 32 +/// +/// 15 * 32 = 480 +const ABI_ENCODED_USER_OPERATION_FIXED_LEN: usize = 480; + +impl UserOperationTrait for UserOperation { + type OptionalGas = UserOperationOptionalGas; + + fn entry_point_version() -> EntryPointVersion { + EntryPointVersion::V0_6 + } + + fn hash(&self, entry_point: Address, chain_id: u64) -> H256 { + keccak256(encode(&[ + Token::FixedBytes(keccak256(self.pack_for_hash()).to_vec()), + Token::Address(entry_point), + Token::Uint(chain_id.into()), + ])) + .into() + } + + fn hc_hash(&self) -> H256 { + keccak256(encode(&[ + Token::FixedBytes(keccak256(self.pack_for_hc_hash()).to_vec()), + ])) + .into() + } + + fn id(&self) -> UserOperationId { + UserOperationId { + sender: self.sender, + nonce: self.nonce, + } + } + + fn sender(&self) -> Address { + self.sender + } + + fn nonce(&self) -> U256 { + self.nonce + 
} + + fn factory(&self) -> Option
{ + Self::get_address_from_field(&self.init_code) + } + + fn paymaster(&self) -> Option
{ + Self::get_address_from_field(&self.paymaster_and_data) + } + + fn call_data(&self) -> &Bytes { + &self.call_data + } + + fn max_gas_cost(&self) -> U256 { + let mul = if self.paymaster().is_some() { 3 } else { 1 }; + self.max_fee_per_gas + * (self.pre_verification_gas + self.call_gas_limit + self.verification_gas_limit * mul) + } + + fn heap_size(&self) -> usize { + self.init_code.len() + + self.call_data.len() + + self.paymaster_and_data.len() + + self.signature.len() + } + + fn entities(&self) -> Vec { + EntityType::iter() + .filter_map(|entity| { + self.entity_address(entity) + .map(|address| Entity::new(entity, address)) + }) + .collect() + } + + fn max_fee_per_gas(&self) -> U256 { + self.max_fee_per_gas + } + + fn max_priority_fee_per_gas(&self) -> U256 { + self.max_priority_fee_per_gas + } + + fn call_gas_limit(&self) -> U256 { + self.call_gas_limit + } + + fn pre_verification_gas(&self) -> U256 { + self.pre_verification_gas + } + + fn verification_gas_limit(&self) -> U256 { + self.verification_gas_limit + } + + fn total_verification_gas_limit(&self) -> U256 { + let mul = if self.paymaster().is_some() { 2 } else { 1 }; + self.verification_gas_limit * mul + } + + fn required_pre_execution_buffer(&self) -> U256 { + self.verification_gas_limit + ENTRY_POINT_INNER_GAS_OVERHEAD + } + + fn calc_static_pre_verification_gas( + &self, + chain_spec: &ChainSpec, + include_fixed_gas_overhead: bool, + ) -> U256 { + super::op_calldata_gas_cost( + self.clone(), + chain_spec.calldata_zero_byte_gas, + chain_spec.calldata_non_zero_byte_gas, + chain_spec.per_user_op_word_gas, + ) + chain_spec.per_user_op_v0_6_gas + + (if self.factory().is_some() { + chain_spec.per_user_op_deploy_overhead_gas + } else { + U256::zero() + }) + + (if include_fixed_gas_overhead { + chain_spec.transaction_intrinsic_gas + } else { + 0.into() + }) + } + + fn clear_signature(&mut self) { + self.signature = Bytes::default(); + } + + fn abi_encoded_size(&self) -> usize { + 
ABI_ENCODED_USER_OPERATION_FIXED_LEN + + super::byte_array_abi_len(&self.init_code) + + super::byte_array_abi_len(&self.call_data) + + super::byte_array_abi_len(&self.paymaster_and_data) + + super::byte_array_abi_len(&self.signature) + } +} + +impl UserOperation { + fn get_address_from_field(data: &Bytes) -> Option
{ + if data.len() < 20 { + None + } else { + Some(Address::from_slice(&data[..20])) + } + } + + fn pack_for_hash(&self) -> Bytes { + let hash_init_code = keccak256(self.init_code.clone()); + let hash_call_data = keccak256(self.call_data.clone()); + let hash_paymaster_and_data = keccak256(self.paymaster_and_data.clone()); + + encode(&[ + Token::Address(self.sender), + Token::Uint(self.nonce), + Token::FixedBytes(hash_init_code.to_vec()), + Token::FixedBytes(hash_call_data.to_vec()), + Token::Uint(self.call_gas_limit), + Token::Uint(self.verification_gas_limit), + Token::Uint(self.pre_verification_gas), + Token::Uint(self.max_fee_per_gas), + Token::Uint(self.max_priority_fee_per_gas), + Token::FixedBytes(hash_paymaster_and_data.to_vec()), + ]) + .into() + } + + /// Gets the byte array representation of the user operation to be used as HC key + pub fn pack_for_hc_hash(&self) -> Bytes { + let hash_init_code = keccak256(self.init_code.clone()); + let hash_call_data = keccak256(self.call_data.clone()); + let hash_paymaster_and_data = keccak256(self.paymaster_and_data.clone()); + + encode(&[ + Token::Address(self.sender), + Token::Uint(self.nonce), + Token::FixedBytes(hash_init_code.to_vec()), + Token::FixedBytes(hash_call_data.to_vec()), + Token::FixedBytes(hash_paymaster_and_data.to_vec()), // ??? + ]) + .into() + } + + fn entity_address(&self, entity: EntityType) -> Option
{ + match entity { + EntityType::Account => Some(self.sender), + EntityType::Paymaster => self.paymaster(), + EntityType::Factory => self.factory(), + EntityType::Aggregator => None, + } + } +} + +impl From for UserOperation { + /// Converts a UserOperationVariant to a UserOperation 0.6 + /// + /// # Panics + /// + /// Panics if the variant is not v0.6. This is for use in contexts + /// where the variant is known to be v0.6. + fn from(value: UserOperationVariant) -> Self { + value.into_v0_6().expect("Expected UserOperationV0_6") + } +} + +impl From for super::UserOperationVariant { + fn from(op: UserOperation) -> Self { + super::UserOperationVariant::V0_6(op) + } +} + +impl AsRef for super::UserOperationVariant { + /// # Panics + /// + /// Panics if the variant is not v0.6. This is for use in contexts + /// where the variant is known to be v0.6. + fn as_ref(&self) -> &UserOperation { + match self { + super::UserOperationVariant::V0_6(op) => op, + _ => panic!("Expected UserOperationV0_6"), + } + } +} + +impl AsMut for super::UserOperationVariant { + /// # Panics + /// + /// Panics if the variant is not v0.6. This is for use in contexts + /// where the variant is known to be v0.6. 
+ fn as_mut(&mut self) -> &mut UserOperation { + match self { + super::UserOperationVariant::V0_6(op) => op, + _ => panic!("Expected UserOperationV0_6"), + } + } +} + +/// User operation with optional gas fields for gas estimation +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct UserOperationOptionalGas { + /// Sender (required) + pub sender: Address, + /// Nonce (required) + pub nonce: U256, + /// Init code (required) + pub init_code: Bytes, + /// Call data (required) + pub call_data: Bytes, + /// Call gas limit (optional, set to maximum if unset) + pub call_gas_limit: Option, + /// Verification gas limit (optional, set to maximum if unset) + pub verification_gas_limit: Option, + /// Pre verification gas (optional, ignored if set) + pub pre_verification_gas: Option, + /// Max fee per gas (optional, ignored if set) + pub max_fee_per_gas: Option, + /// Max priority fee per gas (optional, ignored if set) + pub max_priority_fee_per_gas: Option, + /// Paymaster and data (required, dummy value for gas estimation) + pub paymaster_and_data: Bytes, + /// Signature (required, dummy value for gas estimation) + pub signature: Bytes, +} + +impl UserOperationOptionalGas { + /// Fill in the optional and dummy fields of the user operation with values + /// that will cause the maximum possible calldata gas cost. + pub fn max_fill(&self, max_call_gas: U256, max_verification_gas: U256) -> UserOperation { + UserOperation { + call_gas_limit: U256::MAX, + verification_gas_limit: U256::MAX, + pre_verification_gas: U256::MAX, + max_fee_per_gas: U256::MAX, + max_priority_fee_per_gas: U256::MAX, + signature: vec![255_u8; self.signature.len()].into(), + paymaster_and_data: vec![255_u8; self.paymaster_and_data.len()].into(), + ..self + .clone() + .into_user_operation(max_call_gas, max_verification_gas) + } + } + + /// Fill in the optional and dummy fields of the user operation with random values. 
+ /// + /// When estimating pre-verification gas, specifically on networks that use + compression algorithms on their data that they post to their data availability + layer (like Arbitrum), it is important to make sure that the data is + random so that it compresses to a representative size. + // + /// Note that this will slightly overestimate the calldata gas needed as it uses + /// the worst case scenario for the unknown gas values and paymaster_and_data. + pub fn random_fill(&self, max_call_gas: U256, max_verification_gas: U256) -> UserOperation { + UserOperation { + call_gas_limit: U256::from_big_endian(&Self::random_bytes(4)), // 30M max + verification_gas_limit: U256::from_big_endian(&Self::random_bytes(4)), // 30M max + pre_verification_gas: U256::from_big_endian(&Self::random_bytes(4)), // 30M max + max_fee_per_gas: U256::from_big_endian(&Self::random_bytes(8)), // 2^64 max + max_priority_fee_per_gas: U256::from_big_endian(&Self::random_bytes(8)), // 2^64 max + signature: Self::random_bytes(self.signature.len()), + paymaster_and_data: Self::random_bytes(self.paymaster_and_data.len()), + ..self + .clone() + .into_user_operation(max_call_gas, max_verification_gas) + } + } + + /// Convert into a full user operation. 
+ /// Fill in the optional fields of the user operation with default values if unset + pub fn into_user_operation( + self, + max_call_gas: U256, + max_verification_gas: U256, + ) -> UserOperation { + // If unset or zero, default these to gas limits from settings + // Cap their values to the gas limits from settings + let cgl = super::default_if_none_or_equal(self.call_gas_limit, max_call_gas, U256::zero()); + let vgl = super::default_if_none_or_equal( + self.verification_gas_limit, + max_verification_gas, + U256::zero(), + ); + let pvg = + super::default_if_none_or_equal(self.pre_verification_gas, max_call_gas, U256::zero()); + + UserOperation { + sender: self.sender, + nonce: self.nonce, + init_code: self.init_code, + call_data: self.call_data, + paymaster_and_data: self.paymaster_and_data, + signature: self.signature, + verification_gas_limit: vgl, + call_gas_limit: cgl, + pre_verification_gas: pvg, + // These aren't used in gas estimation, set to 0 if unset so that there are no payment attempts during gas estimation + max_fee_per_gas: self.max_fee_per_gas.unwrap_or_default(), + max_priority_fee_per_gas: self.max_priority_fee_per_gas.unwrap_or_default(), + } + } + + /// Abi encoded size of the user operation (with its dummy fields) + pub fn abi_encoded_size(&self) -> usize { + ABI_ENCODED_USER_OPERATION_FIXED_LEN + + super::byte_array_abi_len(&self.init_code) + + super::byte_array_abi_len(&self.call_data) + + super::byte_array_abi_len(&self.paymaster_and_data) + + super::byte_array_abi_len(&self.signature) + } + + fn random_bytes(len: usize) -> Bytes { + let mut bytes = vec![0_u8; len]; + rand::thread_rng().fill_bytes(&mut bytes); + bytes.into() + } +} + +impl From for UserOperationOptionalGas { + /// # Panics + /// + /// Panics if the variant is not v0.6. This is for use in contexts + /// where the variant is known to be v0.6. 
+ fn from(op: super::UserOperationOptionalGas) -> Self { + match op { + super::UserOperationOptionalGas::V0_6(op) => op, + _ => panic!("Expected UserOperationOptionalGasV0_6"), + } + } +} + +#[cfg(test)] +mod tests { + + use ethers::types::{Bytes, U256}; + + use super::*; + + #[test] + fn test_hash_zeroed() { + // Testing a user operation hash against the hash generated by the + // entrypoint contract getUserOpHash() function with entrypoint address + // at 0x66a15edcc3b50a663e72f1457ffd49b9ae284ddc and chain ID 1337. + // + // UserOperation = { + // sender: '0x0000000000000000000000000000000000000000', + // nonce: 0, + // initCode: '0x', + // callData: '0x', + // callGasLimit: 0, + // verificationGasLimit: 0, + // preVerificationGas: 0, + // maxFeePerGas: 0, + // maxPriorityFeePerGas: 0, + // paymasterAndData: '0x', + // signature: '0x', + // } + // + // Hash: 0xdca97c3b49558ab360659f6ead939773be8bf26631e61bb17045bb70dc983b2d + let operation = UserOperation { + sender: "0x0000000000000000000000000000000000000000" + .parse() + .unwrap(), + nonce: U256::zero(), + init_code: Bytes::default(), + call_data: Bytes::default(), + call_gas_limit: U256::zero(), + verification_gas_limit: U256::zero(), + pre_verification_gas: U256::zero(), + max_fee_per_gas: U256::zero(), + max_priority_fee_per_gas: U256::zero(), + paymaster_and_data: Bytes::default(), + signature: Bytes::default(), + }; + let entry_point = "0x66a15edcc3b50a663e72f1457ffd49b9ae284ddc" + .parse() + .unwrap(); + let chain_id = 1337; + let hash = operation.hash(entry_point, chain_id); + assert_eq!( + hash, + "0xdca97c3b49558ab360659f6ead939773be8bf26631e61bb17045bb70dc983b2d" + .parse() + .unwrap() + ); + } + + #[test] + fn test_hash() { + // Testing a user operation hash against the hash generated by the + // entrypoint contract getUserOpHash() function with entrypoint address + // at 0x66a15edcc3b50a663e72f1457ffd49b9ae284ddc and chain ID 1337. 
+ // + // UserOperation = { + // sender: '0x1306b01bc3e4ad202612d3843387e94737673f53', + // nonce: 8942, + // initCode: '0x6942069420694206942069420694206942069420', + // callData: '0x0000000000000000000000000000000000000000080085', + // callGasLimit: 10000, + // verificationGasLimit: 100000, + // preVerificationGas: 100, + // maxFeePerGas: 99999, + // maxPriorityFeePerGas: 9999999, + // paymasterAndData: + // '0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef', + // signature: + // '0xda0929f527cded8d0a1eaf2e8861d7f7e2d8160b7b13942f99dd367df4473a', + // } + // + // Hash: 0x484add9e4d8c3172d11b5feb6a3cc712280e176d278027cfa02ee396eb28afa1 + let operation = UserOperation { + sender: "0x1306b01bc3e4ad202612d3843387e94737673f53" + .parse() + .unwrap(), + nonce: 8942.into(), + init_code: "0x6942069420694206942069420694206942069420" + .parse() + .unwrap(), + call_data: "0x0000000000000000000000000000000000000000080085" + .parse() + .unwrap(), + call_gas_limit: 10000.into(), + verification_gas_limit: 100000.into(), + pre_verification_gas: 100.into(), + max_fee_per_gas: 99999.into(), + max_priority_fee_per_gas: 9999999.into(), + paymaster_and_data: + "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" + .parse() + .unwrap(), + signature: "0xda0929f527cded8d0a1eaf2e8861d7f7e2d8160b7b13942f99dd367df4473a" + .parse() + .unwrap(), + }; + let entry_point = "0x66a15edcc3b50a663e72f1457ffd49b9ae284ddc" + .parse() + .unwrap(); + let chain_id = 1337; + let hash = operation.hash(entry_point, chain_id); + assert_eq!( + hash, + "0x484add9e4d8c3172d11b5feb6a3cc712280e176d278027cfa02ee396eb28afa1" + .parse() + .unwrap() + ); + } + + #[test] + fn test_get_address_from_field() { + let paymaster_and_data: Bytes = + "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" + .parse() + .unwrap(); + let address = UserOperation::get_address_from_field(&paymaster_and_data).unwrap(); + assert_eq!( + 
address, + "0x0123456789abcdef0123456789abcdef01234567" + .parse() + .unwrap() + ); + } +} diff --git a/crates/types/src/user_operation/v0_7.rs b/crates/types/src/user_operation/v0_7.rs new file mode 100644 index 00000000..0e160010 --- /dev/null +++ b/crates/types/src/user_operation/v0_7.rs @@ -0,0 +1,994 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +use ethers::{ + abi::{encode, Token}, + types::{Address, Bytes, H256, U128, U256}, + utils::keccak256, +}; +use rand::RngCore; + +use super::{UserOperation as UserOperationTrait, UserOperationId, UserOperationVariant}; +use crate::{ + chain::ChainSpec, contracts::v0_7::shared_types::PackedUserOperation, Entity, EntryPointVersion, +}; + +/// Gas overhead required by the entry point contract for the inner call +pub const ENTRY_POINT_INNER_GAS_OVERHEAD: U256 = U256([10_000, 0, 0, 0]); + +/// Number of bytes in the fixed size portion of an ABI encoded user operation +/// sender = 32 bytes +/// nonce = 32 bytes +/// init_code = 32 bytes + 32 bytes for the length + var bytes +/// call_data = 32 bytes + 32 bytes for the length + var bytes +/// account_gas_limits = 32 bytes +/// pre_verification_gas = 32 bytes +/// gas_fees = 32 bytes +/// paymaster_and_data = 32 bytes + 32 bytes for the length + var bytes +/// signature = 32 bytes + 32 bytes for the length + var bytes +/// +/// 13 * 32 = 416 +const 
ABI_ENCODED_USER_OPERATION_FIXED_LEN: usize = 416; + +/// User Operation for Entry Point v0.7 +/// +/// Offchain version, must be packed before sending onchain +#[derive(Clone, Debug, Eq, PartialEq)] +#[non_exhaustive] // Prevent instantiation except with UserOperationBuilder +pub struct UserOperation { + /* + * Required fields + */ + /// Sender + pub sender: Address, + /// Semi-abstracted nonce + /// + /// The first 192 bits are the nonce key, the last 64 bits are the nonce value + pub nonce: U256, + /// Calldata + pub call_data: Bytes, + /// Call gas limit + pub call_gas_limit: U128, + /// Verification gas limit + pub verification_gas_limit: U128, + /// Pre-verification gas + pub pre_verification_gas: U256, + /// Max priority fee per gas + pub max_priority_fee_per_gas: U128, + /// Max fee per gas + pub max_fee_per_gas: U128, + /// Signature + pub signature: Bytes, + /* + * Optional fields + */ + /// Factory, populated if deploying a new sender contract + pub factory: Option
, + /// Factory data + pub factory_data: Bytes, + /// Paymaster, populated if using a paymaster + pub paymaster: Option
, + /// Paymaster verification gas limit + pub paymaster_verification_gas_limit: U128, + /// Paymaster post-op gas limit + pub paymaster_post_op_gas_limit: U128, + /// Paymaster data + pub paymaster_data: Bytes, + /* + * Cached fields, not part of the UO + */ + /// Entry point address + pub entry_point: Address, + /// Chain id + pub chain_id: u64, + /// The hash of the user operation + pub hash: H256, + /// The packed user operation + pub packed: PackedUserOperation, + /// The gas cost of the calldata + pub calldata_gas_cost: U256, +} + +impl UserOperationTrait for UserOperation { + type OptionalGas = UserOperationOptionalGas; + + fn entry_point_version() -> EntryPointVersion { + EntryPointVersion::V0_7 + } + + fn hash(&self, _entry_point: Address, _chain_id: u64) -> H256 { + self.hash + } + + fn hc_hash(&self) -> H256 { + H256::zero() // Not yet implemented + } + + fn id(&self) -> UserOperationId { + UserOperationId { + sender: self.sender, + nonce: self.nonce, + } + } + + fn sender(&self) -> Address { + self.sender + } + + fn nonce(&self) -> U256 { + self.nonce + } + + fn paymaster(&self) -> Option
{ + self.paymaster + } + + fn factory(&self) -> Option
{ + self.factory + } + + fn call_data(&self) -> &Bytes { + &self.call_data + } + + fn max_gas_cost(&self) -> U256 { + U256::from(self.max_fee_per_gas) + * (self.pre_verification_gas + + self.call_gas_limit + + self.verification_gas_limit + + self.paymaster_verification_gas_limit + + self.paymaster_post_op_gas_limit) + } + + fn entities(&self) -> Vec { + let mut ret = vec![Entity::account(self.sender)]; + if let Some(factory) = self.factory { + ret.push(Entity::factory(factory)); + } + if let Some(paymaster) = self.paymaster { + ret.push(Entity::paymaster(paymaster)); + } + ret + } + + fn heap_size(&self) -> usize { + self.packed.heap_size() + + self.call_data.len() + + self.signature.len() + + self.factory_data.len() + + self.paymaster_data.len() + } + + fn max_fee_per_gas(&self) -> U256 { + U256::from(self.max_fee_per_gas) + } + + fn max_priority_fee_per_gas(&self) -> U256 { + U256::from(self.max_priority_fee_per_gas) + } + + fn pre_verification_gas(&self) -> U256 { + self.pre_verification_gas + } + + fn call_gas_limit(&self) -> U256 { + U256::from(self.call_gas_limit) + } + + fn verification_gas_limit(&self) -> U256 { + U256::from(self.verification_gas_limit) + } + + fn total_verification_gas_limit(&self) -> U256 { + U256::from(self.verification_gas_limit) + U256::from(self.paymaster_verification_gas_limit) + } + + fn calc_static_pre_verification_gas( + &self, + chain_spec: &ChainSpec, + include_fixed_gas_overhead: bool, + ) -> U256 { + self.calldata_gas_cost + + chain_spec.per_user_op_v0_7_gas + + (if self.factory.is_some() { + chain_spec.per_user_op_deploy_overhead_gas + } else { + 0.into() + }) + + (if include_fixed_gas_overhead { + chain_spec.transaction_intrinsic_gas + } else { + 0.into() + }) + } + + fn required_pre_execution_buffer(&self) -> U256 { + // See EntryPoint::innerHandleOp + // + // Overhead prior to execution of the user operation is required to be + // At least the call gas limit, plus the paymaster post-op gas limit, plus + // a static 
overhead of 10K gas. + // + // To handle the 63/64ths rule also need to add a buffer of 1/63rd of that total* + ENTRY_POINT_INNER_GAS_OVERHEAD + + U256::from(self.paymaster_post_op_gas_limit) + + (U256::from(64) + * (U256::from(self.call_gas_limit) + + U256::from(self.paymaster_post_op_gas_limit) + + ENTRY_POINT_INNER_GAS_OVERHEAD) + / U256::from(63)) + } + + fn clear_signature(&mut self) { + self.signature = Bytes::new(); + self.packed = pack_user_operation(self.clone()); + self.hash = hash_packed_user_operation(&self.packed, self.entry_point, self.chain_id); + } + + fn abi_encoded_size(&self) -> usize { + ABI_ENCODED_USER_OPERATION_FIXED_LEN + + super::byte_array_abi_len(&self.packed.init_code) + + super::byte_array_abi_len(&self.packed.call_data) + + super::byte_array_abi_len(&self.packed.paymaster_and_data) + + super::byte_array_abi_len(&self.packed.signature) + } +} + +impl UserOperation { + /// Packs the user operation to its offchain representation + pub fn pack(self) -> PackedUserOperation { + self.packed + } + + /// Returns a reference to the packed user operation + pub fn packed(&self) -> &PackedUserOperation { + &self.packed + } +} + +impl From for UserOperation { + /// Converts a UserOperationVariant to a UserOperation 0.7 + /// + /// # Panics + /// + /// Panics if the variant is not v0.7. This is for use in contexts + /// where the variant is known to be v0.7. + fn from(value: UserOperationVariant) -> Self { + value.into_v0_7().expect("Expected UserOperationV0_7") + } +} + +impl From for super::UserOperationVariant { + fn from(op: UserOperation) -> Self { + super::UserOperationVariant::V0_7(op) + } +} + +impl AsRef for super::UserOperationVariant { + /// # Panics + /// + /// Panics if the variant is not v0.7. This is for use in contexts + /// where the variant is known to be v0.7. 
+ fn as_ref(&self) -> &UserOperation { + match self { + super::UserOperationVariant::V0_7(op) => op, + _ => panic!("Expected UserOperationV0_7"), + } + } +} + +impl AsMut for super::UserOperationVariant { + /// # Panics + /// + /// Panics if the variant is not v0.7. This is for use in contexts + /// where the variant is known to be v0.7. + fn as_mut(&mut self) -> &mut UserOperation { + match self { + super::UserOperationVariant::V0_7(op) => op, + _ => panic!("Expected UserOperationV0_7"), + } + } +} + +/// User Operation with optional gas for Entry Point v0.7 +#[derive(Debug, Clone, Eq, PartialEq)] +pub struct UserOperationOptionalGas { + /* + * Required fields + */ + /// Sender + pub sender: Address, + /// Semi-abstracted nonce + pub nonce: U256, + /// Calldata + pub call_data: Bytes, + /// Signature, typically a dummy value for optional gas + pub signature: Bytes, + /* + * Optional fields + */ + /// Call gas limit + pub call_gas_limit: Option, + /// Verification gas limit + pub verification_gas_limit: Option, + /// Pre-verification gas + pub pre_verification_gas: Option, + /// Max priority fee per gas + pub max_priority_fee_per_gas: Option, + /// Max fee per gas + pub max_fee_per_gas: Option, + /// Factory + pub factory: Option
, + /// Factory data + pub factory_data: Bytes, + /// Paymaster + pub paymaster: Option
, + /// Paymaster verification gas limit + pub paymaster_verification_gas_limit: Option, + /// Paymaster post-op gas limit + pub paymaster_post_op_gas_limit: Option, + /// Paymaster data + pub paymaster_data: Bytes, +} + +impl UserOperationOptionalGas { + /// Fill in the optional and dummy fields of the user operation with values + /// that will cause the maximum possible calldata gas cost. + pub fn max_fill(&self, chain_spec: &ChainSpec) -> UserOperation { + let max_4 = U128::from(u32::MAX); + let max_8 = U128::from(u64::MAX); + + let mut builder = UserOperationBuilder::new( + chain_spec, + UserOperationRequiredFields { + sender: self.sender, + nonce: self.nonce, + call_data: self.call_data.clone(), + signature: vec![255_u8; self.signature.len()].into(), + call_gas_limit: max_4, + verification_gas_limit: max_4, + pre_verification_gas: max_4.into(), + max_priority_fee_per_gas: max_8, + max_fee_per_gas: max_8, + }, + ); + + if self.paymaster.is_some() { + builder = builder.paymaster( + self.paymaster.unwrap(), + max_4, + max_4, + vec![255_u8; self.paymaster_data.len()].into(), + ); + } + if self.factory.is_some() { + builder = builder.factory( + self.factory.unwrap(), + vec![255_u8; self.factory_data.len()].into(), + ); + } + + builder.build() + } + + /// Fill in the optional and dummy fields of the user operation with random values. + /// + /// When estimating pre-verification gas, specifically on networks that use + /// compression algorithms on their data that they post to their data availability + /// layer (like Arbitrum), it is important to make sure that the data that is + /// random such that it compresses to a representative size. + // + /// Note that this will slightly overestimate the calldata gas needed as it uses + /// the worst case scenario for the unknown gas values and paymaster_and_data. 
+ pub fn random_fill(&self, chain_spec: &ChainSpec) -> UserOperation { + let mut builder = UserOperationBuilder::new( + chain_spec, + UserOperationRequiredFields { + sender: self.sender, + nonce: self.nonce, + call_data: self.call_data.clone(), + signature: Self::random_bytes(self.signature.len()), + call_gas_limit: U128::from_big_endian(&Self::random_bytes(4)), + verification_gas_limit: U128::from_big_endian(&Self::random_bytes(4)), + pre_verification_gas: U256::from_big_endian(&Self::random_bytes(4)), + max_priority_fee_per_gas: U128::from_big_endian(&Self::random_bytes(8)), + max_fee_per_gas: U128::from_big_endian(&Self::random_bytes(8)), + }, + ); + + if self.paymaster.is_some() { + builder = builder.paymaster( + self.paymaster.unwrap(), + U128::from_big_endian(&Self::random_bytes(4)), + U128::from_big_endian(&Self::random_bytes(4)), + Self::random_bytes(self.paymaster_data.len()), + ) + } + if self.factory.is_some() { + builder = builder.factory( + self.factory.unwrap(), + Self::random_bytes(self.factory_data.len()), + ) + } + + builder.build() + } + + /// Convert into a builder for producing a full user operation. 
+ /// Fill in the optional fields of the user operation with default values if unset + pub fn into_user_operation_builder( + self, + chian_spec: &ChainSpec, + max_call_gas: U128, + max_verification_gas: U128, + max_paymaster_verification_gas: U128, + ) -> UserOperationBuilder<'_> { + // If unset or zero, default these to gas limits from settings + // Cap their values to the gas limits from settings + let cgl = super::default_if_none_or_equal(self.call_gas_limit, max_call_gas, U128::zero()); + let vgl = super::default_if_none_or_equal( + self.verification_gas_limit, + max_verification_gas, + U128::zero(), + ); + let pgl = super::default_if_none_or_equal( + self.paymaster_verification_gas_limit, + max_paymaster_verification_gas, + U128::zero(), + ); + let pvg = super::default_if_none_or_equal( + self.pre_verification_gas, + max_call_gas.into(), + U256::zero(), + ); + + let mut builder = UserOperationBuilder::new( + chian_spec, + UserOperationRequiredFields { + sender: self.sender, + nonce: self.nonce, + call_data: self.call_data, + signature: self.signature, + call_gas_limit: cgl, + verification_gas_limit: vgl, + pre_verification_gas: pvg, + // These are unused in gas estimation, so default to zero + max_priority_fee_per_gas: self.max_priority_fee_per_gas.unwrap_or_default(), + max_fee_per_gas: self.max_fee_per_gas.unwrap_or_default(), + }, + ); + if let Some(factory) = self.factory { + builder = builder.factory(factory, self.factory_data); + } + if let Some(paymaster) = self.paymaster { + let paymaster_verification_gas_limit = pgl; + // If the user doesn't supply a post op, assume unused and zero + let paymaster_post_op_gas_limit = self.paymaster_post_op_gas_limit.unwrap_or_default(); + builder = builder.paymaster( + paymaster, + paymaster_verification_gas_limit, + paymaster_post_op_gas_limit, + self.paymaster_data, + ); + } + builder + } + + /// Abi encoded size of the user operation (with its dummy fields) + pub fn abi_encoded_size(&self) -> usize { + let mut base 
= ABI_ENCODED_USER_OPERATION_FIXED_LEN + + super::byte_array_abi_len(&self.call_data) + + super::byte_array_abi_len(&self.signature); + if self.factory.is_some() { + base += super::byte_array_abi_len(&self.factory_data) + 32; // account for factory address + } + if self.paymaster.is_some() { + base += super::byte_array_abi_len(&self.paymaster_data) + 64; // account for paymaster address and gas limits + } + + base + } + + fn random_bytes(len: usize) -> Bytes { + let mut bytes = vec![0_u8; len]; + rand::thread_rng().fill_bytes(&mut bytes); + bytes.into() + } +} + +impl From for UserOperationOptionalGas { + /// # Panics + /// + /// Panics if the variant is not v0.7. This is for use in contexts + /// where the variant is known to be v0.7. + fn from(op: super::UserOperationOptionalGas) -> Self { + match op { + super::UserOperationOptionalGas::V0_7(op) => op, + _ => panic!("Expected UserOperationOptionalGasV0_7"), + } + } +} + +/// Builder for UserOperation +/// +/// Used to create a v0.7 while ensuring all required fields and grouped fields are present +pub struct UserOperationBuilder<'a> { + // chain spec + chain_spec: &'a ChainSpec, + + // required fields + required: UserOperationRequiredFields, + + // optional fields + factory: Option
, + factory_data: Bytes, + paymaster: Option
, + paymaster_verification_gas_limit: U128, + paymaster_post_op_gas_limit: U128, + paymaster_data: Bytes, + packed_uo: Option, +} + +/// Required fields for UserOperation v0.7 +pub struct UserOperationRequiredFields { + /// Sender + pub sender: Address, + /// Semi-abstracted nonce + pub nonce: U256, + /// Calldata + pub call_data: Bytes, + /// Call gas limit + pub call_gas_limit: U128, + /// Verification gas limit + pub verification_gas_limit: U128, + /// Pre-verification gas + pub pre_verification_gas: U256, + /// Max priority fee per gas + pub max_priority_fee_per_gas: U128, + /// Max fee per gas + pub max_fee_per_gas: U128, + /// Signature + pub signature: Bytes, +} + +impl<'a> UserOperationBuilder<'a> { + /// Creates a new builder + pub fn new(chain_spec: &'a ChainSpec, required: UserOperationRequiredFields) -> Self { + Self { + chain_spec, + required, + factory: None, + factory_data: Bytes::new(), + paymaster: None, + paymaster_verification_gas_limit: U128::zero(), + paymaster_post_op_gas_limit: U128::zero(), + paymaster_data: Bytes::new(), + packed_uo: None, + } + } + + /// Creates a builder from an existing UO + pub fn from_uo(uo: UserOperation, chain_spec: &'a ChainSpec) -> Self { + Self { + chain_spec, + required: UserOperationRequiredFields { + sender: uo.sender, + nonce: uo.nonce, + call_data: uo.call_data, + call_gas_limit: uo.call_gas_limit, + verification_gas_limit: uo.verification_gas_limit, + pre_verification_gas: uo.pre_verification_gas, + max_priority_fee_per_gas: uo.max_priority_fee_per_gas, + max_fee_per_gas: uo.max_fee_per_gas, + signature: uo.signature, + }, + factory: uo.factory, + factory_data: uo.factory_data, + paymaster: uo.paymaster, + paymaster_verification_gas_limit: uo.paymaster_verification_gas_limit, + paymaster_post_op_gas_limit: uo.paymaster_post_op_gas_limit, + paymaster_data: uo.paymaster_data, + packed_uo: None, + } + } + + /// Sets the factory and factory data + pub fn factory(mut self, factory: Address, factory_data: Bytes) 
-> Self { + self.factory = Some(factory); + self.factory_data = factory_data; + self + } + + /// Sets the paymaster and associated fields + pub fn paymaster( + mut self, + paymaster: Address, + paymaster_verification_gas_limit: U128, + paymaster_post_op_gas_limit: U128, + paymaster_data: Bytes, + ) -> Self { + self.paymaster = Some(paymaster); + self.paymaster_verification_gas_limit = paymaster_verification_gas_limit; + self.paymaster_post_op_gas_limit = paymaster_post_op_gas_limit; + self.paymaster_data = paymaster_data; + self + } + + /// Sets the pre-verification gas + pub fn pre_verification_gas(mut self, pre_verification_gas: U256) -> Self { + self.required.pre_verification_gas = pre_verification_gas; + self + } + + /// Sets the verification gas limit + pub fn verification_gas_limit(mut self, verification_gas_limit: U128) -> Self { + self.required.verification_gas_limit = verification_gas_limit; + self + } + + /// Sets the call gas limit + pub fn call_gas_limit(mut self, call_gas_limit: U128) -> Self { + self.required.call_gas_limit = call_gas_limit; + self + } + + /// Sets the max fee per gas + pub fn max_fee_per_gas(mut self, max_fee_per_gas: U128) -> Self { + self.required.max_fee_per_gas = max_fee_per_gas; + self + } + + /// Sets the max priority fee per gas + pub fn max_priority_fee_per_gas(mut self, max_priority_fee_per_gas: U128) -> Self { + self.required.max_priority_fee_per_gas = max_priority_fee_per_gas; + self + } + + /// Sets the paymaster verification gas limit + pub fn paymaster_verification_gas_limit( + mut self, + paymaster_verification_gas_limit: U128, + ) -> Self { + self.paymaster_verification_gas_limit = paymaster_verification_gas_limit; + self + } + + /// Sets the paymaster post-op gas limit + pub fn paymaster_post_op_gas_limit(mut self, paymaster_post_op_gas_limit: U128) -> Self { + self.paymaster_post_op_gas_limit = paymaster_post_op_gas_limit; + self + } + + /// Sets the packed user operation, if known beforehand + pub fn packed(mut 
self, packed: PackedUserOperation) -> Self { + self.packed_uo = Some(packed); + self + } + + /// Builds the UserOperation + pub fn build(self) -> UserOperation { + let uo = UserOperation { + sender: self.required.sender, + nonce: self.required.nonce, + factory: self.factory, + factory_data: self.factory_data, + call_data: self.required.call_data, + call_gas_limit: self.required.call_gas_limit, + verification_gas_limit: self.required.verification_gas_limit, + pre_verification_gas: self.required.pre_verification_gas, + max_priority_fee_per_gas: self.required.max_priority_fee_per_gas, + max_fee_per_gas: self.required.max_fee_per_gas, + paymaster: self.paymaster, + paymaster_verification_gas_limit: self.paymaster_verification_gas_limit, + paymaster_post_op_gas_limit: self.paymaster_post_op_gas_limit, + paymaster_data: self.paymaster_data, + signature: self.required.signature, + entry_point: self.chain_spec.entry_point_address_v0_7, + chain_id: self.chain_spec.id, + hash: H256::zero(), + packed: PackedUserOperation::default(), + calldata_gas_cost: U256::zero(), + }; + + let packed = self + .packed_uo + .unwrap_or_else(|| pack_user_operation(uo.clone())); + let hash = hash_packed_user_operation( + &packed, + self.chain_spec.entry_point_address_v0_7, + self.chain_spec.id, + ); + let calldata_gas_cost = super::op_calldata_gas_cost( + packed.clone(), + self.chain_spec.calldata_zero_byte_gas, + self.chain_spec.calldata_non_zero_byte_gas, + self.chain_spec.per_user_op_word_gas, + ); + + UserOperation { + hash, + packed, + calldata_gas_cost, + ..uo + } + } +} + +fn pack_user_operation(uo: UserOperation) -> PackedUserOperation { + let init_code = if let Some(factory) = uo.factory { + let mut init_code = factory.as_bytes().to_vec(); + init_code.extend_from_slice(&uo.factory_data); + Bytes::from(init_code) + } else { + Bytes::new() + }; + + let account_gas_limits = concat_128( + uo.verification_gas_limit.low_u128().to_be_bytes(), + uo.call_gas_limit.low_u128().to_be_bytes(), + ); 
+ + let gas_fees = concat_128( + uo.max_priority_fee_per_gas.low_u128().to_be_bytes(), + uo.max_fee_per_gas.low_u128().to_be_bytes(), + ); + + let paymaster_and_data = if let Some(paymaster) = uo.paymaster { + let mut paymaster_and_data = paymaster.as_bytes().to_vec(); + paymaster_and_data + .extend_from_slice(&uo.paymaster_verification_gas_limit.low_u128().to_be_bytes()); + paymaster_and_data + .extend_from_slice(&uo.paymaster_post_op_gas_limit.low_u128().to_be_bytes()); + paymaster_and_data.extend_from_slice(&uo.paymaster_data); + Bytes::from(paymaster_and_data) + } else { + Bytes::new() + }; + + PackedUserOperation { + sender: uo.sender, + nonce: uo.nonce, + init_code, + call_data: uo.call_data, + account_gas_limits, + pre_verification_gas: uo.pre_verification_gas, + gas_fees, + paymaster_and_data, + signature: uo.signature, + } +} + +fn unpack_user_operation(puo: PackedUserOperation, chain_spec: &ChainSpec) -> UserOperation { + let mut builder = UserOperationBuilder::new( + chain_spec, + UserOperationRequiredFields { + sender: puo.sender, + nonce: puo.nonce, + call_data: puo.call_data.clone(), + call_gas_limit: U128::from_big_endian(&puo.account_gas_limits[16..]), + verification_gas_limit: U128::from_big_endian(&puo.account_gas_limits[..16]), + pre_verification_gas: puo.pre_verification_gas, + max_priority_fee_per_gas: U128::from_big_endian(&puo.gas_fees[..16]), + max_fee_per_gas: U128::from_big_endian(&puo.gas_fees[16..]), + signature: puo.signature.clone(), + }, + ); + + builder = builder.packed(puo.clone()); + + if !puo.init_code.is_empty() { + let factory = Address::from_slice(&puo.init_code[..20]); + let factory_data = Bytes::from_iter(&puo.init_code[20..]); + + builder = builder.factory(factory, factory_data); + } + + if !puo.paymaster_and_data.is_empty() { + let paymaster = Address::from_slice(&puo.paymaster_and_data[..20]); + let paymaster_verification_gas_limit = + U128::from_big_endian(&puo.paymaster_and_data[20..36]); + let 
paymaster_post_op_gas_limit = U128::from_big_endian(&puo.paymaster_and_data[36..52]); + let paymaster_data = Bytes::from_iter(&puo.paymaster_and_data[52..]); + + builder = builder.paymaster( + paymaster, + paymaster_verification_gas_limit, + paymaster_post_op_gas_limit, + paymaster_data, + ); + } + + builder.build() +} + +fn hash_packed_user_operation( + puo: &PackedUserOperation, + entry_point: Address, + chain_id: u64, +) -> H256 { + let hash_init_code = keccak256(&puo.init_code); + let hash_call_data = keccak256(&puo.call_data); + let hash_paymaster_and_data = keccak256(&puo.paymaster_and_data); + + let encoded: Bytes = encode(&[ + Token::Address(puo.sender), + Token::Uint(puo.nonce), + Token::FixedBytes(hash_init_code.to_vec()), + Token::FixedBytes(hash_call_data.to_vec()), + Token::FixedBytes(puo.account_gas_limits.to_vec()), + Token::Uint(puo.pre_verification_gas), + Token::FixedBytes(puo.gas_fees.to_vec()), + Token::FixedBytes(hash_paymaster_and_data.to_vec()), + ]) + .into(); + + let hashed = keccak256(encoded); + + keccak256(encode(&[ + Token::FixedBytes(hashed.to_vec()), + Token::Address(entry_point), + Token::Uint(chain_id.into()), + ])) + .into() +} + +fn concat_128(a: [u8; 16], b: [u8; 16]) -> [u8; 32] { + std::array::from_fn(|i| { + if let Some(i) = i.checked_sub(a.len()) { + b[i] + } else { + a[i] + } + }) +} + +impl PackedUserOperation { + /// Unpacks the user operation to its offchain representation + pub fn unpack(self, chain_spec: &ChainSpec) -> UserOperation { + unpack_user_operation(self.clone(), chain_spec) + } + + fn heap_size(&self) -> usize { + self.init_code.len() + self.call_data.len() + self.paymaster_and_data.len() + } +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use ethers::utils::hex::{self, FromHex}; + + use super::*; + + #[test] + fn test_pack_unpack() { + let cs = ChainSpec::default(); + let builder = UserOperationBuilder::new( + &cs, + UserOperationRequiredFields { + sender: Address::zero(), + nonce: 0.into(), + 
call_data: Bytes::new(), + call_gas_limit: 0.into(), + verification_gas_limit: 0.into(), + pre_verification_gas: 0.into(), + max_priority_fee_per_gas: 0.into(), + max_fee_per_gas: 0.into(), + signature: Bytes::new(), + }, + ); + + let uo = builder.build(); + let packed = uo.clone().pack(); + let unpacked = packed.unpack(&cs); + + assert_eq!(uo, unpacked); + } + + #[test] + fn test_pack_unpack_2() { + let cs = ChainSpec::default(); + let builder = UserOperationBuilder::new( + &cs, + UserOperationRequiredFields { + sender: Address::zero(), + nonce: 0.into(), + call_data: Bytes::new(), + call_gas_limit: 0.into(), + verification_gas_limit: 0.into(), + pre_verification_gas: 0.into(), + max_priority_fee_per_gas: 0.into(), + max_fee_per_gas: 0.into(), + signature: Bytes::new(), + }, + ); + let builder = builder + .factory(Address::random(), "0xdeadbeef".parse().unwrap()) + .paymaster( + Address::random(), + 0.into(), + 0.into(), + "0xbeefdead".parse().unwrap(), + ); + + let uo = builder.build(); + let packed = uo.clone().pack(); + let unpacked = packed.unpack(&cs); + + assert_eq!(uo, unpacked); + } + + #[test] + fn test_hash() { + // From https://sepolia.etherscan.io/tx/0x51c1f40ce6e997a54b39a0eb783e472c2afa4ed3f2f11f97986f7f3a347b9d50 + let cs = ChainSpec { + id: 11155111, + ..Default::default() + }; + + let puo = PackedUserOperation { + sender: Address::from_str("0xb292Cf4a8E1fF21Ac27C4f94071Cd02C022C414b").unwrap(), + nonce: U256::from("0xF83D07238A7C8814A48535035602123AD6DBFA63000000000000000000000001"), + init_code: Bytes::from_hex("0x").unwrap(), + call_data: 
Bytes::from_hex("0xe9ae5c530000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000001d8b292cf4a8e1ff21ac27c4f94071cd02c022c414b00000000000000000000000000000000000000000000000000000000000000009517e29f0000000000000000000000000000000000000000000000000000000000000002000000000000000000000000ad6330089d9a1fe89f4020292e1afe9969a5a2fc00000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000120000000000000000000000000000000000000000000000000000000000001518000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000018e2fbe8980000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000800000000000000000000000002372912728f93ab3daaaebea4f87e6e28476d987000000000000000000000000000000000000000000000000002386f26fc10000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap(), + account_gas_limits: hex::decode("0x000000000000000000000000000114fc0000000000000000000000000012c9b5") + .unwrap() + .try_into() + .unwrap(), + pre_verification_gas: U256::from(48916), + gas_fees: hex::decode("0x000000000000000000000000524121000000000000000000000000109a4a441a") + .unwrap() + .try_into() + .unwrap(), + paymaster_and_data: Bytes::from_hex("0x").unwrap(), + signature: Bytes::from_hex("0x3c7bfe22c9c2ef8994a9637bcc4df1741c5dc0c25b209545a7aeb20f7770f351479b683bd17c4d55bc32e2a649c8d2dff49dcfcc1f3fd837bcd88d1e69a434cf1c").unwrap(), + }; + + let hash = + H256::from_str("0xe486401370d145766c3cf7ba089553214a1230d38662ae532c9b62eb6dadcf7e") + .unwrap(); + let uo = puo.unpack(&cs); + assert_eq!(uo.hash(cs.entry_point_address_v0_7, cs.id), hash); + } + + #[test] + fn test_builder() { + let 
factory_address = Address::random(); + let paymaster_address = Address::random(); + let cs = ChainSpec::default(); + + let uo = UserOperationBuilder::new( + &cs, + UserOperationRequiredFields { + sender: Address::zero(), + nonce: 0.into(), + call_data: Bytes::new(), + call_gas_limit: 0.into(), + verification_gas_limit: 0.into(), + pre_verification_gas: 0.into(), + max_priority_fee_per_gas: 0.into(), + max_fee_per_gas: 0.into(), + signature: Bytes::new(), + }, + ) + .factory(factory_address, Bytes::new()) + .paymaster(paymaster_address, 10.into(), 20.into(), Bytes::new()) + .build(); + + assert_eq!(uo.factory, Some(factory_address)); + assert_eq!(uo.paymaster, Some(paymaster_address)); + assert_eq!(uo.paymaster_verification_gas_limit, 10.into()); + assert_eq!(uo.paymaster_post_op_gas_limit, 20.into()); + } +} diff --git a/crates/types/src/validation_results.rs b/crates/types/src/validation_results.rs new file mode 100644 index 00000000..c49b1b2d --- /dev/null +++ b/crates/types/src/validation_results.rs @@ -0,0 +1,453 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. 
+ +use std::ops::Add; + +use ethers::{ + abi::{self, AbiDecode, AbiError}, + types::{Address, Bytes, H160, U256}, +}; +use rundler_utils::eth::ContractRevertError; + +use crate::{ + contracts::{ + v0_6::i_entry_point::{ + FailedOp as FailedOpV0_6, ValidationResult as ValidationResultV0_6, + ValidationResultWithAggregation as ValidationResultWithAggregationV0_6, + }, + v0_7::entry_point_simulations::{ + AggregatorStakeInfo as AggregatorStakeInfoV0_7, FailedOp as FailedOpV0_7, + FailedOpWithRevert as FailedOpWithRevertV0_7, ReturnInfo as ReturnInfoV0_7, + StakeInfo as StakeInfoV0_7, ValidationResult as ValidationResultV0_7, + }, + }, + Timestamp, ValidTimeRange, TIME_RANGE_BUFFER, +}; + +/// Both v0.6 and v0.7 contracts use this aggregator address to indicate that the signature validation failed +/// Zero is also used to indicate that no aggregator is used AND that the signature validation failed. +const SIG_VALIDATION_FAILED: Address = + H160([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]); + +/// Error during validation simulation +#[derive(Clone, Debug, thiserror::Error, Ord, PartialOrd, Eq, PartialEq)] +pub enum ValidationRevert { + /// The entry point reverted + #[error("{0}")] + EntryPoint(String), + /// The operation reverted + #[error("{}", Self::display_operation_error(.entry_point_reason, .inner_revert_reason))] + Operation { + /// Error message returned by entry point + entry_point_reason: String, + /// Revert data of the validation failure returned by an entity + inner_revert_data: Bytes, + /// Message parsed from the inner revert data, if the entity used the + /// `revert` or `require` Solidity keywords + inner_revert_reason: Option, + }, + /// Validation everted with an unknown signature + #[error("revert with bytes: {0:?}")] + Unknown(Bytes), +} + +impl ValidationRevert { + /// Extracts the error code string returned by the entry point, e.g. + /// `"AA24"`, if it exists. 
+ pub fn entry_point_error_code(&self) -> Option<&str> { + let message = match self { + Self::EntryPoint(message) => Some(message), + Self::Operation { + entry_point_reason: entry_point_message, + .. + } => Some(entry_point_message), + Self::Unknown(_) => None, + }; + message + .filter(|m| m.len() >= 4 && m.starts_with("AA")) + .map(|m| &m[..4]) + } + + fn display_operation_error( + entry_point_message: &str, + inner_message: &Option, + ) -> String { + match inner_message { + Some(inner_message) => format!("{entry_point_message} : {inner_message}"), + None => entry_point_message.to_owned(), + } + } +} + +impl From for ValidationRevert { + fn from(value: ContractRevertError) -> Self { + ValidationRevert::EntryPoint(value.reason) + } +} + +impl From for ValidationRevert { + fn from(value: FailedOpV0_6) -> Self { + ValidationRevert::EntryPoint(value.reason) + } +} + +impl From for ValidationRevert { + fn from(value: FailedOpV0_7) -> Self { + ValidationRevert::EntryPoint(value.reason) + } +} + +impl From for ValidationRevert { + fn from(value: FailedOpWithRevertV0_7) -> Self { + let inner_message = ContractRevertError::decode(&value.inner) + .ok() + .map(|err| err.reason); + ValidationRevert::Operation { + entry_point_reason: value.reason, + inner_revert_data: value.inner, + inner_revert_reason: inner_message, + } + } +} + +/// Error during validation simulation +#[derive(Debug, thiserror::Error)] +pub enum ValidationError { + /// The validation reverted + #[error(transparent)] + Revert(#[from] ValidationRevert), + /// Other error + #[error(transparent)] + Other(#[from] anyhow::Error), +} + +/// Equivalent to the generated `ValidationResult` or +/// `ValidationResultWithAggregation` from `EntryPoint`, but with named structs +/// instead of tuples and with a helper for deserializing. 
+#[derive(Clone, Debug)] +pub struct ValidationOutput { + /// The return info from the validation function + pub return_info: ValidationReturnInfo, + /// The stake info for the sender + pub sender_info: StakeInfo, + /// The stake info for the factory + pub factory_info: StakeInfo, + /// The stake info for the paymaster + pub paymaster_info: StakeInfo, + /// Optional aggregator_info + pub aggregator_info: Option, +} + +impl ValidationOutput { + /// Decode a v0.6 validation result from bytes. + pub fn decode_v0_6(bytes: impl AsRef<[u8]>) -> Result { + if let Ok(result) = ValidationResultV0_6::decode(bytes.as_ref()) { + return Ok(result.into()); + } + if let Ok(result) = ValidationResultWithAggregationV0_6::decode(bytes) { + return Ok(result.into()); + } + Err(AbiError::DecodingError(abi::Error::InvalidData)) + } + + /// Decode a v0.6 validation result from hex. + pub fn decode_v0_6_hex(hex: impl AsRef) -> Result { + let bytes: Bytes = hex.as_ref().parse()?; + Self::decode_v0_6(&bytes) + } + + /// Decode a v0.7 validation result from bytes. + pub fn decode_v0_7(bytes: impl AsRef<[u8]>) -> Result { + if let Ok(result) = ValidationResultV0_7::decode(bytes.as_ref()) { + return Ok(result.into()); + } + Err(AbiError::DecodingError(abi::Error::InvalidData)) + } + + /// Decode a v0.7 validation result from hex. 
+ pub fn decode_v0_7_hex(hex: impl AsRef) -> Result { + let bytes: Bytes = hex.as_ref().parse()?; + Self::decode_v0_7(&bytes) + } +} + +impl From for ValidationOutput { + fn from(value: ValidationResultV0_6) -> Self { + let ValidationResultV0_6 { + return_info, + sender_info, + factory_info, + paymaster_info, + } = value; + Self { + return_info: return_info.into(), + sender_info: sender_info.into(), + factory_info: factory_info.into(), + paymaster_info: paymaster_info.into(), + aggregator_info: None, + } + } +} + +impl From for ValidationOutput { + fn from(value: ValidationResultWithAggregationV0_6) -> Self { + let ValidationResultWithAggregationV0_6 { + return_info, + sender_info, + factory_info, + paymaster_info, + aggregator_info, + } = value; + Self { + return_info: return_info.into(), + sender_info: sender_info.into(), + factory_info: factory_info.into(), + paymaster_info: paymaster_info.into(), + aggregator_info: Some(aggregator_info.into()), + } + } +} + +impl From for ValidationOutput { + fn from(value: ValidationResultV0_7) -> Self { + let ValidationResultV0_7 { + return_info, + sender_info, + factory_info, + paymaster_info, + aggregator_info, + } = value; + + let aggregator_info = if aggregator_info.aggregator.is_zero() { + None + } else { + Some(aggregator_info.into()) + }; + + Self { + return_info: return_info.into(), + sender_info: sender_info.into(), + factory_info: factory_info.into(), + paymaster_info: paymaster_info.into(), + aggregator_info, + } + } +} + +/// ValidationReturnInfo from EntryPoint contract +#[derive(Clone, Debug)] +pub struct ValidationReturnInfo { + /// The amount of gas used before the op was executed (pre verification gas and validation gas) + pub pre_op_gas: U256, + /// Whether the account signature verification failed + pub account_sig_failed: bool, + /// Whether the paymaster signature verification failed + pub paymaster_sig_failed: bool, + /// The time after which the op is valid + pub valid_after: Timestamp, + /// The time 
until which the op is valid + pub valid_until: Timestamp, + /// The paymaster context + pub paymaster_context: Bytes, +} + +impl ValidationReturnInfo { + /// helper function to check if the returned time range is valid + pub fn is_valid_time_range(&self) -> bool { + let now = Timestamp::now(); + self.valid_after <= now || self.valid_until > now.add(TIME_RANGE_BUFFER) + } +} + +// Conversion for v0.6 +impl From<(U256, U256, bool, u64, u64, Bytes)> for ValidationReturnInfo { + fn from(value: (U256, U256, bool, u64, u64, Bytes)) -> Self { + let ( + pre_op_gas, + _, /* prefund */ + sig_failed, + valid_after, + valid_until, + paymaster_context, + ) = value; + // In v0.6 if one signature fails both do + Self { + pre_op_gas, + account_sig_failed: sig_failed, + paymaster_sig_failed: sig_failed, + valid_after: valid_after.into(), + valid_until: valid_until.into(), + paymaster_context, + } + } +} + +impl From for ValidationReturnInfo { + fn from(value: ReturnInfoV0_7) -> Self { + let ReturnInfoV0_7 { + pre_op_gas, + prefund: _, + account_validation_data, + paymaster_validation_data, + paymaster_context, + } = value; + + let account = parse_validation_data(account_validation_data); + let paymaster = parse_validation_data(paymaster_validation_data); + + let intersect_range = account + .valid_time_range() + .intersect(paymaster.valid_time_range()); + + Self { + pre_op_gas, + account_sig_failed: !account.signature_valid(), + paymaster_sig_failed: !paymaster.signature_valid(), + valid_after: intersect_range.valid_after, + valid_until: intersect_range.valid_until, + paymaster_context, + } + } +} + +/// ValidationData from EntryPoint contract +pub struct ValidationData { + aggregator: Address, + valid_after: u64, + valid_until: u64, +} + +impl ValidationData { + /// Valid time range for the validation data + pub fn valid_time_range(&self) -> ValidTimeRange { + ValidTimeRange::new(self.valid_after.into(), self.valid_until.into()) + } + + /// Whether the signature is valid + pub fn 
signature_valid(&self) -> bool { + self.aggregator != SIG_VALIDATION_FAILED + } + + /// The aggregator address, if any + pub fn aggregator(&self) -> Option
{ + if self.aggregator == SIG_VALIDATION_FAILED || self.aggregator.is_zero() { + None + } else { + Some(self.aggregator) + } + } +} + +/// Parse the validation data from a U256 +/// +/// Works for both v0.6 and v0.7 validation data +pub fn parse_validation_data(data: U256) -> ValidationData { + let slice: [u8; 32] = data.into(); + let aggregator = Address::from_slice(&slice[12..]); + + let mut buf = [0; 8]; + buf[2..8].copy_from_slice(&slice[6..12]); + let valid_until = u64::from_be_bytes(buf); + + let mut buf = [0; 8]; + buf[2..8].copy_from_slice(&slice[..6]); + let valid_after = u64::from_be_bytes(buf); + + ValidationData { + aggregator, + valid_after, + valid_until, + } +} + +/// StakeInfo from EntryPoint contract +#[derive(Clone, Copy, Debug)] +pub struct StakeInfo { + /// The amount of stake + pub stake: U256, + /// The delay for unstaking + pub unstake_delay_sec: U256, +} + +impl From<(U256, U256)> for StakeInfo { + fn from((stake, unstake_delay_sec): (U256, U256)) -> Self { + Self { + stake, + unstake_delay_sec, + } + } +} + +impl From for StakeInfo { + fn from(value: StakeInfoV0_7) -> Self { + let StakeInfoV0_7 { + stake, + unstake_delay_sec, + } = value; + Self { + stake, + unstake_delay_sec, + } + } +} + +/// AggregatorInfo from EntryPoint contract +#[derive(Clone, Copy, Debug)] +pub struct AggregatorInfo { + /// The address of the aggregator + pub address: Address, + /// The stake info for the aggregator + pub stake_info: StakeInfo, +} + +impl From<(Address, (U256, U256))> for AggregatorInfo { + fn from((address, stake_info): (Address, (U256, U256))) -> Self { + Self { + address, + stake_info: stake_info.into(), + } + } +} + +impl From for AggregatorInfo { + fn from(value: AggregatorStakeInfoV0_7) -> Self { + let AggregatorStakeInfoV0_7 { + aggregator, + stake_info, + } = value; + Self { + address: aggregator, + stake_info: stake_info.into(), + } + } +} + +#[cfg(test)] +mod tests { + use super::parse_validation_data; + + #[test] + fn 
test_parse_validation_data() { + let data = "0x00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff"; + let parsed = parse_validation_data(data.into()); + assert_eq!( + parsed.aggregator, + "0xccddeeff00112233445566778899aabbccddeeff" + .parse() + .unwrap() + ); + + assert_eq!(parsed.valid_until, 0x66778899aabb); + assert_eq!(parsed.valid_after, 0x001122334455); + } +} diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index c21c452a..e482b551 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -8,10 +8,13 @@ repository.workspace = true [dependencies] anyhow.workspace = true +derive_more = "0.99.17" ethers.workspace = true futures.workspace = true +itertools.workspace = true rand.workspace = true reqwest.workspace = true +schnellru = "0.2.1" tokio.workspace = true tracing.workspace = true url.workspace = true diff --git a/crates/utils/src/cache.rs b/crates/utils/src/cache.rs new file mode 100644 index 00000000..032c27f4 --- /dev/null +++ b/crates/utils/src/cache.rs @@ -0,0 +1,67 @@ +// This file is part of Rundler. +// +// Rundler is free software: you can redistribute it and/or modify it under the +// terms of the GNU Lesser General Public License as published by the Free Software +// Foundation, either version 3 of the License, or (at your option) any later version. +// +// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; +// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. +// See the GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License along with Rundler. +// If not, see https://www.gnu.org/licenses/. + +//! 
Caching utilities + +use core::hash::BuildHasher; +use std::{ + fmt::{self, Debug, Display, Formatter}, + hash::Hash, +}; + +use derive_more::{Deref, DerefMut}; +use itertools::Itertools; +use schnellru::{ByLength, Limiter, RandomState}; + +/// Wrapper of [`schnellru::LruMap`] that implements [`fmt::Debug`]. +/// Adapted from [Reth](https://github.com/paradigmxyz/reth/blob/main/crates/net/network/src/cache.rs) +#[derive(Deref, DerefMut, Default)] +pub struct LruMap(schnellru::LruMap) +where + K: Hash + PartialEq, + L: Limiter, + S: BuildHasher; + +impl Debug for LruMap +where + K: Hash + PartialEq + Display, + V: Debug, + L: Limiter + Debug, + S: BuildHasher, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + let mut debug_struct = f.debug_struct("LruMap"); + + debug_struct.field("limiter", self.limiter()); + + debug_struct.field( + "res_fn_iter", + &format_args!( + "Iter: {{{} }}", + self.iter().map(|(k, v)| format!(" {k}: {v:?}")).format(",") + ), + ); + + debug_struct.finish() + } +} + +impl LruMap +where + K: Hash + PartialEq, +{ + /// Returns a new cache with default limiter and hash builder. + pub fn new(max_length: u32) -> Self { + LruMap(schnellru::LruMap::new(ByLength::new(max_length))) + } +} diff --git a/crates/utils/src/eth.rs b/crates/utils/src/eth.rs index 236816b6..f54780a3 100644 --- a/crates/utils/src/eth.rs +++ b/crates/utils/src/eth.rs @@ -13,18 +13,23 @@ //! Utilities for working with an Ethereum-like chain via Ethers. -use std::{sync::Arc, time::Duration}; - -use anyhow::Context; use ethers::{ - abi::{AbiDecode, RawLog}, + abi::{AbiDecode, AbiEncode, RawLog}, contract::ContractError, - providers::{ - Http, HttpRateLimitRetryPolicy, Middleware, Provider, RetryClient, RetryClientBuilder, - }, - types::{Address, Bytes, Log}, + providers::Middleware, + types::{Address, Bytes, Log, Selector}, }; -use url::Url; + +/// Creates call data from a method and its arguments. The arguments should be +/// passed as a tuple. 
+/// +/// Important: if the method takes a single argument, then this function should +/// be passed a single-element tuple, and not just the argument by itself. +pub fn call_data_of(selector: Selector, args: impl AbiEncode) -> Bytes { + let mut bytes = selector.to_vec(); + bytes.extend(args.encode()); + bytes.into() +} /// Gets the revert data from a contract error if it is a revert error, /// otherwise returns the original error. @@ -51,37 +56,6 @@ pub fn parse_revert_message(revert_data: &[u8]) -> Option { .map(|err| err.reason) } -/// Construct a new Ethers provider from a URL and a poll interval. -/// -/// Creates a provider with a retry client that retries 10 times, with an initial backoff of 500ms. -pub fn new_provider( - url: &str, - poll_interval: Option, -) -> anyhow::Result>>> { - let parsed_url = Url::parse(url).context("provider url should be valid")?; - - let http_client = reqwest::Client::builder() - .connect_timeout(Duration::from_secs(1)) - .build() - .context("failed to build reqwest client")?; - let http = Http::new_with_client(parsed_url, http_client); - - let client = RetryClientBuilder::default() - // these retries are if the server returns a 429 - .rate_limit_retries(10) - // these retries are if the connection is dubious - .timeout_retries(3) - .initial_backoff(Duration::from_millis(500)) - .build(http, Box::::default()); - - let mut provider = Provider::new(client); - if let Some(poll_interval) = poll_interval { - provider = provider.interval(poll_interval); - } - - Ok(Arc::new(provider)) -} - /// Converts an ethers `Log` into an ethabi `RawLog`. pub fn log_to_raw_log(log: Log) -> RawLog { let Log { topics, data, .. } = log; diff --git a/crates/utils/src/lib.rs b/crates/utils/src/lib.rs index 54bb6962..ac1a44f0 100644 --- a/crates/utils/src/lib.rs +++ b/crates/utils/src/lib.rs @@ -20,6 +20,7 @@ //! 
Rundler utilities +pub mod cache; pub mod emit; pub mod eth; pub mod handle; diff --git a/deny.toml b/deny.toml index c6cb720f..b0069153 100644 --- a/deny.toml +++ b/deny.toml @@ -22,6 +22,7 @@ deny = [ # Each entry the name of a crate and a version range. If version is # not specified, all versions will be matched. #{ name = "ansi_term", version = "=0.11.0" }, + { name = "openssl" } ] # Certain crates/versions that will be skipped when doing duplicate detection. skip = [] diff --git a/docs/README.md b/docs/README.md index b51b61da..c20dea76 100644 --- a/docs/README.md +++ b/docs/README.md @@ -11,3 +11,5 @@ [Docker](./docker.md): Instructions for building and running Docker images. [Proto](./proto.md): Protobuf usage and best practices. + +[Releases](./release.md): The release pipeline for the rundler binary diff --git a/docs/architecture/builder.md b/docs/architecture/builder.md index 69c3c948..76777088 100644 --- a/docs/architecture/builder.md +++ b/docs/architecture/builder.md @@ -42,7 +42,7 @@ Once a candidate bundle is constructed, each UO is re-simulated and validation r After 2nd simulation the entire bundle is validated via an `eth_call`, and ops that fail validation are again removed from the bundle. This process is repeated until the entire bundle passes validation. -NOTE: This procedure implements an old version of the spec and will be updated to conform soon. See [here](https://github.com/eth-infinitism/account-abstraction/blob/develop/erc/ERCS/erc-4337.md#bundling) for more details on the new implementation. +NOTE: This procedure implements an old version of the spec and will be updated to conform soon. See [here](https://eips.ethereum.org/EIPS/eip-4337#bundling) for more details on the new implementation. 
## Transaction Signers @@ -59,25 +59,14 @@ When using AWS KMS for signing Rundler requires the use of Redis to perform key To ensure that no two signers in a bundler system attempt to use the same key, causing nonce collisions, this key leasing system is used to lease a key in a CLI configured list to a single signer at a time. ## Transaction Senders - The builder supports multiple sender implementations to support bundle transaction submission to different types of APIs. -- **Raw**: Send the bundle as an `eth_sendRawTransaction` via a standard ETH JSON-RPC. - -- **Conditional**: Send the bundle as an `eth_sendRawTransactionConditional` to an interface that supports the [conditional transaction RPC](https://notes.ethereum.org/@yoav/SkaX2lS9j). +- **Raw**: Send the bundle as an `eth_sendRawTransaction` via a standard ETH JSON-RPC. If conditional RPC is enabled it will send the bundle as an `eth_sendRawTransactionConditional` to an interface that supports the [conditional transaction RPC](https://notes.ethereum.org/@yoav/SkaX2lS9j). - **Flashbots**: Submit bundles via the [Flashbots Protect](https://docs.flashbots.net/) RPC endpoint, only supported on Ethereum Mainnet. - **Bloxroute**: Submit bundles via Bloxroute's [Polygon Private Transaction](https://docs.bloxroute.com/apis/frontrunning-protection/polygon_private_tx) endpoint. Only supported on polygon. -## Transaction Tracking - -After the bundle transaction is sent, the sender tracks its status via the transaction tracker module. This module tracks to see if a transaction is pending, dropped, or mined. - -If after a configured amount of blocks the transaction is still pending, the sender will attempt to re-estimate gas fees and will submit a new bundle that replaces the old bundle. - -If dropped or mined, the sender will restart the process. - ## N-Senders Rundler has the ability to run N bundle sender state machines in parallel, each configured with their own distinct signer/account for bundle submission. 
@@ -85,3 +74,64 @@ Rundler has the ability to run N bundle sender state machines in parallel, each In order for bundle proposers to avoid attempting to bundle the same UO, the sender is configured with a mempool shard index that is added to the request to the pool. This shard index is used by the pool to always return a disjoint set of UOs to each sender. N-senders can be useful to increase bundler gas throughput. + +## Sender State Machine + +The bundle sender is implemented as an finite state machine to continuously submit bundle transactions onchain. The state machine runs as long as the builder process is running. + +### States + +**`Building`** + +In the building state the sender is waiting for a trigger. Once triggered, the sender will query the mempool for available user operations. Those user operations are then filtered by the current fees, total gas limit, and simulation results. If before/after the filtering there are no candidate user operations, the sender will wait for another trigger. If there are candidate user operations, a bundle transaction is submitted. If a cancellation is required, the sender will transfer to the cancelling state. + +**`Pending`** + +In the pending state the builder is waiting for a bundle transaction to be mined. It will wait in this state for up to `max_blocks_to_wait_for_mine` blocks. If mined, dropped, or timed out (abandoned) the sender will transition back to the building state with the appropriate metadata captured. + +**`Cancelling`** + +In the cancelling state the builder creates a cancellation operation. The shape of this operation depends on the type of transaction sender being used. If a "hard" cancellation operation is submitted the sender will submit a cancellation transaction and transition to the cancel pending state. If a "soft" cancellation operation is submitted it will transition back to the building state immediately. 
+ +**`CancelPending`** + +In the cancel pending state the builder is waiting for a cancellation transaction to be mined. It will wait in this state for up to `max_blocks_to_wait_for_mine` blocks. If mined, the sender will transition back to the building state. If dropped or timed out (abandoned), the sender will transition back to the cancelling state. If the sender has already performed `max_cancellation_fee_increases`, and the transaction has been abandoned, it will transition back to the building state and reset internal state. + +### Triggers + +While in the building state the sender is waiting for a trigger. There are 3 types of triggers: + +* New block (building mode: auto): Trigger bundle building when a new block is mined. +* Time (building mode: auto): Trigger bundle building after `bundle_max_send_interval_millis` (chain spec) has elapsed without a bundle attempt. +* Manual call (building mode: manual): Trigger bundle building on a call to `debug_bundler_sendBundleNow`. + +### Cancellations + +Cancellations occur in a specific scenario: there are user operations available that pay more than the estimated gas price, but when the sender submits the bundle transaction it receives a "replacement underpriced" error. If after increasing the fee the user operations are priced out, we are in an "underpriced" meta-state. + +The first time the sender encounters this state it will capture the block number and attempt to create another bundle, resetting the fees. During subsequent encounters the builder will compare that block number to latest, if the difference is more than `max_replacement_underpriced_blocks`, the builder will move to a cancellation state. + +The goal of the cancellation state is to remove the pending transaction from the mempool that is blocking the bundle submission, and to do so while spending the least amount of gas. There are two types of cancellations: "hard" and "soft." A "hard" cancellation requires a transaction to be sent onchain. 
This is typically an empty transaction to minimize costs. A "soft" cancellation does not require a transaction and is simply an RPC interaction. + +### Diagram + +```mermaid +--- +title: Bundle Sender State Machine (Simplified) +--- +stateDiagram-v2 + Building: Building + Pending + Cancelling + CancelPending + + [*] --> Building + Building --> Building : No operations + Building --> Pending : Bundle submitted + Pending --> Building : Bundle mined/dropped/abandoned + Building --> Cancelling : Cancel triggered + Cancelling --> CancelPending: Hard cancellation submitted + Cancelling --> Building : Soft cancellation completed + CancelPending --> Cancelling: Cancellation dropped/abandoned + CancelPending --> Building: Cancellation mined/aborted +``` diff --git a/docs/architecture/chain_spec.md b/docs/architecture/chain_spec.md new file mode 100644 index 00000000..b2613008 --- /dev/null +++ b/docs/architecture/chain_spec.md @@ -0,0 +1,39 @@ +# Chain Specification + +Chain specification is used in Rundler to set chain specific parameters. + +You can find the various parameters [here](../../crates/types/src/chain.rs). + +Upon startup Rundler uses the following CLI params to gather the chain spec parameters: + +* `--network`: Network name to lookup a hardcoded chain spec. +* `--chain_spec`: Path to a chain spec TOML file. +* `CHAIN_*`: Environment variables representing chain spec fields. + +The chain specification is derived using the following steps: + +### Find a `base` specification, if defined + +Using the following config hierarchy: + +- `CHAIN_BASE` env var +- `--chain_spec` file `base` key +- `--network` hardcoded spec `base` key + +to find a chain spec base. A base is not required. A base must be a hardcoded network. + +### Resolve the full chain spec + +Using the following config hierarchy: + +- `CHAIN_*` env vars +- `--chain_spec` file keys +- `--network` hardcoded spec keys +- base (if defined) +- defaults + +to resolve the full chain spec. 
Only one level of `base` resolution is defined. That is, if a `base` network defined another `base`, the second `base` won't be resolved. + +### Hardcoded Chan Specs + +See the files [here](../../bin/rundler/chain_specs/) for a list of hardcoded chain specifications. diff --git a/docs/architecture/entry_point.md b/docs/architecture/entry_point.md new file mode 100644 index 00000000..c63e1aeb --- /dev/null +++ b/docs/architecture/entry_point.md @@ -0,0 +1,65 @@ +# Entry Point Support + +Rundler currently supports the most recent two entry point versions: + + * [v0.6.0](https://github.com/eth-infinitism/account-abstraction/tree/v0.6.0) + * [v0.7.0](https://github.com/eth-infinitism/account-abstraction/tree/v0.7.0) + +## Configuration + +Rundler's entry point support is controlled by the following CLI options: + +Enable/disable entry point versions (defaults to both enabled): +- `--entry_point_v0_6_enabled` +- `--entry_point_v0_7_enabled` + +Modify the number of builders (and thus keys) associated with each entry point: +- `--num_builders_v0_6` +- `--num_builders_v0_7` + +Rundler expects that the entry point contract is deployed at a deterministic address. It defaults to: + +- v0.6.0: `0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789` +- v0.7.0: `0x0000000071727De22E5E9d8BAf0edAc6f37da032` + +If a chain has the entry point deployed at a different address, these addresses can be modified using the chain spec configurations: `entry_point_address_v0_6` and `entry_point_address_v0_7`. + +Rundler expects that the entry points are unmodified from their canonical versions above. Thus, the only use for overriding the entry point addresses would be due to the lack of a deterministic deployment mechanism on a chain. + +## API + +Rundler uses the same API interface for both v0.6 and v0.7. It determines which JSON schema to apply to each RPC request based on the provided entry point address. 
+ +See the version of the spec associated with the entry point version for the expected schemas. + + * [v0.6.0](https://github.com/eth-infinitism/account-abstraction/blob/v0.6.0/eip/EIPS/eip-4337.md#rpc-methods-eth-namespace) + * [v0.7.0](https://github.com/eth-infinitism/account-abstraction/blob/v0.7.0/erc/ERCS/erc-4337.md#rpc-methods-eth-namespace) + +## Internals + +To support multiple entry point versions in the same codebase, Rundler's components are entry point version aware. + +### Types + +Versions v0.6 and v0.7 define different User Operation types. Rundler uses the following to represent these different versions: + +* `UserOperation` Trait: A common interface for user operation implementations +* `UserOperationVariant`: A container to hold either version of user operation. Implements the trait via passthrough access +* `v0_6::UserOperation`: A v0.6 user operation +* `v0_7::UserOperation`: A v0.7 user operation + +Depending on the context a class may elect to access a user operation via any of these interfaces. Only classes that are hyper-specific to a particular version should use the version specific types. We prefer to use the trait as a generic, or the variant, where code sharing between the versions is possible. + +### Pool + +Rundler will run a separate mempool for each enabled entry point. These pools are still driven by the same tracking logic, but their data structures are completely independent. + +### Builder + +Rundler will run independent bundle builders for each entry point. Each builder will only interact with the mempool of its same version. + +### RPC + +Rundler runs a single RPC server to handle both v0.6 and v0.7 requests, and routes requests to their correct version handling based on the provided entry point version. + +For endpoints where entry point version is not specified (i.e. `eth_getUserOperationReceipt`) Rundler will apply the request to any enabled entry point. 
For example, in `eth_getUserOperationReceipt` it will search any enabled entry point's logs for the provided user operation hash. diff --git a/docs/architecture/pool.md b/docs/architecture/pool.md index 51c912f2..cfe2d699 100644 --- a/docs/architecture/pool.md +++ b/docs/architecture/pool.md @@ -6,11 +6,11 @@ The `Pool` task is responsible for receiving, validating, sorting, and storing u Upon each `add_operation` call the `Pool` will preforms a series of checks. -1. Run a series of [prechecks](https://github.com/eth-infinitism/account-abstraction/blob/develop/erc/ERCS/erc-4337.md#client-behavior-upon-receiving-a-useroperation) to catch any reasons why the UO may not be mined. +1. Run a series of [prechecks](https://eips.ethereum.org/EIPS/eip-4337#client-behavior-upon-receiving-a-useroperation) to catch any reasons why the UO may not be mined. -2. Simulate the UO via a `debug_traceCall` as per the [ERC-4337 spec](https://github.com/eth-infinitism/account-abstraction/blob/develop/erc/ERCS/erc-4337.md#simulation). +2. Simulate the UO via a `debug_traceCall` as per the [ERC-4337 spec](https://eips.ethereum.org/EIPS/eip-4337#simulation). -If violations are found, the UO is rejected. Else, the UO is added to the pool. +If violations are found, the UO is rejected. Else, the UO is added to the pool. We only accept User Operations into the pool if the `validUntil` field has over 60 seconds to expire from the time of entry or the `validAfter` field is before the time of entry. ### Tracer @@ -18,7 +18,7 @@ A typescript based tracer is used to collect relevant information from the `debu ## Reputation -The `Pool` tracks the reputation of entities as per the [ERC-4337 spec](https://github.com/eth-infinitism/account-abstraction/blob/develop/erc/ERCS/erc-4337.md#reputation-scoring-and-throttlingbanning-for-global-entities). 
+The `Pool` tracks the reputation of entities as per the [ERC-4337 spec](https://eips.ethereum.org/EIPS/eip-4337#reputation-scoring-and-throttlingbanning-for-global-entities). ### Allowlist/Blocklist diff --git a/docs/architecture/rpc.md b/docs/architecture/rpc.md index 3a8ec679..f8e02d6d 100644 --- a/docs/architecture/rpc.md +++ b/docs/architecture/rpc.md @@ -1,10 +1,11 @@ # RPC Task -The `RPC` task is the main interface into the Rundler. It consists of 3 namespaces: +The `RPC` task is the main interface into the Rundler. It consists of 4 namespaces: -- **eth** -- **debug** -- **rundler** +- [**eth**](#eth_-namespace) +- [**debug**](#debug_-namespace) +- [**rundler**](#rundler_-namespace) +- [**admin**](#admin_-namespace) Each of which can be enabled/disabled via configuration. @@ -14,7 +15,7 @@ It also supports a health check endpoint. ### `eth_` Namespace -Methods defined by the [ERC-4337 spec](https://github.com/eth-infinitism/account-abstraction/blob/develop/erc/ERCS/erc-4337.md#rpc-methods-eth-namespace). +Methods defined by the [ERC-4337 spec](https://eips.ethereum.org/EIPS/eip-4337#rpc-methods-eth-namespace). | Method | Supported | | ------ | :-----------: | @@ -27,24 +28,125 @@ Methods defined by the [ERC-4337 spec](https://github.com/eth-infinitism/account ### `debug_` Namespace -Method defined by the [ERC-4337 spec](https://github.com/eth-infinitism/account-abstraction/blob/develop/erc/ERCS/erc-4337.md#rpc-methods-debug-namespace). Used only for debugging/testing and should be disabled on production APIs. - -| Method | Supported | -| ------ | :-----------: | -| `debug_clearState` | ✅ | -| `debug_dumpMempool` | ✅ | -| `debug_sendBundleNow` | ✅ | -| `debug_setBundlingMode` | ✅ | -| `debug_setReputation` | ✅ | -| `debug_dumpReputation` | ✅ | +Method defined by the [ERC-4337 spec](https://eips.ethereum.org/EIPS/eip-4337#rpc-methods-debug-namespace). Used only for debugging/testing and should be disabled on production APIs. 
+ +| Method | Supported | Non-Standard | +| ------ | :-----------: | :--: | +| `debug_bundler_clearState` | ✅ | +| `debug_bundler_dumpMempool` | ✅ | +| `debug_bundler_sendBundleNow` | ✅ | +| `debug_bundler_setBundlingMode` | ✅ | +| `debug_bundler_setReputation` | ✅ | +| `debug_bundler_dumpReputation` | ✅ | +| `debug_bundler_addUserOps` | 🚧 | | +| [`debug_bundler_getStakeStatus`](#debug_bundler_getstakestatus) | ✅ | ✅ | +| [`debug_bundler_clearMempool`](#debug_bundler_clearMempool) | ✅ | ✅ +| [`debug_bundler_dumpPaymasterBalances`](#debug_bundler_dumpPaymasterBalances) | ✅ | ✅ + +#### `debug_bundler_getStakeStatus` + +This method is used by the ERC-4337 `bundler-spec-tests` but is not (yet) part of the standard. + +This method gets the stake status of a certain address with a particular entry point contract. + +##### Parameters + +- Address to get stake status for +- Entry point address + +``` +# Request +{ + "jsonrpc": "2.0", + "id": 1, + "method": "debug_bundler_clearMempool", + "params": ["0x...", "0x..."] // address, entry point address +} + +# Response +{ + "jsonrpc": "2.0", + "id": 1, + "result": [ + { + isStaked: bool, + stakeInfo: { + addr: address, + stake: uint128, + unstakeDelaySec: uint32 + } + } + ] +} +``` + +#### `debug_bundler_clearMempool` + +This method is used by the ERC-4337 `bundler-spec-tests` but is not (yet) part of the standard. + +This method triggers a the mempool to drop all pending user operations, but keeps the rest of its state. In contrast to `debug_bundler_clearState` which drops all state. + +##### Parameters + +- Entry point address + +``` +# Request +{ + "jsonrpc": "2.0", + "id": 1, + "method": "debug_bundler_clearMempool", + "params": ["0x...."] // entry point address +} + +# Response +{ + "jsonrpc": "2.0", + "id": 1, + "result": "ok" +} +``` + +#### `debug_bundler_dumpPaymasterBalances` + +Dump the paymaster balances from the paymaster tracker in the mempool for a given entry point. 
+ +##### Parameters + +- Entry point address + +``` +# Request +{ + "jsonrpc": "2.0", + "id": 1, + "method": "debug_bundler_clearMempool", + "params": ["0x...."] // entry point address +} + +# Response +{ + "jsonrpc": "2.0", + "id": 1, + "result": [ + { + address: address // paymaster address + pendingBalance: uint256 // paymaster balance including pending UOs in pool + confirmedBalance: uint256 // paymaster confirmed balance onchain + }, + { ... }, ... + ] +} +``` ### `rundler_` Namespace -Rundler specific methods that are not specified by the ERC-4337 spec. +Rundler specific methods that are not specified by the ERC-4337 spec. This namespace may be opened publicly. | Method | Supported | | ------ | :-----------: | | [`rundler_maxPriorityFeePerGas`](#rundler_maxpriorityfeepergas) | ✅ | +| [`rundler_dropLocalUserOperation`](#rundler_droplocaluseroperation) | ✅ | #### `rundler_maxPriorityFeePerGas` @@ -52,6 +154,145 @@ This method returns the minimum `maxPriorityFeePerGas` that the bundler will acc Users of this method should typically increase their priority fee values by a buffer value in order to handle price fluctuations. +``` +# Request +{ + "jsonrpc": "2.0", + "id": 1, + "method": "rundler_maxPriorityFeePerGas", + "params": [] +} + +# Response +{ + "jsonrpc": "2.0", + "id": 1, + "result": ["0x..."] // uint256 +} +``` + +#### `rundler_dropLocalUserOperation` + +Drops a user operation from the local mempool for the given sender/nonce. The user must send a signed UO that passes validation and matches the requirements below. + +**NOTE:** there is no guarantee that this method effectively cancels a user operation. If the user operation has been bundled prior to the drop attempt it may still be mined. If the user operation has been sent to the P2P network it may be mined by another bundler after being dropped locally. + +**Requirements:** + +- `sender` and `nonce` match the UO that is being dropped. 
+- `preVerificationGas`, `callGasLimit`, `maxFeePerGas` must all be 0. + - This is to ensure this UO is not viable onchain. +- `callData` must be `0x`. + - This is to ensure this UO is not viable onchain. +- If an `initCode` was used on the UO to be dropped, the request must also supply that same `initCode`, else `0x`. + - This is required for signature verification. +- `verificationGasLimit` must be high enough to run the account verification step. +- `signature` must be valid on a UO with the above requirements. +- User operation must be in the pool for at least N blocks before it is dropped. N is configurable via a CLI setting. + - This is to ensure that the bundler has had sufficient time to attempt to bundle the UO and get compensated for its initial simulation. This prevents DoS attacks. + +**Notes:** + +- `paymasterAndData` is not required to be `0x`, but there is little use for it here, it's recommended to set to `0x`. +- `verificationGasLimit` doesn't require estimation, just set to a high number that is lower than the bundler's max verification gas, i.e. 1M. + +``` +# Request +{ + "jsonrpc": "2.0", + "id": 1, + "method": "rundler_dropLocalUserOperation", + "params": [ + { + ... // UO with the requirements above + }, + "0x..." // entry point address + ] +} + +# Response +{ + "jsonrpc": "2.0", + "id": 1, + "result": ["0x..."] // hash of UO if dropped, or empty if a UO is not found for the sender/ID +} +``` + + +### `admin_` Namespace + +Administration methods specific to Rundler. This namespace should not be open to the public. + +| Method | +| ------ | +| [`admin_clearState`](#admin_clearState) | +| [`admin_setTracking`](#admin_settracking) | + +#### `admin_clearState` + +Clears the state of various Rundler components associated with an entry point address.
+ +##### Parameters + +- Entry point address +- Admin clear state object + +``` +# Request +{ + "jsonrpc": "2.0", + "id": 1, + "method": "admin_clearState", + "params": [ + "0x....", // entry point address + { + clearMempool: bool, // optional, clears the UOs from the pool + clearPaymaster: bool, // optional, clears the paymaster balances + clearReputation: bool // optional, clears the reputation manager + } + ] +} + +# Response +{ + "jsonrpc": "2.0", + "id": 1, + "result": "ok" +} +``` + +#### `admin_setTracking` + +Turns various mempool features on/off. + +##### Parameters + +- Entry point address +- Admin set tracking object + +``` +# Request +{ + "jsonrpc": "2.0", + "id": 1, + "method": "admin_setTracking", + "params": [ + "0x....", // entry point address + { + paymasterTracking: bool, // required, enables paymaster balance tracking/enforcement + reputationTracking: bool, // required, enables reputation tracking/enforcement + } + ] +} + +# Response +{ + "jsonrpc": "2.0", + "id": 1, + "result": "ok" +} +``` + ### Health Check The health check endpoint can be used by infrastructure to ensure that Rundler is up and running. @@ -97,41 +338,67 @@ NOTE: Since the dynamic portion of PVG can change, users on networks that contai ### `verificationGasLimit` Estimation -To estimate `verificationGasLimit` Rundler uses a binary search to find the minimum gas value where validation succeeds. The procedure follows: +To estimate `verificationGasLimit` Rundler uses binary search to find the minimum gas value where verification succeeds. The procedure follows: -1. Run an initial attempt at max limit using the gas measurement helper contract. If validation fails here it will never succeed and the UO is rejected. +1. Run an initial attempt at max limit using the gas measurement helper contract. If verification fails here it will never succeed and the UO is rejected. 2. Set the initial guess to the gas used in the initial attempt * 2 to account for the 63/64ths rule. 3.
Run the binary search algorithm until the minimum successful gas value and the maximum failure gas value are within 10%. This approach allows for minimal `eth_call` requests while providing an accurate gas limit. -#### Gas Fee, Token Transfers, and State Overrides +#### Gas Fees and Token Transfers + +During ERC-4337 verification a transfer of an asset to pay for gas always occurs. For example: + +- When there is no paymaster and the sender's deposit is less than the maximum gas cost, the sender must transfer ETH to the entrypoint. +- When an ERC-20 paymaster is used, there is typically an ERC-20 token transfer from the sender to the paymaster and then the paymaster transfers ETH to the entry point. + +We split this into two cases for estimation: no paymaster, and paymaster. + +##### No Paymaster Case -During ERC-4337 verification a transfer of an asset to pay for gas typically occurs. For example: +When no paymaster is used, verification gas is always estimated using **zero fees**. The cost of a native transfer is added to the result of the binary search to account for the transfer of funds from the account to the entry point. -- When there is no paymaster and the sender's deposit is less than the maximum gas fee, the sender must transfer ETH to the entrypoint. -- When an ERC20 paymaster is used, there is typically an ERC20 token transfer from the sender to the paymaster. +**Note:** This may overestimate the verification gas by the cost of a native transfer in the case where the account has enough deposited on the entry point to cover the full prefund cost. This will not impact the onchain cost of the operation. -To correctly capture the gas cost of this transfer, a non-zero gas fee must be used. This fee must be: +##### Paymaster Case + +Paymasters may perform more complicated logic on the fee fields, including triggering ERC-20 transfers, that must be accounted for during estimation. 
Unlike the no paymaster case, this gas cost cannot be known beforehand as it varies by paymaster implementation. + +To correctly estimate the verification gas, a non-zero gas fee must be used. This fee must be: - Large enough that it triggers a transfer of tokens. - I.e. USDC only uses 6 decimals, if the gas fee in USDC is < 1e-6 the transfer won't trigger. Its reasonable to assume that users will have a few USD cents worth of their fee token to avoid this case. - Small enough that a fee-payer with a small amount of the fee token can pay for the maximum gas. -This value can be controlled by the `validation_estimation_gas_fee` configuration variable. A default value of 10K gwei is provided. +During estimation the gas cost is kept constant by varying the `maxFeePerGas` based on the current binary search guess. Therefore, as long as the fee-payer can pay for the gas cost initially, Rundler should be able to successfully estimate gas. -During estimation the gas fee is kept constant by varying the `max_fee_per_gas` based on the current binary search guess. Therefore, as long as the fee-payer can pay for the gas fee initially, Rundler should be able to successfully estimate gas. +This value can be controlled by the `VERIFICATION_ESTIMATION_GAS_FEE` configuration variable. A default value of 10K gwei is provided. -What if the fee payer does not own enough of the payment token? A common use case may be to estimate the gas fee prior to transferring the gas token to the fee-payer. In this case, callers should use the state override functionality of `eth_estimateUserOperationGas`. Callers can override the balance (ETH, ERC20, or any arbitrary payment method) such that the fee-payer can pay the `validation_estimation_gas_fee`. +Paymasters should ensure that they have at least this value available in order for estimation to succeed. 
If the paymaster is causing token transfers from the account (ERC-20 paymaster case), they'll need to handle when the account doesn't have enough tokens. Three possible ways to do this: + +- The paymaster can absorb the balance error, and write their contract in such a way that it will estimate the correct amount of gas even when the transfer fails. If the transfer fails the paymaster can return the signature invalid code. +- Use state overrides to ensure that the account has the full gas fee. See below. +- Use hardcoded values for paymaster gas. The paymaster provider can decide beforehand a maximum gas limit. The client can estimate gas without a paymaster, and then account for this hardcoded paymaster gas limit. + - In entry point v0.6 the client should set `verificationGasLimit` to the maximum of the account verification gas limit estimation and the paymaster hardcoded value. + - In entry point v0.7 the client can directly set the `paymasterVerificationGasLimit` and use the estimation only for the `verificationGasLimit`. ### `callGasLimit` Estimation -`callGasLimit` estimation is similar to `verificationGasLimit` estimation in that it also uses a binary search. The majority of the binary search, however, is performed in Solidity to limit network calls. +`callGasLimit` estimation is similar to `verificationGasLimit` estimation in that it also uses a binary search. The majority of the binary search, however, is performed in Solidity to limit network calls. Call gas is always estimated with zero gas fees. This scheme requires the use of a spoofed entry point contract via `eth_call` state overrides. The original entry point contract is moved and a proxy is loaded in its place. This allows us to write additional logic to support gas estimation into the entry point contract. More information on gas estimation can be found [here](https://www.alchemy.com/blog/erc-4337-gas-estimation). 
+### State Overrides + +The `eth_estimateUserOperationGas` accepts an optional state override set as the 3rd positional RPC parameter. It accepts the same format as Geth's `eth_call` [state overrides](https://geth.ethereum.org/docs/interacting-with-geth/rpc/ns-eth#eth-call). + +This parameter can be used to modify the state of the chain before performing gas estimation. + +A typical use case for this could be to spoof some funds into a user's account while using an ERC-20 paymaster. Callers can override the balance (ETH, ERC20, or any arbitrary payment method) such that the fee-payer can pay the `verification_estimation_gas_fee`. + ## Fee Estimation Fee estimation is done by applying the configured [priority fee mode](./builder.md#required-fees) to the estimated network fees. diff --git a/docs/cli.md b/docs/cli.md index 8af37ebe..31a2eb31 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -15,12 +15,20 @@ The `pool` and `builder` commands will also start a gRPC endpoint to allow other These options are common to all subcommands and can be used globally: -- `--entry_points`: Entry point addresses to target. Provide a comma-separated list. (**REQUIRED**) - - env: *ENTRY_POINTS* - - (multiple entry points is currently in beta, we only officially support `0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789`) -- `--chain_id`: Chain ID to target. (default: `1337` **IMPORTANT**). - - env: *CHAIN_ID* -- `--node_http`: ETH Node HTTP URL to connect to. (**REQUIRED**) +### Chain Specification + +See [chain spec](./architecture/chain_spec.md) for a detailed description of chain spec derivation from these options. + +- `--network`: Network to look up a hardcoded chain spec. (default: None) + - env: *NETWORK* +- `--chain_spec`: Path to a chain spec TOML file. + - env: *CHAIN_SPEC* +- (env only): Chain specification overrides. + - env: *CHAIN_** + +### Rundler Common + +- `--node_http`: EVM Node HTTP URL to use.
(**REQUIRED**) - env: *NODE_HTTP* - `--max_verification_gas`: Maximum verification gas. (default: `5000000`). - env: *MAX_VERIFICATION_GAS* @@ -30,12 +38,12 @@ These options are common to all subcommands and can be used globally: - env: *MIN_STAKE_VALUE* - `--min_unstake_delay`: Minimum unstake delay. (default: `84600`). - env: *MIN_UNSTAKE_DELAY* -- `--user_operation_event_block_distance`: Number of blocks to search when calling `eth_getUserOperationByHash`. (default: distance to genesis **IMPORTANT**) +- `--user_operation_event_block_distance`: Number of blocks to search when calling `eth_getUserOperationByHash`. (default: all blocks) - env: *USER_OPERATION_EVENT_BLOCK_DISTANCE* - `--max_simulate_handle_ops_gas`: Maximum gas for simulating handle operations. (default: `20000000`). - env: *MAX_SIMULATE_HANDLE_OPS_GAS* -- `--validation_estimation_gas_fee`: The gas fee to use during validation estimation. (default: `1000000000000` 10K gwei). - - env: *VALIDATION_ESTIMATION_GAS_FEE* +- `--verification_estimation_gas_fee`: The gas fee to use during verification estimation. (default: `1000000000000` 10K gwei). + - env: *VERIFICATION_ESTIMATION_GAS_FEE* - See [RPC documentation](./architecture/rpc.md#verificationGasLimit-estimation) for details. - `--bundle_priority_fee_overhead_percent`: bundle transaction priority fee overhead over network value. (default: `0`). - env: *BUNDLE_PRIORITY_FEE_OVERHEAD_PERCENT* @@ -44,21 +52,27 @@ These options are common to all subcommands and can be used globally: - env: *PRIORITY_FEE_MODE_KIND* - `--priority_fee_mode_value`: Priority fee mode value. (default: `0`). - env: *PRIORITY_FEE_MODE_VALUE* -- `--fee_accept_percent`: Percentage of the current network fees a user operation must have in order to be accepted into the mempool. (default: `100`). - - env: *FEE_ACCEPT_PERCENT* +- `--base_fee_accept_percent`: Percentage of the current network fees a user operation must have in order to be accepted into the mempool. (default: `100`). 
+ - env: *BASE_FEE_ACCEPT_PERCENT* - `--aws_region`: AWS region. (default: `us-east-1`). - env: *AWS_REGION* - (*Only required if using other AWS features*) -- `--eth_poll_interval_millis`: Interval at which the builder polls an RPC node for new blocks and mined transactions (default: `100`) - - env: *ETH_POLL_INTERVAL_MILLIS* +- `--unsafe`: Flag for unsafe bundling mode. When set Rundler will skip checking simulation rules (and any `debug_traceCall`). (default: `false`). + - env: *UNSAFE* - `--mempool_config_path`: Path to the mempool configuration file. (example: `mempool-config.json`, `s3://my-bucket/mempool-config.json`) - This path can either be a local file path or an S3 url. If using an S3 url, Make sure your machine has access to this file. - env: *MEMPOOL_CONFIG_PATH* - See [here](./architecture/pool.md#alternative-mempools-in-preview) for details. -- `--num_builders`: The number of bundle builders to run (default: `1`) - - env: *NUM_BUILDERS* - -### Mempool Configuration +- `--disable_entry_point_v0_6`: Disable entry point v0.6 support. (default: `false`). + - env: *DISABLE_ENTRY_POINT_V0_6* +- `--num_builders_v0_6`: The number of bundle builders to run on entry point v0.6 (default: `1`) + - env: *NUM_BUILDERS_V0_6* +- `--disable_entry_point_v0_7`: Disable entry point v0.7 support. (default: `false`). + - env: *DISABLE_ENTRY_POINT_V0_7* +- `--num_builders_v0_7`: The number of bundle builders to run on entry point v0.7 (default: `1`) + - env: *NUM_BUILDERS_V0_7* +- `--tracer_timeout`: The timeout used for custom javascript tracers, the string must be in a valid parseable format that can be used in the `ParseDuration` function on an ethereum node. See Docs [Here](https://pkg.go.dev/time#ParseDuration). (default: `15s`) + - env: *TRACER_TIMEOUT* ## Metrics Options @@ -129,8 +143,20 @@ List of command line options for configuring the Pool. - env: *POOL_ALLOWLIST_PATH* - This path can either be a local file path or an S3 url. 
If using an S3 url, Make sure your machine has access to this file. - See [here](./architecture/pool.md#allowlistblocklist) for details. +- `--pool.chain_poll_interval_millis`: Interval at which the pool polls an Eth node for new blocks (default: `100`) + - env: *POOL_CHAIN_POLL_INTERVAL_MILLIS* +- `--pool.chain_sync_max_retries`: The amount of times to retry syncing the chain before giving up and waiting for the next block (default: `5`) + - env: *POOL_CHAIN_SYNC_MAX_RETRIES* - `--pool.chain_history_size`: Size of the chain history - env: *POOL_CHAIN_HISTORY_SIZE* +- `--pool.paymaster_tracking_enabled`: Boolean field that sets whether the pool server starts with paymaster tracking enabled (default: `true`) + - env: *POOL_PAYMASTER_TRACKING_ENABLED* +- `--pool.paymaster_cache_length`: Length of the paymaster cache (default: `10_000`) + - env: *POOL_PAYMASTER_CACHE_LENGTH* +- `--pool.reputation_tracking_enabled`: Boolean field that sets whether the pool server starts with reputation tracking enabled (default: `true`) + - env: *POOL_REPUTATION_TRACKING_ENABLED* +- `--pool.drop_min_num_blocks`: The minimum number of blocks that a UO must stay in the mempool before it can be requested to be dropped by the user (default: `10`) + - env: *POOL_DROP_MIN_NUM_BLOCKS* ## Builder Options @@ -144,10 +170,13 @@ List of command line options for configuring the Builder. - *Only required when running in distributed mode* - `--builder.private_key`: Private key to use for signing transactions - env: *BUILDER_PRIVATE_KEY* - - *Only required if BUILDER_AWS_KMS_KEY_IDS is not provided* + - **DEPRECATED**: Use `--builder.private_keys` instead. If both used this is added to the list. 
+- `--builder.private_keys`: Private keys to use for signing transactions, separated by `,` + - env: *BUILDER_PRIVATE_KEYS* - `--builder.aws_kms_key_ids`: AWS KMS key IDs to use for signing transactions (comma-separated) - env: *BUILDER_AWS_KMS_KEY_IDS* - *Only required if BUILDER_PRIVATE_KEY is not provided* + - *Cannot use `builder.private_keys` and `builder.aws_kms_key_ids` at the same time* - `--builder.redis_uri`: Redis URI to use for KMS leasing (default: `""`) - env: *BUILDER_REDIS_URI* - *Only required when AWS_KMS_KEY_IDS are provided* @@ -156,26 +185,36 @@ List of command line options for configuring the Builder. - *Only required when AWS_KMS_KEY_IDS are provided* - `--builder.max_bundle_size`: Maximum number of ops to include in one bundle (default: `128`) - env: *BUILDER_MAX_BUNDLE_SIZE* -- `--builder.submit_url`: If present, the URL of the ETH provider that will be used to send transactions. Defaults to the value of `node_http`. - - env: *BUILDER_SUBMIT_URL* -- `--builder.sender`: Choice of what sender type to to use for transaction submission. (default: `raw`, options: `raw`, `conditional`, `flashbots`, `polygon_bloxroute`) - - env: *BUILDER_SENDER* - `--builder.max_blocks_to_wait_for_mine`: After submitting a bundle transaction, the maximum number of blocks to wait for that transaction to mine before trying to resend with higher gas fees (default: `2`) - env: *BUILDER_MAX_BLOCKS_TO_WAIT_FOR_MINE* - `--builder.replacement_fee_percent_increase`: Percentage amount to increase gas fees when retrying a transaction after it failed to mine (default: `10`) - env: *BUILDER_REPLACEMENT_FEE_PERCENT_INCREASE* -- `--builder.max_fee_increases`: Maximum number of fee increases to attempt (Seven increases of 10% is roughly 2x the initial fees) (default: `7`) - - env: *BUILDER_MAX_FEE_INCREASES* -- `--builder.bloxroute_auth_header`: If using the bloxroute transaction sender on Polygon, this is the auth header to supply with the requests. 
(default: None) +- `--builder.max_cancellation_fee_increases`: Maximum number of cancellation fee increases to attempt (default: `15`) + - env: *BUILDER_MAX_CANCELLATION_FEE_INCREASES* +- `--builder.max_replacement_underpriced_blocks`: The maximum number of blocks to wait in a replacement underpriced state before issuing a cancellation transaction (default: `20`) + - env: *BUILDER_MAX_REPLACEMENT_UNDERPRICED_BLOCKS* +- `--builder.sender`: Choice of what sender type to use for transaction submission. (default: `raw`, options: `raw`, `flashbots`, `polygon_bloxroute`) + - env: *BUILDER_SENDER* +- `--builder.submit_url`: Only used if builder.sender == "raw." If present, the URL of the ETH provider that will be used to send transactions. Defaults to the value of `node_http`. + - env: *BUILDER_SUBMIT_URL* +- `--builder.use_submit_for_status`: Only used if builder.sender == "raw." Use the submit url to get the status of the bundle transaction. (default: `false`) + - env: *BUILDER_USE_SUBMIT_FOR_STATUS* +- `--builder.use_conditional_rpc`: Only used if builder.sender == "raw." Use `eth_sendRawTransactionConditional` when submitting. (default: `false`) + - env: *BUILDER_USE_CONDITIONAL_RPC* +- `--builder.dropped_status_unsupported`: Only used if builder.sender == "raw." If set, the builder will not process a dropped status. Use this if the URL that is being used for status (node_http or submit_url) does not support pending transactions, only those that are mined. (default: `false`) + - env: *BUILDER_DROPPED_STATUS_UNSUPPORTED* +- `--builder.flashbots_relay_builders`: Only used if builder.sender == "flashbots." Additional builders to send bundles to through the Flashbots relay RPC (comma-separated). List of builders that the Flashbots RPC supports can be found [here](https://docs.flashbots.net/flashbots-auction/advanced/rpc-endpoint#eth_sendprivatetransaction). 
(default: `flashbots`) + - env: *BUILDER_FLASHBOTS_RELAY_BUILDERS* +- `--builder.flashbots_relay_auth_key`: Only used/required if builder.sender == "flashbots." Authorization key to use with the flashbots relay. See [here](https://docs.flashbots.net/flashbots-auction/advanced/rpc-endpoint#authentication) for more info. (default: None) + - env: *BUILDER_FLASHBOTS_RELAY_AUTH_KEY* +- `--builder.bloxroute_auth_header`: Only used/required if builder.sender == "polygon_bloxroute." If using the bloxroute transaction sender on Polygon, this is the auth header to supply with the requests. (default: None) - env: `BUILDER_BLOXROUTE_AUTH_HEADER` - - *Only required when `--builder.sender=polygon_bloxroute`* - `--builder.index_offset`: If running multiple builder processes, this is the index offset to assign unique indexes to each bundle sender. (default: 0) - env: `BUILDER_INDEX_OFFSET` - `--builder.pool_url`: If running in distributed mode, the URL of the pool server to use. - env: `BUILDER_POOL_URL` - *Only required when running in distributed mode* - ### Key management Private keys for the bundler can be provided in a few ways. You can set the `--builder.private_key` flag or the `BUILDER_PRIVATE_KEY` environment variable @@ -190,11 +229,11 @@ Here are some example commands to use the CLI: ```sh # Run the Node subcommand with custom options -$ ./rundler node --entry_points 0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789 --chain_id 1337 --max_verification_gas 10000000 +$ ./rundler node --network dev --disable_entry_point_v0_6 --node_http http://localhost:8545 --builder.private_keys 0x0000000000000000000000000000000000000000000000000000000000000001 -# Run the RPC subcommand with custom options and enable JSON logging. The builder and pool will need to be running before this starts. -$ ./rundler rpc --node_http http://localhost:8545 --log.json +# Run the RPC subcommand with custom options and enable JSON logging. 
The builder (localhost:50052) and pool (localhost:50051) will need to be running before this starts. +$ ./rundler rpc --network dev --node_http http://localhost:8545 --log.json --disable_entry_point_v0_6 # Run the Pool subcommand with custom options and specify a mempool config file -$ ./rundler pool --max_simulate_handle_ops_gas 15000000 --mempool_config_path mempool.json --node_http http://localhost:8545 --chain_id 8453 -``` \ No newline at end of file +$ ./target/debug/rundler pool --network dev --max_simulate_handle_ops_gas 15000000 --mempool_config_path mempool.json --node_http http://localhost:8545 --disable_entry_point_v0_6 +``` diff --git a/docs/docker.md b/docs/docker.md index 243df922..99f6349f 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -18,6 +18,7 @@ version: "3.8" services: rundler: image: rundler + command: node ports: # RPC port - "3000:3000" @@ -31,3 +32,52 @@ services: ``` An example docker-compose configuration running Rundler in its distributed mode can be found [here](../test/spec-tests/remote/docker-compose.yml). + +## Cross-Platform Docker Builds with Docker and cross-rs + +### Prerequisites + +- [cross-rs](https://github.com/cross-rs/cross) +- [tonistiigi/binfmt](https://github.com/tonistiigi/binfmt) +- [docker-buildx](https://github.com/docker/buildx) + +### Build Phase [Dockerfile.build](../Dockerfile.build) + +This phase compiles and imports required libraries for successful compilation. It uses the Dockerfile.build as an environment. The base image is specified by the `CROSS_BASE_IMAGE` argument. A list of images that `cross-rs` provides can be found [here](https://github.com/cross-rs/cross/tree/main/docker). + +### Release Phase [Dockerfile.cross](../Dockerfile.cross) + +This phase imports the compiled binary from the previous stage into its environment and exposes relevant ports for the correct functioning of the program. The target platform is specified by the `TARGETPLATFORM` argument. 
+ +### Usage + +**GitHub Actions** + +``` +jobs: + build: + runs-on: ubuntu-latest + + steps: + - name: Set up Docker builder + run: | + docker run --privileged --rm tonistiigi/binfmt --install arm64,amd64 + docker buildx create --use --name cross-builder + + - name: Build and push image + run: | + cargo install cross --git https://github.com/cross-rs/cross + sudo -E env "PATH=$PATH" make docker-build-latest +``` + +**Local Builds** + +These commands should only be used if you are trying to cross compile the application locally. If you just want to build cross compiled docker images, you should use the commands above. + + +``` +docker run --privileged --rm tonistiigi/binfmt --install arm64,amd64 +docker buildx create --use --name cross-builder +cargo install cross --git https://github.com/cross-rs/cross +sudo -E env "PATH=$PATH" make docker-build-latest +``` diff --git a/docs/release.md b/docs/release.md new file mode 100644 index 00000000..2f47944a --- /dev/null +++ b/docs/release.md @@ -0,0 +1,39 @@ +# Releases + +The rundler project's releases are seamlessly managed by GitHub Actions and can be accessed here. This comprehensive workflow orchestrates multiple steps to compile and release new versions of the rundler project. + +## Workflow Steps + +# Extract Version + +This initial step conditionally extracts the project version either from GitHub Actions inputs or the Git reference. + +# Build + +The build process is orchestrated to cater to various architectures and platforms. Using a dynamic matrix strategy, it defines distinct build configurations encompassing architecture, platform, and profile. Key actions include: + +- Checking out the source code. +- Updating the Rust toolchain. +- Installing the target architecture. +- Leveraging the `Swatinem/rust-cache` action to efficiently cache Rust dependencies. +- Setting up essential environment variables for Apple Silicon SDK during Apple builds.
+- Compiling the project with the specified profile and architecture. +- Organizing the compiled binary into a designated 'artifacts' directory. +- Adapting Windows builds by appending a '.exe' extension to the binary. + +# Signing + +Ensuring the integrity of the release, this step imports the GPG signing key and passphrase securely from GitHub secrets. It then generates GPG signatures for the resulting tarballs, strategically placing them in the root directory. + +# Upload Artifacts + +Leveraging the `actions/upload-artifact` action, this step uploads compressed artifacts along with their corresponding signatures to GitHub Actions. + +# Draft Release + +Dependent on the successful completion of the 'build' and 'extract-version' steps, this final step seamlessly manages the release draft. Actions include: + +- Checking out the source code for the release process. +- Downloading the artifacts necessary for the release. +- Constructing a detailed changelog by extracting commit messages between the current and previous versions. +- Initiating the creation of a draft release on GitHub. The release template includes the changelog and convenient download links for the signed tarballs. 
diff --git a/hybrid-compute/deploy-local.py b/hybrid-compute/deploy-local.py index d75771d3..d09b1f76 100644 --- a/hybrid-compute/deploy-local.py +++ b/hybrid-compute/deploy-local.py @@ -69,7 +69,7 @@ solcx.install_solc("0.8.17") solcx.set_solc_version("0.8.17") contract_info = {} -PATH_PREFIX = "../crates/types/contracts/lib/account-abstraction/contracts/" +PATH_PREFIX = "../crates/types/contracts/lib/account-abstraction-versions/v0_6/contracts/" def load_contract(w, name, files): """Compiles a contract from source and loads its ABI""" @@ -77,7 +77,7 @@ def load_contract(w, name, files): files, output_values=['abi', 'bin', 'bin-runtime'], import_remappings={ - "@openzeppelin": "../crates/types/contracts/lib/openzeppelin-contracts"}, + "@openzeppelin": "../crates/types/contracts/lib/openzeppelin-contracts-versions/v4_9"}, allow_paths=[PATH_PREFIX], optimize=True, optimize_runs=1000000, @@ -204,25 +204,29 @@ def deploy_account(factory, owner): l2_util.sign_and_submit(tx, deploy_key) return acct_addr -def deploy_base(): - """Deploy the basic contracts needed for the local system""" - args = ["forge", "script", "--json", "--broadcast", "--silent"] - args.append ("--rpc-url=http://127.0.0.1:9545") - args.append("hc_scripts/LocalDeploy.s.sol") +def deploy_forge(script, cmd_env): + args = ["/home/enya/.foundry/bin/forge", "script", "--silent", "--json", "--broadcast"] + args.append("--rpc-url=http://127.0.0.1:9545") + args.append("--contracts") + args.append("lib/account-abstraction-versions/v0_6/contracts/core") + args.append("--remappings") + args.append("@openzeppelin/=lib/openzeppelin-contracts-versions/v4_9") + args.append(script) sys_env = os.environ.copy() - cmd_env = {} + cmd_env['PATH'] = sys_env['PATH'] cmd_env['PRIVATE_KEY'] = deploy_key - cmd_env['HC_SYS_OWNER'] = env_vars['HC_SYS_OWNER'] cmd_env['DEPLOY_ADDR'] = deploy_addr cmd_env['DEPLOY_SALT'] = cli_args.deploy_salt # Update to force redeployment - cmd_env['BOBA_TOKEN'] = boba_token + 
cmd_env['ENTRY_POINTS'] = env_vars['ENTRY_POINTS'] out = subprocess.run(args, cwd="../crates/types/contracts", env=cmd_env, capture_output=True, check=True) # Subprocess will fail if contracts were previously deployed but those addresses were # not passed in as env variables. Retry on a cleanly deployed devnet or change deploy_salt. + if out.returncode != 0: + print(out) assert out.returncode == 0 jstr = out.stdout.split(b'\n')[0].decode('ascii') @@ -230,25 +234,21 @@ def deploy_base(): addrs_raw = ret_json['returns']['0']['value'] # Need to parse the 'internal_type': 'address[5]' value addrs = addrs_raw[1:-1].replace(' ','') + return addrs + +def deploy_base(): + """Deploy the basic contracts needed for the local system""" + cmd_env = {} + cmd_env['HC_SYS_OWNER'] = env_vars['HC_SYS_OWNER'] + cmd_env['BOBA_TOKEN'] = boba_token + addrs = deploy_forge("hc_scripts/LocalDeploy.s.sol", cmd_env) print("Deployed base contracts:", addrs) return addrs.split(',') def deploy_examples(hybrid_acct_addr): - """Deploy example contracts""" - args = ["forge", "script", "--json", "--broadcast", "--silent"] - args.append ("--rpc-url=http://127.0.0.1:9545") - args.append("hc_scripts/ExampleDeploy.s.sol") - cmd_env = os.environ.copy() - cmd_env['PRIVATE_KEY'] = deploy_key + cmd_env = {} cmd_env['OC_HYBRID_ACCOUNT'] = hybrid_acct_addr - - out = subprocess.run(args, cwd="../crates/types/contracts", env=cmd_env,\ - capture_output=True, check=True) - assert out.returncode == 0 - jstr = out.stdout.split(b'\n')[0].decode('ascii') - ret_json = json.loads(jstr) - addrs_raw = ret_json['returns']['0']['value'] - addrs = addrs_raw[1:-1].replace(' ','') + addrs = deploy_forge("hc_scripts/ExampleDeploy.s.sol", cmd_env) print("Deployed example contracts:", addrs) return addrs.split(',') diff --git a/hybrid-compute/local.env b/hybrid-compute/local.env index 6dd0c11e..e57e1e32 100644 --- a/hybrid-compute/local.env +++ b/hybrid-compute/local.env @@ -12,3 +12,4 @@ 
OC_OWNER=0xE073fC0ff8122389F6e693DD94CcDc5AF637448e OC_PRIVKEY=0x7c0c629efc797f8c5f658919b7efbae01275470d59d03fdeb0fca1e6bd11d7fa CLIENT_OWNER=0x77Fe14A710E33De68855b0eA93Ed8128025328a9 CLIENT_PRIVKEY=0x541b3e3b20b8bb0e5bae310b2d4db4c8b7912ba09750e6ff161b7e67a26a9bf7 +ENTRY_POINTS=0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789 diff --git a/hybrid-compute/runit.sh b/hybrid-compute/runit.sh index 6ef95ae0..ffd1d596 100755 --- a/hybrid-compute/runit.sh +++ b/hybrid-compute/runit.sh @@ -5,5 +5,7 @@ RUST_BACKTRACE=1 ETH_POLL_INTERVAL_MILLIS=5000 \ ../target/debug/rundler node \ --rpc.port 3300 \ --metrics.port 8380 \ - --builder.private_key $BUILDER_PRIVKEY \ + --builder.private_keys $BUILDER_PRIVKEY \ + --disable_entry_point_v0_7 \ + --builder.dropped_status_unsupported \ $@ 2>&1 diff --git a/test/.env.default b/test/.env.default index 02a11f56..0cbaf77e 100644 --- a/test/.env.default +++ b/test/.env.default @@ -1,5 +1,5 @@ +NETWORK=dev BUILDER_PRIVATE_KEY=0x0000000000000000000000000000000000000000000000000000000000000002 -ENTRY_POINTS=0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789 METRICS_HOST=127.0.0.1 MIN_UNSTAKE_DELAY=2 NODE_HTTP=http://127.0.0.1:8545 diff --git a/test/spec-tests/bundler-spec-tests b/test/spec-tests/bundler-spec-tests deleted file mode 160000 index b8a738db..00000000 --- a/test/spec-tests/bundler-spec-tests +++ /dev/null @@ -1 +0,0 @@ -Subproject commit b8a738db703b9a135fdf21be9d377f381a33bc7d diff --git a/test/spec-tests/launchers/rundler-launcher/docker-compose.yml b/test/spec-tests/launchers/rundler-launcher/docker-compose.yml index 09f931a5..6f50c1fe 100644 --- a/test/spec-tests/launchers/rundler-launcher/docker-compose.yml +++ b/test/spec-tests/launchers/rundler-launcher/docker-compose.yml @@ -6,7 +6,7 @@ services: ports: - "3000:3000" - "8080:8080" - command: bash -c "/usr/local/bin/rundler node" + command: node environment: - RUST_LOG=debug - ENTRY_POINTS=0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789 @@ -14,6 +14,9 @@ services: - RPC_API=eth,debug - 
MIN_UNSTAKE_DELAY=2 - BUILDER_PRIVATE_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 + - PRIORITY_FEE_MODE_KIND=base_fee_percent + - PRIORITY_FEE_MODE_VALUE=0 + - POOL_THROTTLED_ENTITY_LIVE_BLOCKS=20 healthcheck: test: curl --fail http://rundler:3000/health || exit 1 interval: 1s diff --git a/test/spec-tests/local/.env b/test/spec-tests/local/.env index 2d5fd2bc..3b15e60a 100644 --- a/test/spec-tests/local/.env +++ b/test/spec-tests/local/.env @@ -1,8 +1,12 @@ -ENTRY_POINTS=0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789 +NETWORK=dev NODE_HTTP=http://localhost:8545 RUST_LOG=debug RPC_API=eth,debug RPC_HOST=127.0.0.1 METRICS_HOST=127.0.0.1 MIN_UNSTAKE_DELAY=2 +PRIORITY_FEE_MODE_KIND=base_fee_percent +PRIORITY_FEE_MODE_VALUE=0 BUILDER_PRIVATE_KEY=0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 +ENTRY_POINT_V0_6_ENABLED=false +ENTRY_POINT_V0_7_ENABLED=false diff --git a/test/spec-tests/local/docker-compose.yml b/test/spec-tests/local/docker-compose.yml index 4e7c8daf..6419041a 100644 --- a/test/spec-tests/local/docker-compose.yml +++ b/test/spec-tests/local/docker-compose.yml @@ -2,26 +2,18 @@ version: "3.8" services: geth: - image: ethereum/client-go:v1.10.26 - ports: - - "8545:8545" - - "8546:8546" - command: - - --miner.gaslimit=12000000 - - --http - - --http.api=personal,eth,net,web3,debug - - --http.vhosts=* - - --http.addr=0.0.0.0 - - --ws - - --ws.api=personal,eth,net,web3,debug - - --ws.addr=0.0.0.0 - - --ignore-legacy-receipts - - --allow-insecure-unlock - - --rpc.allow-unprotected-txs - - --dev - - --verbosity=2 - - --nodiscover - - --maxpeers=0 - - --mine - - --miner.threads=1 - - --networkid=1337 + image: ethereum/client-go:release-1.14 + ports: [ '8545:8545' ] + command: --verbosity 1 + --http.vhosts '*,localhost,host.docker.internal' + --http + --http.api eth,net,web3,debug + --http.corsdomain '*' + --http.addr "0.0.0.0" + --networkid 1337 + --dev + --dev.period 0 + --allow-insecure-unlock + 
--rpc.allow-unprotected-txs + --rpc.txfeecap 0 + --dev.gaslimit 20000000 diff --git a/test/spec-tests/local/launcher.sh b/test/spec-tests/local/launcher.sh index b8ceb08d..fb967e5e 100755 --- a/test/spec-tests/local/launcher.sh +++ b/test/spec-tests/local/launcher.sh @@ -7,16 +7,20 @@ case $1 in start) docker-compose up -d sleep 10 - cast send --unlocked --from $(cast rpc eth_accounts | tail -n 1 | tr -d '[]"') --value 1ether 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 > /dev/null - (cd ../bundler-spec-tests/@account-abstraction && yarn deploy --network localhost) + cast send --unlocked --from $(cast rpc eth_accounts | tail -n 1 | tr -d '[]"') --value 100ether 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 > /dev/null + (cd ../$2/bundler-spec-tests/@account-abstraction && yarn deploy --network localhost) ../../../target/debug/rundler node --log.file out.log & while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' localhost:3000/health)" != "200" ]]; do sleep 1 ; done ;; stop) pkill rundler - docker-compose down -t 3 + docker-compose down -t 3 ;; *) - echo "usage: $0 {start|stop|name}" + cat < /dev/null - cd ../bundler-spec-tests/@account-abstraction && yarn deploy --network localhost + cd ../$2/bundler-spec-tests/@account-abstraction && yarn deploy --network localhost ;; stop) docker-compose down -t 3 ;; *) - echo "usage: $0 {start|stop|name}" + cat < Date: Thu, 26 Sep 2024 14:43:10 -0700 Subject: [PATCH 02/13] Merge cleanup Changes to be committed: deleted: .github/workflows/docker-images.yml deleted: crates/provider/src/ethers/nonce_manager.rs deleted: crates/provider/src/traits/nonce_manager.rs --- .github/workflows/docker-images.yml | 85 --------------------- crates/provider/src/ethers/nonce_manager.rs | 28 ------- crates/provider/src/traits/nonce_manager.rs | 27 ------- 3 files changed, 140 deletions(-) delete mode 100644 .github/workflows/docker-images.yml delete mode 100644 crates/provider/src/ethers/nonce_manager.rs delete mode 100644 
crates/provider/src/traits/nonce_manager.rs diff --git a/.github/workflows/docker-images.yml b/.github/workflows/docker-images.yml deleted file mode 100644 index 67caac1a..00000000 --- a/.github/workflows/docker-images.yml +++ /dev/null @@ -1,85 +0,0 @@ -name: Docker Images - -on: - workflow_dispatch: - push: - tags: - - '*' - -jobs: - publish-images: - name: Publish Images - runs-on: ubuntu-latest - outputs: - rundler-hc: ${{ steps.packages.outputs.rundler-hc }} - offchain-rpc: ${{ steps.packages.outputs.offchain-rpc }} - - steps: - - name: Check out source code - uses: actions/checkout/@v4 - with: - fetch-depth: 0 - - rundler-hc: - name: Publish rundler-hc - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: DockerHub login - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }} - password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }} - - - name: Metadata - id: meta - uses: docker/metadata-action@v5 - with: - images: bobanetwork/rundler-hc - tags: | - type=ref,event=tag - type=sha - - - name: Build and push - uses: docker/build-push-action@v5 - with: - context: . 
- file: Dockerfile - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - - offchain-rpc: - name: Publish offchain-rpc - runs-on: ubuntu-latest - - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: DockerHub login - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_USERNAME }} - password: ${{ secrets.DOCKERHUB_ACCESS_TOKEN_SECRET }} - - - name: Metadata - id: meta - uses: docker/metadata-action@v5 - with: - images: bobanetwork/offchain-rpc - tags: | - type=ref,event=tag - type=sha - - - name: Build and push - uses: docker/build-push-action@v5 - with: - context: ./hybrid-compute - file: hybrid-compute/Dockerfile.offchain-rpc - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} diff --git a/crates/provider/src/ethers/nonce_manager.rs b/crates/provider/src/ethers/nonce_manager.rs deleted file mode 100644 index 5cb12f3e..00000000 --- a/crates/provider/src/ethers/nonce_manager.rs +++ /dev/null @@ -1,28 +0,0 @@ -// This file is part of Rundler. -// -// Rundler is free software: you can redistribute it and/or modify it under the -// terms of the GNU Lesser General Public License as published by the Free Software -// Foundation, either version 3 of the License, or (at your option) any later version. -// -// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; -// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License along with Rundler. -// If not, see https://www.gnu.org/licenses/. 
- -use anyhow::Result; -use ethers::{providers::Middleware, types::Address}; -use rundler_types::{contracts::i_nonce_manager::INonceManager}; - -use crate::NonceManager; - -#[async_trait::async_trait] -impl NonceManager for INonceManager -where - M: Middleware + 'static, -{ - async fn get_nonce(&self, address: Address, key: ::ethers::core::types::U256) -> Result<::ethers::core::types::U256> { - Ok(INonceManager::get_nonce(self, address, key).await?) - } -} diff --git a/crates/provider/src/traits/nonce_manager.rs b/crates/provider/src/traits/nonce_manager.rs deleted file mode 100644 index e148abf0..00000000 --- a/crates/provider/src/traits/nonce_manager.rs +++ /dev/null @@ -1,27 +0,0 @@ -// This file is part of Rundler. -// -// Rundler is free software: you can redistribute it and/or modify it under the -// terms of the GNU Lesser General Public License as published by the Free Software -// Foundation, either version 3 of the License, or (at your option) any later version. -// -// Rundler is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; -// without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -// See the GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License along with Rundler. -// If not, see https://www.gnu.org/licenses/. - -use ethers::types::Address; -#[cfg(feature = "test-utils")] -use mockall::automock; -//use rundler_types::contracts::i_stake_manager::DepositInfo; - -/// Trait for interacting with an stake manager contract. -/// Implemented for the v0.6 version of the stake manager contract. -/// [Contracts can be found here](https://github.com/eth-infinitism/account-abstraction/tree/v0.6.0). 
-#[cfg_attr(feature = "test-utils", automock)] -#[async_trait::async_trait] -pub trait NonceManager: Send + Sync + 'static { - /// Get the deposit info from address - async fn get_nonce(&self, address: Address, key: ::ethers::core::types::U256) -> anyhow::Result<::ethers::core::types::U256>; -} From 279a8de8913e64d8e03c5b02823eba8fcba4b847 Mon Sep 17 00:00:00 2001 From: Michael Montour Date: Thu, 26 Sep 2024 21:05:24 -0700 Subject: [PATCH 03/13] Fix contract building, adding @gnosis.pm remapping to https://github.com/safe-global/safe-smart-account.git Changes to be committed: modified: .gitmodules modified: crates/types/build.rs new file: crates/types/contracts/lib/safe-smart-account --- .gitmodules | 3 +++ crates/types/build.rs | 13 ++++++++++++- crates/types/contracts/lib/safe-smart-account | 1 + 3 files changed, 16 insertions(+), 1 deletion(-) create mode 160000 crates/types/contracts/lib/safe-smart-account diff --git a/.gitmodules b/.gitmodules index d130ed7e..1a48975a 100644 --- a/.gitmodules +++ b/.gitmodules @@ -26,3 +26,6 @@ path = test/spec-tests/v0_7/bundler-spec-tests url = git@github.com:alchemyplatform/bundler-spec-tests.git ignore = dirty +[submodule "crates/types/contracts/lib/safe-smart-account"] + path = crates/types/contracts/lib/safe-smart-account + url = https://github.com/safe-global/safe-smart-account.git diff --git a/crates/types/build.rs b/crates/types/build.rs index ab32c291..3a854f83 100644 --- a/crates/types/build.rs +++ b/crates/types/build.rs @@ -36,6 +36,17 @@ fn generate_v0_6_bindings() -> Result<(), Box> { "generate ABIs", )?; + // hybrid compute + run_command( + forge_build("../lib/account-abstraction-versions/v0_6/contracts") + .arg("--remappings") + .arg("@openzeppelin/=lib/openzeppelin-contracts-versions/v4_9") + .arg("--remappings") + .arg("@gnosis.pm/safe-contracts=lib/safe-smart-account"), + "https://getfoundry.sh/", + "generate ABIs", + )?; + MultiAbigen::from_abigens([ abigen_of("v0_6", "IEntryPoint")?, abigen_of("v0_6", 
"IAggregator")?, @@ -47,7 +58,7 @@ fn generate_v0_6_bindings() -> Result<(), Box> { abigen_of("v0_6", "CallGasEstimationProxy")?, // hybrid compute abigen_of("v0_6", "INonceManager")?, - abigen_of("", "HCHelper")?, + abigen_of("v0_6", "HCHelper")?, ]) .build()? .write_to_module("src/contracts/v0_6", false)?; diff --git a/crates/types/contracts/lib/safe-smart-account b/crates/types/contracts/lib/safe-smart-account new file mode 160000 index 00000000..186a21a7 --- /dev/null +++ b/crates/types/contracts/lib/safe-smart-account @@ -0,0 +1 @@ +Subproject commit 186a21a74b327f17fc41217a927dea7064f74604 From 2512878fdbed710aac74719ed87486149e189bcd Mon Sep 17 00:00:00 2001 From: Michael Montour Date: Fri, 27 Sep 2024 13:07:42 -0700 Subject: [PATCH 04/13] Change "docker-compose" to "docker compose" Changes to be committed: modified: test/spec-tests/launchers/rundler-launcher/rundler-launcher.sh modified: test/spec-tests/local/launcher.sh modified: test/spec-tests/remote/launcher.sh --- .../spec-tests/launchers/rundler-launcher/rundler-launcher.sh | 4 ++-- test/spec-tests/local/launcher.sh | 4 ++-- test/spec-tests/remote/launcher.sh | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/test/spec-tests/launchers/rundler-launcher/rundler-launcher.sh b/test/spec-tests/launchers/rundler-launcher/rundler-launcher.sh index 69a736af..88d2ead6 100755 --- a/test/spec-tests/launchers/rundler-launcher/rundler-launcher.sh +++ b/test/spec-tests/launchers/rundler-launcher/rundler-launcher.sh @@ -10,13 +10,13 @@ case $1 in ;; start) - docker-compose up -d + docker compose up -d ./waitForServices.sh cast send --from $(cast rpc eth_accounts | tail -n 1 | tr -d '[]"') --unlocked --value 1ether 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 > /dev/null cd ../../bundler-spec-tests/@account-abstraction && yarn deploy --network localhost ;; stop) - docker-compose down -t 3 + docker compose down -t 3 ;; *) diff --git a/test/spec-tests/local/launcher.sh 
b/test/spec-tests/local/launcher.sh index fb967e5e..3d37fe06 100755 --- a/test/spec-tests/local/launcher.sh +++ b/test/spec-tests/local/launcher.sh @@ -5,7 +5,7 @@ cd `dirname \`realpath $0\`` case $1 in start) - docker-compose up -d + docker compose up -d sleep 10 cast send --unlocked --from $(cast rpc eth_accounts | tail -n 1 | tr -d '[]"') --value 100ether 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 > /dev/null (cd ../$2/bundler-spec-tests/@account-abstraction && yarn deploy --network localhost) @@ -14,7 +14,7 @@ case $1 in ;; stop) pkill rundler - docker-compose down -t 3 + docker compose down -t 3 ;; *) diff --git a/test/spec-tests/remote/launcher.sh b/test/spec-tests/remote/launcher.sh index 92fdda2f..ff0fddfd 100755 --- a/test/spec-tests/remote/launcher.sh +++ b/test/spec-tests/remote/launcher.sh @@ -27,12 +27,12 @@ usage: EOF exit 1 esac - docker-compose up -d --wait + docker compose up -d --wait cast send --unlocked --from $(cast rpc eth_accounts | tail -n 1 | tr -d '[]"') --value 1ether 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 > /dev/null cd ../$2/bundler-spec-tests/@account-abstraction && yarn deploy --network localhost ;; stop) - docker-compose down -t 3 + docker compose down -t 3 ;; *) From 8633561c1d1804b6553f1d57df06af02a98ccae4 Mon Sep 17 00:00:00 2001 From: Michael Montour Date: Fri, 27 Sep 2024 13:14:16 -0700 Subject: [PATCH 05/13] Set toolchain version Changes to be committed: modified: .github/workflows/release.yaml modified: .github/workflows/unit.yaml --- .github/workflows/release.yaml | 1 + .github/workflows/unit.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index da7626c7..ad7b5afd 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -58,6 +58,7 @@ jobs: - uses: dtolnay/rust-toolchain@stable with: target: ${{ matrix.target }} + toolchain: 1.79.0 - uses: taiki-e/install-action@cross - uses: Swatinem/rust-cache@v2 with: diff --git 
a/.github/workflows/unit.yaml b/.github/workflows/unit.yaml index 7895b96c..062821dd 100644 --- a/.github/workflows/unit.yaml +++ b/.github/workflows/unit.yaml @@ -26,6 +26,7 @@ jobs: uses: dtolnay/rust-toolchain@stable with: components: llvm-tools-preview + toolchain: 1.79.0 - name: Install protobuf run: sudo apt-get install -y protobuf-compiler From 24695f8d8a8b0d02bb4d8185fd2a4f1303212b70 Mon Sep 17 00:00:00 2001 From: Michael Montour Date: Fri, 27 Sep 2024 13:35:37 -0700 Subject: [PATCH 06/13] Cleanup - "cargo fmt" Changes to be committed: modified: bin/rundler/src/cli/mod.rs modified: crates/builder/src/bundle_proposer.rs modified: crates/builder/src/bundle_sender.rs modified: crates/builder/src/transaction_tracker.rs modified: crates/provider/src/ethers/entry_point/v0_6.rs modified: crates/provider/src/ethers/entry_point/v0_7.rs modified: crates/provider/src/ethers/provider.rs modified: crates/provider/src/traits/entry_point.rs modified: crates/provider/src/traits/test_utils.rs modified: crates/rpc/src/eth/api.rs modified: crates/rpc/src/eth/router.rs modified: crates/rpc/src/task.rs modified: crates/sim/src/estimation/estimate_verification_gas.rs modified: crates/sim/src/estimation/v0_6.rs modified: crates/sim/src/gas/gas.rs modified: crates/sim/src/simulation/v0_6/tracer.rs modified: crates/types/src/hybrid_compute.rs modified: crates/types/src/user_operation/v0_6.rs modified: crates/types/src/user_operation/v0_7.rs --- bin/rundler/src/cli/mod.rs | 18 +- crates/builder/src/bundle_proposer.rs | 125 +++-- crates/builder/src/bundle_sender.rs | 2 +- crates/builder/src/transaction_tracker.rs | 17 +- .../provider/src/ethers/entry_point/v0_6.rs | 17 +- .../provider/src/ethers/entry_point/v0_7.rs | 6 +- crates/provider/src/ethers/provider.rs | 13 +- crates/provider/src/traits/entry_point.rs | 7 +- crates/provider/src/traits/test_utils.rs | 2 +- crates/rpc/src/eth/api.rs | 527 +++++++++++------- crates/rpc/src/eth/router.rs | 32 +- crates/rpc/src/task.rs | 4 +- 
.../estimation/estimate_verification_gas.rs | 16 +- crates/sim/src/estimation/v0_6.rs | 7 +- crates/sim/src/gas/gas.rs | 22 +- crates/sim/src/simulation/v0_6/tracer.rs | 2 +- crates/types/src/hybrid_compute.rs | 452 +++++++++------ crates/types/src/user_operation/v0_6.rs | 8 +- crates/types/src/user_operation/v0_7.rs | 2 +- 19 files changed, 809 insertions(+), 470 deletions(-) diff --git a/bin/rundler/src/cli/mod.rs b/bin/rundler/src/cli/mod.rs index fca7baad..2dadaf27 100644 --- a/bin/rundler/src/cli/mod.rs +++ b/bin/rundler/src/cli/mod.rs @@ -63,7 +63,10 @@ pub async fn run() -> anyhow::Result<()> { //opt.common.entry_points[0].parse::
().expect("Must provide an entry_point"), cs.entry_point_address_v0_6, cs.id, - opt.common.node_http.clone().expect("Must provide node_http"), + opt.common + .node_http + .clone() + .expect("Must provide node_http"), ); match opt.command { @@ -320,31 +323,26 @@ pub struct CommonArgs { #[arg( long = "hc_helper_addr", name = "hc_helper_addr", - env = "HC_HELPER_ADDR", + env = "HC_HELPER_ADDR" )] hc_helper_addr: Address, #[arg( long = "hc_sys_account", name = "hc_sys_account", - env = "HC_SYS_ACCOUNT", + env = "HC_SYS_ACCOUNT" )] hc_sys_account: Address, - #[arg( - long = "hc_sys_owner", - name = "hc_sys_owner", - env = "HC_SYS_OWNER", - )] + #[arg(long = "hc_sys_owner", name = "hc_sys_owner", env = "HC_SYS_OWNER")] hc_sys_owner: Address, #[arg( long = "hc_sys_privkey", name = "hc_sys_privkey", - env = "HC_SYS_PRIVKEY", + env = "HC_SYS_PRIVKEY" )] hc_sys_privkey: H256, - } const SIMULATION_GAS_OVERHEAD: u64 = 100_000; diff --git a/crates/builder/src/bundle_proposer.rs b/crates/builder/src/bundle_proposer.rs index 0c1ed11a..51898bbb 100644 --- a/crates/builder/src/bundle_proposer.rs +++ b/crates/builder/src/bundle_proposer.rs @@ -164,7 +164,11 @@ where UO: UserOperation + From, UserOperationVariant: AsRef, S: Simulator, - E: EntryPoint + SignatureAggregator + BundleHandler + L1GasProvider + SimulationProvider, + E: EntryPoint + + SignatureAggregator + + BundleHandler + + L1GasProvider + + SimulationProvider, P: Provider, M: Pool, { @@ -255,9 +259,12 @@ where .into_iter() .flatten() .collect::>(); - if ops_with_simulations.len() > 0 { - println!("HC bundle_proposer before assemble_context len {:?}", ops_with_simulations.len()); - } + if ops_with_simulations.len() > 0 { + println!( + "HC bundle_proposer before assemble_context len {:?}", + ops_with_simulations.len() + ); + } let mut context = self .assemble_context(ops_with_simulations, balances_by_paymaster) .await; @@ -310,7 +317,11 @@ where UO: UserOperation + From, UserOperationVariant: AsRef, S: Simulator, - E: 
EntryPoint + SignatureAggregator + BundleHandler + L1GasProvider + SimulationProvider, + E: EntryPoint + + SignatureAggregator + + BundleHandler + + L1GasProvider + + SimulationProvider, P: Provider, M: Pool, { @@ -399,23 +410,34 @@ where } }; - let hc_hash = op.uo.hc_hash(); - let mut is_hc:bool = false; + let hc_hash = op.uo.hc_hash(); + let mut is_hc: bool = false; - if let Some(hc_pvg) = hybrid_compute::hc_get_pvg(hc_hash) { - println!("HC pvg override for op_hash {:?} {:?} {:?}", hc_hash, required_pvg, hc_pvg); - is_hc = true; + if let Some(hc_pvg) = hybrid_compute::hc_get_pvg(hc_hash) { + println!( + "HC pvg override for op_hash {:?} {:?} {:?}", + hc_hash, required_pvg, hc_pvg + ); + is_hc = true; if hc_pvg > required_pvg { - required_pvg = hc_pvg; + required_pvg = hc_pvg; } - } else { - println!("HC no pvg override for op_hash {:?}, required_pvg {:?}", hc_hash, required_pvg); - } + } else { + println!( + "HC no pvg override for op_hash {:?}, required_pvg {:?}", + hc_hash, required_pvg + ); + } if op.uo.pre_verification_gas() < required_pvg { if is_hc { // Workaround - reject op here instead of waiting indefinitely. 
- println!("HC WARN rejecting op_hash {:?}, pre_verifification_gas {:?} < {:?}", hc_hash, op.uo.pre_verification_gas(), required_pvg); + println!( + "HC WARN rejecting op_hash {:?}, pre_verifification_gas {:?} < {:?}", + hc_hash, + op.uo.pre_verification_gas(), + required_pvg + ); self.emit(BuilderEvent::rejected_op( self.builder_index, @@ -506,7 +528,7 @@ where let mut paymasters_to_reject = Vec::::new(); let mut gas_spent = self.settings.chain_spec.transaction_intrinsic_gas; - let mut cleanup_keys:Vec = Vec::new(); + let mut cleanup_keys: Vec = Vec::new(); let mut constructed_bundle_size = BUNDLE_BYTE_OVERHEAD; for (po, simulation) in ops_with_simulations { let op = po.clone().uo; @@ -568,10 +590,13 @@ where let hc_ent = hybrid_compute::get_hc_ent(hc_hash); if hc_ent.is_some() { required_gas += hc_ent.clone().unwrap().oc_gas; - println!("HC bundle_properer found hc_ent {:?} op_hash {:?} required_gas {:?}", hc_ent, hc_hash, required_gas); + println!( + "HC bundle_properer found hc_ent {:?} op_hash {:?} required_gas {:?}", + hc_ent, hc_hash, required_gas + ); } - if required_gas > self.settings.max_bundle_gas.into() { + if required_gas > self.settings.max_bundle_gas.into() { continue; } @@ -612,18 +637,24 @@ where if hc_ent.is_some() { gas_spent += hc_ent.clone().unwrap().oc_gas; //println!("HC insert, hc_ent {:?}", hc_ent); - let u_op2:UserOperationVariant = hc_ent.clone().unwrap().user_op.into(); + let u_op2: UserOperationVariant = hc_ent.clone().unwrap().user_op.into(); - let sim_result = self.simulator.simulate_validation(u_op2.clone().into(), None, None).await.expect("Failed to unwrap sim_result"); // FIXME + let sim_result = self + .simulator + .simulate_validation(u_op2.clone().into(), None, None) + .await + .expect("Failed to unwrap sim_result"); // FIXME context .groups_by_aggregator .entry(simulation.aggregator_address()) .or_default() .ops_with_simulations - .push(OpWithSimulation { op:u_op2.into(), simulation:sim_result }); + .push(OpWithSimulation { + 
op: u_op2.into(), + simulation: sim_result, + }); cleanup_keys.push(hc_ent.clone().unwrap().map_key); - } constructed_bundle_size = @@ -641,20 +672,34 @@ where } if cleanup_keys.len() > 0 { - println!("HC cleanup_keys {:?}", cleanup_keys); - let cfg = hybrid_compute::HC_CONFIG.lock().unwrap().clone(); - let c_nonce = self.entry_point.get_nonce(cfg.sys_account, U256::zero()).await.unwrap(); - let cleanup_op:UserOperationVariant = hybrid_compute::rr_op(&cfg, c_nonce, cleanup_keys).await.into(); + println!("HC cleanup_keys {:?}", cleanup_keys); + let cfg = hybrid_compute::HC_CONFIG.lock().unwrap().clone(); + let c_nonce = self + .entry_point + .get_nonce(cfg.sys_account, U256::zero()) + .await + .unwrap(); + let cleanup_op: UserOperationVariant = + hybrid_compute::rr_op(&cfg, c_nonce, cleanup_keys) + .await + .into(); - let cleanup_sim = self.simulator.simulate_validation(cleanup_op.clone().into(), None, None).await.expect("Failed to unwrap sim_result"); // FIXME + let cleanup_sim = self + .simulator + .simulate_validation(cleanup_op.clone().into(), None, None) + .await + .expect("Failed to unwrap sim_result"); // FIXME - context + context .groups_by_aggregator .entry(None) .or_default() .ops_with_simulations - .push(OpWithSimulation { op:cleanup_op.into(), simulation:cleanup_sim }); - } + .push(OpWithSimulation { + op: cleanup_op.into(), + simulation: cleanup_sim, + }); + } for paymaster in paymasters_to_reject { // No need to update aggregator signatures because we haven't computed them yet. 
@@ -791,7 +836,11 @@ where ); // call handle ops with the bundle to filter any rejected ops before sending - println!("HC bundle_proposer gas1 {:?} {:?}", gas, context.to_ops_per_aggregator()); + println!( + "HC bundle_proposer gas1 {:?} {:?}", + gas, + context.to_ops_per_aggregator() + ); let handle_ops_out = self .entry_point .call_handle_ops( @@ -1204,18 +1253,26 @@ impl ProposalContext { for (&aggregator, group) in &mut self.groups_by_aggregator { if remaining_i < group.ops_with_simulations.len() { let rejected = group.ops_with_simulations.remove(remaining_i); - println!("HC reject_index at {:?} of {:?} - {:?}", i, group.ops_with_simulations.len(), rejected.op); + println!( + "HC reject_index at {:?} of {:?} - {:?}", + i, + group.ops_with_simulations.len(), + rejected.op + ); if rejected.op.max_fee_per_gas() == U256::from(0) { // Assume an Offchain op if i == group.ops_with_simulations.len() { - println!("HC ERR rejecting Cleanup op {:?}", rejected.op); + println!("HC ERR rejecting Cleanup op {:?}", rejected.op); } else { println!("HC ERR rejecting offchain op {:?}", rejected.op); } } else { let hc_hash = rejected.op.hc_hash(); let hc_ent = hybrid_compute::get_hc_ent(hc_hash); - println!("HC rejecting regular op with hash {:?} paired_op {:?}", hc_hash, hc_ent); + println!( + "HC rejecting regular op with hash {:?} paired_op {:?}", + hc_hash, hc_ent + ); if hc_ent.is_some() { todo!("Should remove paired op"); } diff --git a/crates/builder/src/bundle_sender.rs b/crates/builder/src/bundle_sender.rs index 088a130a..424bbfac 100644 --- a/crates/builder/src/bundle_sender.rs +++ b/crates/builder/src/bundle_sender.rs @@ -21,6 +21,7 @@ use futures_util::StreamExt; use mockall::automock; use rundler_provider::{BundleHandler, EntryPoint}; use rundler_sim::ExpectedStorage; +use rundler_types::hybrid_compute; use rundler_types::{ builder::BundlingMode, chain::ChainSpec, @@ -33,7 +34,6 @@ use tokio::{ sync::{broadcast, mpsc, mpsc::UnboundedReceiver, oneshot}, }; use 
tracing::{debug, error, info, instrument, warn}; -use rundler_types::hybrid_compute; use crate::{ bundle_proposer::{Bundle, BundleProposer, BundleProposerError}, diff --git a/crates/builder/src/transaction_tracker.rs b/crates/builder/src/transaction_tracker.rs index 16b402b6..87d34a17 100644 --- a/crates/builder/src/transaction_tracker.rs +++ b/crates/builder/src/transaction_tracker.rs @@ -227,7 +227,10 @@ where self.provider.get_transaction(tx_hash), self.provider.get_transaction_receipt(tx_hash), )?; - println!("HC get_mined_tx_gas_info looking for hash {:?} got tx {:?} receipt {:?}", tx_hash, tx, tx_receipt); + println!( + "HC get_mined_tx_gas_info looking for hash {:?} got tx {:?} receipt {:?}", + tx_hash, tx, tx_receipt + ); let gas_limit = tx.map(|t| t.gas).or_else(|| { warn!("failed to fetch transaction data for tx: {}", tx_hash); None @@ -374,7 +377,10 @@ where async fn check_for_update(&mut self) -> TransactionTrackerResult> { let external_nonce = self.get_external_nonce().await?; if self.nonce < external_nonce { - println!("HC check_for_update_now at self.nonce {:?} external_nonce {:?}", self.nonce, external_nonce); + println!( + "HC check_for_update_now at self.nonce {:?} external_nonce {:?}", + self.nonce, external_nonce + ); // The nonce has changed. Check to see which of our transactions has // mined, if any. 
debug!( @@ -389,8 +395,11 @@ where .get_transaction_status(tx.tx_hash) .await .context("tracker should check transaction status when the nonce changes")?; - println!("HC check_for_update_now status after nonce change {:?}", status); - info!("Status of tx {:?}: {:?}", tx.tx_hash, status); + println!( + "HC check_for_update_now status after nonce change {:?}", + status + ); + info!("Status of tx {:?}: {:?}", tx.tx_hash, status); if let TxStatus::Mined { block_number } = status { let (gas_limit, gas_used, gas_price) = self.get_mined_tx_gas_info(tx.tx_hash).await?; diff --git a/crates/provider/src/ethers/entry_point/v0_6.rs b/crates/provider/src/ethers/entry_point/v0_6.rs index b2fed43a..6ebb9fb3 100644 --- a/crates/provider/src/ethers/entry_point/v0_6.rs +++ b/crates/provider/src/ethers/entry_point/v0_6.rs @@ -283,17 +283,17 @@ where user_op: UserOperation, max_validation_gas: u64, ) -> (TypedTransaction, spoof::State) { - let pvg = user_op.pre_verification_gas; // FIXME HC + let pvg = user_op.pre_verification_gas; // FIXME HC let gas_price = user_op.max_fee_per_gas; let mut call = self .i_entry_point .simulate_validation(user_op) .gas(U256::from(max_validation_gas) + pvg) // FIXME HC .tx; - let from_addr = hybrid_compute::HC_CONFIG.lock().unwrap().from_addr; - call.set_from(from_addr); - call.set_gas_price(gas_price); - //println!("HC entry_point.rs s_v {:?} {:?} {:?} {:?} gas_price", max_validation_gas, pvg, tx, gas_price); + let from_addr = hybrid_compute::HC_CONFIG.lock().unwrap().from_addr; + call.set_from(from_addr); + call.set_gas_price(gas_price); + //println!("HC entry_point.rs s_v {:?} {:?} {:?} {:?} gas_price", max_validation_gas, pvg, tx, gas_price); (call, spoof::State::default()) } @@ -388,11 +388,14 @@ where true } - async fn get_nonce(&self, address: Address, key: ::ethers::core::types::U256) -> Result<::ethers::core::types::U256, String> { + async fn get_nonce( + &self, + address: Address, + key: ::ethers::core::types::U256, + ) -> 
Result<::ethers::core::types::U256, String> { let ret = self.i_entry_point.get_nonce(address, key).await; Ok(ret.unwrap()) } - } impl

EntryPointProvider for EntryPoint

where diff --git a/crates/provider/src/ethers/entry_point/v0_7.rs b/crates/provider/src/ethers/entry_point/v0_7.rs index 074ad1fc..1da5261c 100644 --- a/crates/provider/src/ethers/entry_point/v0_7.rs +++ b/crates/provider/src/ethers/entry_point/v0_7.rs @@ -388,7 +388,11 @@ where false } - async fn get_nonce(&self, address: Address, key: ::ethers::core::types::U256) -> Result<::ethers::core::types::U256, String> { + async fn get_nonce( + &self, + address: Address, + key: ::ethers::core::types::U256, + ) -> Result<::ethers::core::types::U256, String> { let ret = self.i_entry_point.get_nonce(address, key).await; Ok(ret.unwrap()) } diff --git a/crates/provider/src/ethers/provider.rs b/crates/provider/src/ethers/provider.rs index bc2cb73c..7242feb0 100644 --- a/crates/provider/src/ethers/provider.rs +++ b/crates/provider/src/ethers/provider.rs @@ -141,9 +141,18 @@ impl Provider for EthersProvider { block_id: Option, trace_options: GethDebugTracingCallOptions, ) -> ProviderResult { - println!("HC debug_trace_call overrides {:?} tx {:?}", trace_options.state_overrides, tx); + println!( + "HC debug_trace_call overrides {:?} tx {:?}", + trace_options.state_overrides, tx + ); println!("HC will use BlockNumber::Latest instead of {:?}", block_id); - let ret = Middleware::debug_trace_call(self, tx, Some(ethers::types::BlockId::Number(BlockNumber::Latest)), trace_options).await; + let ret = Middleware::debug_trace_call( + self, + tx, + Some(ethers::types::BlockId::Number(BlockNumber::Latest)), + trace_options, + ) + .await; println!("HC debug_trace_call ret {:?}", ret); Ok(ret?) 
} diff --git a/crates/provider/src/traits/entry_point.rs b/crates/provider/src/traits/entry_point.rs index 67ab23d8..eef7c74b 100644 --- a/crates/provider/src/traits/entry_point.rs +++ b/crates/provider/src/traits/entry_point.rs @@ -235,8 +235,11 @@ pub trait SimulationProvider: Send + Sync + 'static { fn simulation_should_revert(&self) -> bool; /// Return the AA nonce for the given sender and key - async fn get_nonce(&self, address: Address, key: ::ethers::core::types::U256) -> Result<::ethers::core::types::U256, String>; - + async fn get_nonce( + &self, + address: Address, + key: ::ethers::core::types::U256, + ) -> Result<::ethers::core::types::U256, String>; } /// Trait for a provider that provides all entry point functionality diff --git a/crates/provider/src/traits/test_utils.rs b/crates/provider/src/traits/test_utils.rs index 4b149fb0..fd8f8d60 100644 --- a/crates/provider/src/traits/test_utils.rs +++ b/crates/provider/src/traits/test_utils.rs @@ -85,7 +85,7 @@ mockall::mock! { ) -> Result; fn simulation_should_revert(&self) -> bool; async fn get_nonce(&self, address: Address, key: ::ethers::core::types::U256) -> Result<::ethers::core::types::U256, String>; - + } #[async_trait::async_trait] diff --git a/crates/rpc/src/eth/api.rs b/crates/rpc/src/eth/api.rs index 01a4b470..8bd863c2 100644 --- a/crates/rpc/src/eth/api.rs +++ b/crates/rpc/src/eth/api.rs @@ -11,22 +11,17 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. 
-use std::{ - collections::{HashMap}, - future::Future, - pin::Pin, -}; +use std::{collections::HashMap, future::Future, pin::Pin}; use ethers::{ types::{spoof, Address, H256, U64}, - utils::{to_checksum, hex}, + utils::{hex, to_checksum}, }; use futures_util::future; use rundler_types::{ - chain::ChainSpec, pool::Pool, UserOperation, UserOperationOptionalGas, UserOperationVariant, - contracts::v0_6::hc_helper::{HCHelper as HH2}, - contracts::v0_6::simple_account::SimpleAccount, - + chain::ChainSpec, contracts::v0_6::hc_helper::HCHelper as HH2, + contracts::v0_6::simple_account::SimpleAccount, pool::Pool, UserOperation, + UserOperationOptionalGas, UserOperationVariant, }; use rundler_utils::log::LogOnError; use tracing::Level; @@ -37,13 +32,13 @@ use super::{ }; use crate::types::{RpcGasEstimate, RpcUserOperationByHash, RpcUserOperationReceipt}; -use rundler_types::hybrid_compute; -use ethers::types::{U256,Bytes}; +use crate::types::RpcGasEstimateV0_6; +use ethers::types::{Bytes, U256}; use jsonrpsee::{ core::{client::ClientT, params::ObjectParams, JsonValue}, - http_client::{HttpClientBuilder}, + http_client::HttpClientBuilder, }; -use crate::types::RpcGasEstimateV0_6; +use rundler_types::hybrid_compute; /// Settings for the `eth_` API #[derive(Copy, Clone, Debug)] @@ -64,10 +59,10 @@ impl Settings { // Can't track down what's causing the gas differences between // simulateHandleOps and SimulateValidation, so pad it and // hope for the best. Unused gas will be refunded. -const VG_PAD:i32 = 20000; +const VG_PAD: i32 = 20000; // FIXME - Workaround for another occasional failure. -const PVG_PAD:i32 = 5000; +const PVG_PAD: i32 = 5000; pub(crate) struct EthApi

{ pub(crate) chain_spec: ChainSpec, @@ -81,7 +76,7 @@ where P: Pool, { pub(crate) fn new(chain_spec: ChainSpec, router: EntryPointRouter, pool: P) -> Self { - let hc = hybrid_compute::HC_CONFIG.lock().unwrap().clone(); + let hc = hybrid_compute::HC_CONFIG.lock().unwrap().clone(); Self { router, pool, @@ -95,7 +90,7 @@ where op: UserOperationVariant, entry_point: Address, ) -> EthResult { - println!("HC send_user_operation {:?}", op); + println!("HC send_user_operation {:?}", op); let bundle_size = op.single_uo_bundle_size_bytes(); if bundle_size > self.chain_spec.max_transaction_size_bytes { return Err(EthRpcError::InvalidParams(format!( @@ -121,24 +116,27 @@ where state_override: Option, ) -> bool { let mut s2 = state_override.clone().unwrap_or_default(); - let hc_addr = self.hc.helper_addr; + let hc_addr = self.hc.helper_addr; - // Set a 1-byte value which will trigger a special revert code - let val_vrfy = "0xff00000000000000000000000000000000000000000000000000000000000002".parse::().unwrap(); - s2.account(hc_addr).store(key, H256::from_slice(&val_vrfy)); + // Set a 1-byte value which will trigger a special revert code + let val_vrfy = "0xff00000000000000000000000000000000000000000000000000000000000002" + .parse::() + .unwrap(); + s2.account(hc_addr).store(key, H256::from_slice(&val_vrfy)); - let result_v = self.router + let result_v = self + .router .estimate_gas(&entry_point, op.clone(), Some(s2), None) .await; - println!("HC result_v {:?}", result_v); + println!("HC result_v {:?}", result_v); match result_v { Err(EthRpcError::ExecutionReverted(ref msg)) => { if *msg == "_HC_VRFY".to_string() { return true; - } - } - _ => {} + } + } + _ => {} } false @@ -150,176 +148,265 @@ where &self, //context:&EntryPointContext, entry_point: Address, - op: UserOperationOptionalGas, - state_override: Option, - revert_data: &Bytes, + op: UserOperationOptionalGas, + state_override: Option, + revert_data: &Bytes, ) -> EthResult { - let s2 = state_override.unwrap_or_default(); 
- //let es = EstimationSettings { + let s2 = state_override.unwrap_or_default(); + //let es = EstimationSettings { // max_verification_gas: 0, // max_call_gas: 0, // max_simulate_handle_ops_gas: 0, // validation_estimation_gas_fee: 0, //}; - let op6:rundler_types::v0_6::UserOperationOptionalGas = op.clone().into(); - - let hh = op6.clone().into_user_operation(U256::from(0),U256::from(0)).hc_hash(); - println!("HC api.rs hh {:?}", hh); + let op6: rundler_types::v0_6::UserOperationOptionalGas = op.clone().into(); - let ep_addr = hybrid_compute::hc_ep_addr(revert_data); + let hh = op6 + .clone() + .into_user_operation(U256::from(0), U256::from(0)) + .hc_hash(); + println!("HC api.rs hh {:?}", hh); - let n_key:U256 = op6.nonce >> 64; - let at_price = op6.max_priority_fee_per_gas; - //let hc_nonce = context.gas_estimator.entry_point.get_nonce(op6.sender, n_key).await.unwrap(); - let hc_nonce = self.router.get_nonce(&entry_point, op6.sender, n_key).await.unwrap(); + let ep_addr = hybrid_compute::hc_ep_addr(revert_data); - let err_nonce = self.router.get_nonce(&entry_point, self.hc.sys_account, n_key).await.unwrap(); - println!("HC hc_nonce {:?} err_nonce {:?} op_nonce {:?} n_key {:?}", hc_nonce, err_nonce, op6.nonce, n_key); - let p2 = rundler_provider::new_provider(&self.hc.node_http, None)?; - - let hx = HH2::new(self.hc.helper_addr, p2.clone()); - let url = hx.registered_callers(ep_addr).await.expect("url_decode").1; - println!("HC registered_caller url {:?}", url); + let n_key: U256 = op6.nonce >> 64; + let at_price = op6.max_priority_fee_per_gas; + //let hc_nonce = context.gas_estimator.entry_point.get_nonce(op6.sender, n_key).await.unwrap(); + let hc_nonce = self + .router + .get_nonce(&entry_point, op6.sender, n_key) + .await + .unwrap(); - let cc = HttpClientBuilder::default().build(url); // could specify a request_timeout() here. 
+ let err_nonce = self + .router + .get_nonce(&entry_point, self.hc.sys_account, n_key) + .await + .unwrap(); + println!( + "HC hc_nonce {:?} err_nonce {:?} op_nonce {:?} n_key {:?}", + hc_nonce, err_nonce, op6.nonce, n_key + ); + let p2 = rundler_provider::new_provider(&self.hc.node_http, None)?; + + let hx = HH2::new(self.hc.helper_addr, p2.clone()); + let url = hx.registered_callers(ep_addr).await.expect("url_decode").1; + println!("HC registered_caller url {:?}", url); + + let cc = HttpClientBuilder::default().build(url); // could specify a request_timeout() here. if cc.is_err() { - return Err(EthRpcError::Internal(anyhow::anyhow!("Invalid URL registered for HC"))); + return Err(EthRpcError::Internal(anyhow::anyhow!( + "Invalid URL registered for HC" + ))); } - let m = hex::encode(hybrid_compute::hc_selector(revert_data)); - let sub_key = hybrid_compute::hc_sub_key(revert_data); - let sk_hex = hex::encode(sub_key); - let map_key = hybrid_compute::hc_map_key(revert_data); + let m = hex::encode(hybrid_compute::hc_selector(revert_data)); + let sub_key = hybrid_compute::hc_sub_key(revert_data); + let sk_hex = hex::encode(sub_key); + let map_key = hybrid_compute::hc_map_key(revert_data); - println!("HC api.rs sk_hex {:?} mk {:?}", sk_hex, map_key); + println!("HC api.rs sk_hex {:?} mk {:?}", sk_hex, map_key); - let payload = hex::encode(hybrid_compute::hc_req_payload(revert_data)); - let n_bytes:[u8; 32] = (hc_nonce).into(); - let src_n = hex::encode(n_bytes); - let src_addr = hex::encode(op6.sender); + let payload = hex::encode(hybrid_compute::hc_req_payload(revert_data)); + let n_bytes: [u8; 32] = (hc_nonce).into(); + let src_n = hex::encode(n_bytes); + let src_addr = hex::encode(op6.sender); - let oo_n_key:U256 = U256::from_big_endian(op6.sender.as_fixed_bytes()); - let oo_nonce = self.router.get_nonce(&entry_point, ep_addr, oo_n_key).await.unwrap(); + let oo_n_key: U256 = U256::from_big_endian(op6.sender.as_fixed_bytes()); + let oo_nonce = self + .router + 
.get_nonce(&entry_point, ep_addr, oo_n_key) + .await + .unwrap(); let ha_owner = SimpleAccount::new(ep_addr, p2).owner().await; if ha_owner.is_err() { - return Err(EthRpcError::Internal(anyhow::anyhow!("Failed to look up HybridAccount owner"))); + return Err(EthRpcError::Internal(anyhow::anyhow!( + "Failed to look up HybridAccount owner" + ))); } - const REQ_VERSION:&str = "0.2"; + const REQ_VERSION: &str = "0.2"; - let mut params = ObjectParams::new(); - let _ = params.insert("ver", REQ_VERSION); - let _ = params.insert("sk", sk_hex); - let _ = params.insert("src_addr", src_addr); - let _ = params.insert("src_nonce", src_n); - let _ = params.insert("oo_nonce", oo_nonce); - let _ = params.insert("payload", payload); + let mut params = ObjectParams::new(); + let _ = params.insert("ver", REQ_VERSION); + let _ = params.insert("sk", sk_hex); + let _ = params.insert("src_addr", src_addr); + let _ = params.insert("src_nonce", src_n); + let _ = params.insert("oo_nonce", oo_nonce); + let _ = params.insert("payload", payload); - let resp: Result, _> = cc.unwrap().request(&m, params).await; + let resp: Result, _> = cc.unwrap().request(&m, params).await; println!("HC resp {:?}", resp); - let err_hc:hybrid_compute::HcErr; + let err_hc: hybrid_compute::HcErr; match resp { - Ok(resp) => { - if resp.contains_key("success") && resp.contains_key("response") && resp.contains_key("signature") && - resp["success"].is_boolean() && resp["response"].is_string() && resp["signature"].is_string() { + Ok(resp) => { + if resp.contains_key("success") + && resp.contains_key("response") + && resp.contains_key("signature") + && resp["success"].is_boolean() + && resp["response"].is_string() + && resp["signature"].is_string() + { let op_success = resp["success"].as_bool().unwrap(); - let resp_hex = resp["response"].as_str().unwrap(); - let sig_hex:String = resp["signature"].as_str().unwrap().into(); - let hc_res:Bytes = hex::decode(resp_hex).unwrap().into(); - //println!("HC api.rs do_op result sk 
{:?} success {:?} res {:?}", sub_key, op_success, hc_res); - - err_hc = hybrid_compute::external_op(hh, op6.sender, hc_nonce, op_success, &hc_res, sub_key, ep_addr, sig_hex, oo_nonce, map_key, &self.hc, ha_owner.unwrap(), err_nonce).await; + let resp_hex = resp["response"].as_str().unwrap(); + let sig_hex: String = resp["signature"].as_str().unwrap().into(); + let hc_res: Bytes = hex::decode(resp_hex).unwrap().into(); + //println!("HC api.rs do_op result sk {:?} success {:?} res {:?}", sub_key, op_success, hc_res); + + err_hc = hybrid_compute::external_op( + hh, + op6.sender, + hc_nonce, + op_success, + &hc_res, + sub_key, + ep_addr, + sig_hex, + oo_nonce, + map_key, + &self.hc, + ha_owner.unwrap(), + err_nonce, + ) + .await; } else { - err_hc = hybrid_compute::HcErr{code: 3, message:"HC03: Decode Error".to_string()}; - } - }, - Err(error) => { + err_hc = hybrid_compute::HcErr { + code: 3, + message: "HC03: Decode Error".to_string(), + }; + } + } + Err(error) => { match error { - jsonrpsee::core::Error::Call(e) => { - err_hc = hybrid_compute::HcErr{code: 2, message:"HC02: Call error: ".to_owned() + e.message()}; - }, + jsonrpsee::core::Error::Call(e) => { + err_hc = hybrid_compute::HcErr { + code: 2, + message: "HC02: Call error: ".to_owned() + e.message(), + }; + } jsonrpsee::core::Error::Transport(e) => { - if e.to_string().contains("Connection refused") || - e.to_string().contains("status code: 5") { // look for 500-class HTTP errors - err_hc = hybrid_compute::HcErr{code: 6, message:"HC06: ".to_owned() + &e.to_string()}; - } else { - err_hc = hybrid_compute::HcErr{code: 2, message:"HC02: ".to_owned() + &e.to_string()}; - } - }, + if e.to_string().contains("Connection refused") + || e.to_string().contains("status code: 5") + { + // look for 500-class HTTP errors + err_hc = hybrid_compute::HcErr { + code: 6, + message: "HC06: ".to_owned() + &e.to_string(), + }; + } else { + err_hc = hybrid_compute::HcErr { + code: 2, + message: "HC02: ".to_owned() + 
&e.to_string(), + }; + } + } jsonrpsee::core::Error::RequestTimeout => { - err_hc = hybrid_compute::HcErr{code: 6, message:"HC06: RequestTimeout".to_string()}; - }, + err_hc = hybrid_compute::HcErr { + code: 6, + message: "HC06: RequestTimeout".to_string(), + }; + } jsonrpsee::core::Error::Custom(e) => { - err_hc = hybrid_compute::HcErr{code: 2, message:"HC02: Custom error:".to_owned() + &e.to_string()}; - }, - other => { - println!("HC unmatched error {:?}", other); - err_hc = hybrid_compute::HcErr{code: 4, message:"HC04: Unrecognized Error:".to_owned() + &other.to_string()}; - } + err_hc = hybrid_compute::HcErr { + code: 2, + message: "HC02: Custom error:".to_owned() + &e.to_string(), + }; + } + other => { + println!("HC unmatched error {:?}", other); + err_hc = hybrid_compute::HcErr { + code: 4, + message: "HC04: Unrecognized Error:".to_owned() + &other.to_string(), + }; + } } - } - } + } + } if err_hc.code != 0 { println!("HC api.rs calling err_op {:?}", err_hc.message); - hybrid_compute::err_op(hh, entry_point, err_hc.clone(), sub_key, op6.sender, hc_nonce, err_nonce, map_key, &self.hc).await; - } + hybrid_compute::err_op( + hh, + entry_point, + err_hc.clone(), + sub_key, + op6.sender, + hc_nonce, + err_nonce, + map_key, + &self.hc, + ) + .await; + } let s2 = hybrid_compute::get_hc_op_statediff(hh, s2); - let result2 = self.router + let result2 = self + .router .estimate_gas(&entry_point, op.clone(), Some(s2), None) .await; - println!("HC result2 {:?}", result2); - let r3:RpcGasEstimateV0_6; - if result2.is_ok() { - println!("HC api.rs Ok gas result2 = {:?}", result2); - let r3a = result2.unwrap(); + println!("HC result2 {:?}", result2); + let r3: RpcGasEstimateV0_6; + if result2.is_ok() { + println!("HC api.rs Ok gas result2 = {:?}", result2); + let r3a = result2.unwrap(); match r3a { RpcGasEstimate::V0_6(abc) => { r3 = abc; - }, + } _ => { - return Err(EthRpcError::Internal(anyhow::anyhow!("HC04 user_op gas estimation failed"))); + return 
Err(EthRpcError::Internal(anyhow::anyhow!( + "HC04 user_op gas estimation failed" + ))); } } - let op_tmp = hybrid_compute::get_hc_ent(hh).unwrap().user_op; - let op_tmp_2: rundler_types::v0_6::UserOperationOptionalGas = rundler_types::v0_6::UserOperationOptionalGas { - sender: op_tmp.sender, - nonce: op_tmp.nonce, - init_code: op_tmp.init_code, - call_data: op_tmp.call_data, - call_gas_limit: Some(op_tmp.call_gas_limit), - verification_gas_limit: Some(op_tmp.verification_gas_limit), - pre_verification_gas: Some(op_tmp.pre_verification_gas), - max_fee_per_gas: Some(op_tmp.max_fee_per_gas), - max_priority_fee_per_gas: Some(op_tmp.max_priority_fee_per_gas), - paymaster_and_data: op_tmp.paymaster_and_data, - signature: op_tmp.signature, - }; + let op_tmp = hybrid_compute::get_hc_ent(hh).unwrap().user_op; + let op_tmp_2: rundler_types::v0_6::UserOperationOptionalGas = + rundler_types::v0_6::UserOperationOptionalGas { + sender: op_tmp.sender, + nonce: op_tmp.nonce, + init_code: op_tmp.init_code, + call_data: op_tmp.call_data, + call_gas_limit: Some(op_tmp.call_gas_limit), + verification_gas_limit: Some(op_tmp.verification_gas_limit), + pre_verification_gas: Some(op_tmp.pre_verification_gas), + max_fee_per_gas: Some(op_tmp.max_fee_per_gas), + max_priority_fee_per_gas: Some(op_tmp.max_priority_fee_per_gas), + paymaster_and_data: op_tmp.paymaster_and_data, + signature: op_tmp.signature, + }; // The op_tmp_2 below specifies a 0 gas price, but we need to estimate the L1 fee at the // price offered by real userOperation which will be paying for it. 
- let r2a = self.router - .estimate_gas(&entry_point, rundler_types::UserOperationOptionalGas::V0_6(op_tmp_2.clone()), Some(spoof::State::default()), at_price) + let r2a = self + .router + .estimate_gas( + &entry_point, + rundler_types::UserOperationOptionalGas::V0_6(op_tmp_2.clone()), + Some(spoof::State::default()), + at_price, + ) .await; - if let Err(EthRpcError::ExecutionReverted(ref r2_err)) = r2a { // FIXME + if let Err(EthRpcError::ExecutionReverted(ref r2_err)) = r2a { + // FIXME println!("HC op_tmp_2 gas estimation failed (RevertInValidation)"); let msg = "HC04: Offchain validation failed: ".to_string() + &r2_err; return Err(EthRpcError::Internal(anyhow::anyhow!(msg))); }; - - let r2:RpcGasEstimateV0_6; + + let r2: RpcGasEstimateV0_6; match r2a? { RpcGasEstimate::V0_6(abc) => { r2 = abc; - }, + } _ => { - return Err(EthRpcError::Internal(anyhow::anyhow!("HC04 offchain_op gas estimation failed"))); + return Err(EthRpcError::Internal(anyhow::anyhow!( + "HC04 offchain_op gas estimation failed" + ))); } } @@ -331,64 +418,93 @@ where return Err(EthRpcError::Internal(anyhow::anyhow!(msg))); } - let offchain_gas = r2.pre_verification_gas + r2.verification_gas_limit + r2.call_gas_limit; - - let mut cleanup_keys:Vec = Vec::new(); - cleanup_keys.push(map_key); - let c_nonce = self.router.get_nonce(&entry_point, self.hc.sys_account, U256::zero()).await.unwrap(); - let cleanup_op = hybrid_compute::rr_op(&self.hc, c_nonce, cleanup_keys.clone()).await; - let op_tmp_4: rundler_types::v0_6::UserOperationOptionalGas = rundler_types::v0_6::UserOperationOptionalGas { - sender: cleanup_op.sender, - nonce: cleanup_op.nonce, - init_code: cleanup_op.init_code, - call_data: cleanup_op.call_data, - call_gas_limit: Some(cleanup_op.call_gas_limit), - verification_gas_limit: Some(cleanup_op.verification_gas_limit), - pre_verification_gas: Some(cleanup_op.pre_verification_gas), - max_fee_per_gas: Some(cleanup_op.max_fee_per_gas), - max_priority_fee_per_gas: 
Some(cleanup_op.max_priority_fee_per_gas), - paymaster_and_data: cleanup_op.paymaster_and_data, - signature: cleanup_op.signature, - }; - println!("HC op_tmp_4 {:?} {:?}", op_tmp_4, cleanup_keys); - let r4a = self.router.estimate_gas(&entry_point, rundler_types::UserOperationOptionalGas::V0_6(op_tmp_4), Some(spoof::State::default()), at_price).await; - let r4:RpcGasEstimateV0_6; + let offchain_gas = + r2.pre_verification_gas + r2.verification_gas_limit + r2.call_gas_limit; + + let mut cleanup_keys: Vec = Vec::new(); + cleanup_keys.push(map_key); + let c_nonce = self + .router + .get_nonce(&entry_point, self.hc.sys_account, U256::zero()) + .await + .unwrap(); + let cleanup_op = hybrid_compute::rr_op(&self.hc, c_nonce, cleanup_keys.clone()).await; + let op_tmp_4: rundler_types::v0_6::UserOperationOptionalGas = + rundler_types::v0_6::UserOperationOptionalGas { + sender: cleanup_op.sender, + nonce: cleanup_op.nonce, + init_code: cleanup_op.init_code, + call_data: cleanup_op.call_data, + call_gas_limit: Some(cleanup_op.call_gas_limit), + verification_gas_limit: Some(cleanup_op.verification_gas_limit), + pre_verification_gas: Some(cleanup_op.pre_verification_gas), + max_fee_per_gas: Some(cleanup_op.max_fee_per_gas), + max_priority_fee_per_gas: Some(cleanup_op.max_priority_fee_per_gas), + paymaster_and_data: cleanup_op.paymaster_and_data, + signature: cleanup_op.signature, + }; + println!("HC op_tmp_4 {:?} {:?}", op_tmp_4, cleanup_keys); + let r4a = self + .router + .estimate_gas( + &entry_point, + rundler_types::UserOperationOptionalGas::V0_6(op_tmp_4), + Some(spoof::State::default()), + at_price, + ) + .await; + let r4: RpcGasEstimateV0_6; match r4a? 
{ RpcGasEstimate::V0_6(abc) => { r4 = abc; - }, + } _ => { - return Err(EthRpcError::Internal(anyhow::anyhow!("HC04 cleanup_op gas estimation failed"))); + return Err(EthRpcError::Internal(anyhow::anyhow!( + "HC04 cleanup_op gas estimation failed" + ))); } } - let cleanup_gas = r4.pre_verification_gas + r4.verification_gas_limit + r4.call_gas_limit; + let cleanup_gas = + r4.pre_verification_gas + r4.verification_gas_limit + r4.call_gas_limit; let op_gas = r3.pre_verification_gas + r3.verification_gas_limit + r3.call_gas_limit; - println!("HC api.rs offchain_gas estimate {:?} sum {:?}", r2, offchain_gas); - println!("HC api.rs userop_gas estimate {:?} sum {:?}", r3, op_gas); - println!("HC api.rs cleanup_gas estimate {:?} sum {:?}", r4, cleanup_gas); + println!( + "HC api.rs offchain_gas estimate {:?} sum {:?}", + r2, offchain_gas + ); + println!("HC api.rs userop_gas estimate {:?} sum {:?}", r3, op_gas); + println!( + "HC api.rs cleanup_gas estimate {:?} sum {:?}", + r4, cleanup_gas + ); let needed_pvg = r3.pre_verification_gas + offchain_gas; - println!("HC needed_pvg {:?} = {:?} + {:?}", needed_pvg, r3.pre_verification_gas, offchain_gas); + println!( + "HC needed_pvg {:?} = {:?} + {:?}", + needed_pvg, r3.pre_verification_gas, offchain_gas + ); hybrid_compute::hc_set_pvg(hh, needed_pvg, offchain_gas + cleanup_gas + offchain_gas); if err_hc.code != 0 { return Err(EthRpcError::Internal(anyhow::anyhow!(err_hc.message))); - } + } let total_gas = needed_pvg + (r3.verification_gas_limit + VG_PAD) + r3.call_gas_limit; - if total_gas > U256::from(25_000_000) { // Approaching the block gas limit - let err_msg:String = "Excessive HC total_gas estimate = ".to_owned() + &total_gas.to_string(); + if total_gas > U256::from(25_000_000) { + // Approaching the block gas limit + let err_msg: String = + "Excessive HC total_gas estimate = ".to_owned() + &total_gas.to_string(); return Err(EthRpcError::Internal(anyhow::anyhow!(err_msg))); } - return Ok(RpcGasEstimateV0_6 { - 
pre_verification_gas: (needed_pvg + PVG_PAD), - verification_gas_limit: r3.verification_gas_limit, - call_gas_limit: r3.call_gas_limit, - }.into()); - } else { + return Ok(RpcGasEstimateV0_6 { + pre_verification_gas: (needed_pvg + PVG_PAD), + verification_gas_limit: r3.verification_gas_limit, + call_gas_limit: r3.call_gas_limit, + } + .into()); + } else { return result2; } } @@ -407,43 +523,48 @@ where ))); } - let mut result = self.router + let mut result = self + .router .estimate_gas(&entry_point, op.clone(), state_override.clone(), None) .await; - println!("HC api.rs estimate_gas result1 {:?}", result); + println!("HC api.rs estimate_gas result1 {:?}", result); match result { - Ok(ref estimate) => { - match estimate { - RpcGasEstimate::V0_6(estimate6) => { - return Ok(RpcGasEstimateV0_6{ - pre_verification_gas: estimate6.pre_verification_gas, - verification_gas_limit: estimate6.verification_gas_limit + VG_PAD, - call_gas_limit: estimate6.call_gas_limit, - }.into()); - }, - _ => {} + Ok(ref estimate) => match estimate { + RpcGasEstimate::V0_6(estimate6) => { + return Ok(RpcGasEstimateV0_6 { + pre_verification_gas: estimate6.pre_verification_gas, + verification_gas_limit: estimate6.verification_gas_limit + VG_PAD, + call_gas_limit: estimate6.call_gas_limit, + } + .into()); } - } - Err(EthRpcError::ExecutionRevertedWithBytes(ref r)) => { - if hybrid_compute::check_trigger(&r.revert_data) { + _ => {} + }, + Err(EthRpcError::ExecutionRevertedWithBytes(ref r)) => { + if hybrid_compute::check_trigger(&r.revert_data) { let bn = 0; //self.provider.get_block_number().await.unwrap(); println!("HC api.rs HC trigger at bn {}", bn); - let map_key = hybrid_compute::hc_map_key(&r.revert_data); - let key:H256 = hybrid_compute::hc_storage_key(map_key); - - if self.hc_verify_trigger(entry_point, op.clone(), key, state_override.clone()).await { - result = self.hc_simulate_response(entry_point, op, state_override, &r.revert_data).await; - } else { - println!("HC did not get 
expected _HC_VRFY"); - let msg = "HC04: Failed to verify trigger event".to_owned(); - return Err(EthRpcError::Internal(anyhow::anyhow!(msg))); - } - } - } - Err(_) => {} - } + let map_key = hybrid_compute::hc_map_key(&r.revert_data); + let key: H256 = hybrid_compute::hc_storage_key(map_key); + + if self + .hc_verify_trigger(entry_point, op.clone(), key, state_override.clone()) + .await + { + result = self + .hc_simulate_response(entry_point, op, state_override, &r.revert_data) + .await; + } else { + println!("HC did not get expected _HC_VRFY"); + let msg = "HC04: Failed to verify trigger event".to_owned(); + return Err(EthRpcError::Internal(anyhow::anyhow!(msg))); + } + } + } + Err(_) => {} + } result } diff --git a/crates/rpc/src/eth/router.rs b/crates/rpc/src/eth/router.rs index a52ecb59..1e643075 100644 --- a/crates/rpc/src/eth/router.rs +++ b/crates/rpc/src/eth/router.rs @@ -187,16 +187,15 @@ impl EntryPointRouter { EntryPointVersion::Unspecified => unreachable!("unspecified entry point version"), } } - + pub(crate) async fn get_nonce( &self, entry_point: &Address, addr: Address, key: U256, ) -> EthResult { - self.get_route(entry_point)? 
- .get_nonce(addr,key) + .get_nonce(addr, key) .await .map_err(Into::into) //Ok(U256::from(0)) @@ -261,11 +260,7 @@ pub(crate) trait EntryPointRoute: Send + Sync + 'static { at_price: Option, ) -> Result; - async fn get_nonce( - &self, - addr: Address, - key: U256, - ) -> anyhow::Result; + async fn get_nonce(&self, addr: Address, key: U256) -> anyhow::Result; async fn check_signature( &self, @@ -309,30 +304,27 @@ where async fn get_receipt(&self, hash: H256) -> anyhow::Result> { self.event_provider.get_receipt(hash).await } - + async fn estimate_gas( &self, uo: UserOperationOptionalGas, state_override: Option, at_price: Option, ) -> Result { - println!("HC router estimate_gas op {:?} state {:?}", uo, state_override); - let ret = self.gas_estimator + println!( + "HC router estimate_gas op {:?} state {:?}", + uo, state_override + ); + let ret = self + .gas_estimator .estimate_op_gas(uo.into(), state_override.unwrap_or_default(), at_price) .await; println!("HC router estimate_gas ret {:?}", ret); ret } - async fn get_nonce( - &self, - addr: Address, - key: U256, - ) -> anyhow::Result { - let output = self - .entry_point - .get_nonce(addr, key) - .await; + async fn get_nonce(&self, addr: Address, key: U256) -> anyhow::Result { + let output = self.entry_point.get_nonce(addr, key).await; if output.is_ok() { return Ok(output.unwrap()); } diff --git a/crates/rpc/src/task.rs b/crates/rpc/src/task.rs index 446f6901..8f12aeb7 100644 --- a/crates/rpc/src/task.rs +++ b/crates/rpc/src/task.rs @@ -16,12 +16,11 @@ use std::{net::SocketAddr, sync::Arc, time::Duration}; use anyhow::bail; use async_trait::async_trait; use ethers::providers::{JsonRpcClient, Provider}; +use hyper::Method; use jsonrpsee::{ server::{middleware::ProxyGetRequestLayer, ServerBuilder}, RpcModule, }; -use hyper::Method; -use tower_http::cors::{Any, CorsLayer}; use rundler_provider::{EthersEntryPointV0_6, EthersEntryPointV0_7}; use rundler_sim::{ EstimationSettings, FeeEstimator, GasEstimatorV0_6, 
GasEstimatorV0_7, PrecheckSettings, @@ -32,6 +31,7 @@ use rundler_task::{ }; use rundler_types::{builder::Builder, chain::ChainSpec, pool::Pool}; use tokio_util::sync::CancellationToken; +use tower_http::cors::{Any, CorsLayer}; use tracing::info; use crate::{ diff --git a/crates/sim/src/estimation/estimate_verification_gas.rs b/crates/sim/src/estimation/estimate_verification_gas.rs index 8072a8b7..5971d015 100644 --- a/crates/sim/src/estimation/estimate_verification_gas.rs +++ b/crates/sim/src/estimation/estimate_verification_gas.rs @@ -105,7 +105,10 @@ where // Make one attempt at max gas, to see if success is possible. // Capture the gas usage of this attempt and use as the initial guess in the binary search let initial_op = get_op(max_guess); - println!("HC estimate_verification initial_op {:?}", initial_op.clone()); + println!( + "HC estimate_verification initial_op {:?}", + initial_op.clone() + ); let SimulateOpCallData { call_data, spoofed_state, @@ -122,7 +125,10 @@ where ) .await .context("failed to run initial guess")?; - println!("HC estimate_verification SimulateHandleOp initial guess gas_used {:?}", gas_used); + println!( + "HC estimate_verification SimulateHandleOp initial guess gas_used {:?}", + gas_used + ); if gas_used.success { if self.entry_point.simulation_should_revert() { @@ -188,7 +194,10 @@ where } guess = max_failure_gas.saturating_add(min_success_gas) / 2; } - println!("HC after verification gas estimation loop max_fail {:?} min_success {:?}", max_failure_gas, min_success_gas); + println!( + "HC after verification gas estimation loop max_fail {:?} min_success {:?}", + max_failure_gas, min_success_gas + ); tracing::debug!( "binary search for verification gas took {num_rounds} rounds, {}ms", @@ -205,7 +214,6 @@ where } println!("HC verification min_success_gas {:?}", min_success_gas); - Ok(U128::try_from(min_success_gas) .ok() .context("min success gas should fit in 128-bit int")?) 
diff --git a/crates/sim/src/estimation/v0_6.rs b/crates/sim/src/estimation/v0_6.rs index 9d0251c8..5a7f440b 100644 --- a/crates/sim/src/estimation/v0_6.rs +++ b/crates/sim/src/estimation/v0_6.rs @@ -296,8 +296,11 @@ where } }; - if let Some(at_gas_price) = at_gas_price { - println!("HC Override estimate_pvg gas price from {:?} to {:?}", gas_price, at_gas_price); + if let Some(at_gas_price) = at_gas_price { + println!( + "HC Override estimate_pvg gas price from {:?} to {:?}", + gas_price, at_gas_price + ); gas_price = at_gas_price; } diff --git a/crates/sim/src/gas/gas.rs b/crates/sim/src/gas/gas.rs index a0328ca8..fa1233fc 100644 --- a/crates/sim/src/gas/gas.rs +++ b/crates/sim/src/gas/gas.rs @@ -59,7 +59,13 @@ pub async fn estimate_pre_verification_gas< .calc_l1_gas(entry_point.address(), random_op.clone(), gas_price) .await?; - println!("HC estimate_pre_verification_gas {} = {} + {} price {}", static_gas + dynamic_gas, static_gas, dynamic_gas, gas_price); + println!( + "HC estimate_pre_verification_gas {} = {} + {} price {}", + static_gas + dynamic_gas, + static_gas, + dynamic_gas, + gas_price + ); Ok(static_gas.saturating_add(dynamic_gas)) } @@ -76,7 +82,12 @@ pub async fn calc_required_pre_verification_gas< op: &UO, base_fee: U256, ) -> anyhow::Result { - println!("HC entering calc_pre_verification_gas, base_fee {} op_fees {} {}", base_fee, op.max_priority_fee_per_gas(), op.max_fee_per_gas()); + println!( + "HC entering calc_pre_verification_gas, base_fee {} op_fees {} {}", + base_fee, + op.max_priority_fee_per_gas(), + op.max_fee_per_gas() + ); let static_gas = op.calc_static_pre_verification_gas(chain_spec, true); if !chain_spec.calldata_pre_verification_gas { return Ok(static_gas); @@ -90,7 +101,12 @@ pub async fn calc_required_pre_verification_gas< let dynamic_gas = entry_point .calc_l1_gas(entry_point.address(), op.clone(), gas_price) .await?; - println!("HC calc_required_pre_verification_gas {} = {} + {}", static_gas + dynamic_gas, static_gas, 
dynamic_gas); + println!( + "HC calc_required_pre_verification_gas {} = {} + {}", + static_gas + dynamic_gas, + static_gas, + dynamic_gas + ); Ok(static_gas + dynamic_gas) } diff --git a/crates/sim/src/simulation/v0_6/tracer.rs b/crates/sim/src/simulation/v0_6/tracer.rs index 56392d55..f1cf77c8 100644 --- a/crates/sim/src/simulation/v0_6/tracer.rs +++ b/crates/sim/src/simulation/v0_6/tracer.rs @@ -18,8 +18,8 @@ use ethers::types::{ BlockId, GethDebugTracerType, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, }; use rundler_provider::{Provider, SimulationProvider}; -use rundler_types::UserOperation as UserOperation2; use rundler_types::v0_6::UserOperation; +use rundler_types::UserOperation as UserOperation2; use serde::Deserialize; use crate::simulation::context::TracerOutput; diff --git a/crates/types/src/hybrid_compute.rs b/crates/types/src/hybrid_compute.rs index 2ab23b30..d1edfc69 100644 --- a/crates/types/src/hybrid_compute.rs +++ b/crates/types/src/hybrid_compute.rs @@ -12,25 +12,21 @@ */ use ethers::{ - abi::{ - AbiDecode, AbiEncode - }, - types::{ - Address, Bytes, U256, H256, BigEndianHash, RecoveryMessage::Data, - }, - utils::keccak256, + abi::{AbiDecode, AbiEncode}, signers::{LocalWallet, Signer}, + types::{Address, BigEndianHash, Bytes, RecoveryMessage::Data, H256, U256}, + utils::keccak256, }; +use crate::user_operation::UserOperation; use crate::v0_6::UserOperation as UserOperationV0_6; -use crate::user_operation::{UserOperation}; -use std::{sync::Mutex, collections::HashMap, str::FromStr}; +use std::{collections::HashMap, str::FromStr, sync::Mutex}; use once_cell::sync::Lazy; use std::time::{Duration, SystemTime}; -#[derive(Clone,Debug)] +#[derive(Clone, Debug)] /// Error code pub struct HcErr { /// numeric code @@ -58,18 +54,18 @@ pub struct HcEntry { pub needed_pvg: U256, } -const EXPIRE_SECS:std::time::Duration = Duration::new(120, 0); +const EXPIRE_SECS: std::time::Duration = Duration::new(120, 0); impl Clone for HcEntry { fn 
clone(&self) -> HcEntry { HcEntry { sub_key: self.sub_key, - map_key: self.map_key, + map_key: self.map_key, //call_data: self.call_data.clone(), user_op: self.user_op.clone(), - ts: self.ts, - oc_gas: self.oc_gas, - needed_pvg: self.needed_pvg, + ts: self.ts, + oc_gas: self.oc_gas, + needed_pvg: self.needed_pvg, } } } @@ -79,7 +75,7 @@ static HC_MAP: Lazy>> = Lazy::new(|| Mutex::new(m) }); -#[derive(Clone, Debug,PartialEq)] +#[derive(Clone, Debug, PartialEq)] /// Parameters needed for Hybrid Compute, accessed from various modules. pub struct HcCfg { /// Helper contract address @@ -104,12 +100,29 @@ pub struct HcCfg { /// Parameters needed for Hybrid Compute, accessed from various modules. pub static HC_CONFIG: Lazy> = Lazy::new(|| { - let c = HcCfg { helper_addr:Address::zero(), sys_account:Address::zero(), sys_owner:Address::zero(), sys_privkey:H256::zero(), entry_point: Address::zero(), chain_id: 0, node_http:String::new(), from_addr: Address::zero()}; - Mutex::new(c) + let c = HcCfg { + helper_addr: Address::zero(), + sys_account: Address::zero(), + sys_owner: Address::zero(), + sys_privkey: H256::zero(), + entry_point: Address::zero(), + chain_id: 0, + node_http: String::new(), + from_addr: Address::zero(), + }; + Mutex::new(c) }); /// Set the HC parameters based on CLI args -pub fn init(helper_addr: Address, sys_account: Address, sys_owner:Address, sys_privkey:H256, entry_point: Address, chain_id:u64, node_http: String) { +pub fn init( + helper_addr: Address, + sys_account: Address, + sys_owner: Address, + sys_privkey: H256, + entry_point: Address, + chain_id: u64, + node_http: String, +) { let mut cfg = HC_CONFIG.lock().unwrap(); cfg.helper_addr = helper_addr; @@ -119,7 +132,6 @@ pub fn init(helper_addr: Address, sys_account: Address, sys_owner:Address, sys_p cfg.entry_point = entry_point; cfg.chain_id = chain_id; cfg.node_http = node_http.clone(); - } /// Set the EOA address which the bundler is using. 
Erigon, but not geth, needs this for tx simulation @@ -129,14 +141,10 @@ pub fn set_signer(from_addr: Address) { } /// Wrap the response payload into calldata for the HybridAccount + HCHelper contracts -pub fn make_op_calldata( - sender: Address, - map_key: ethers::types::H256, - payload : Bytes, -) -> Bytes { +pub fn make_op_calldata(sender: Address, map_key: ethers::types::H256, payload: Bytes) -> Bytes { let mut put_data = [0xdfu8, 0xc9, 0x8a, 0xe8].to_vec(); // helper "PutResponse(bytes32,bytes)" selector put_data.extend(AbiEncode::encode((map_key, payload))); - let put_bytes : Bytes = put_data.into(); + let put_bytes: Bytes = put_data.into(); let mut tmp_data = [0xb6u8, 0x1d, 0x27, 0xf6].to_vec(); // account "execute" selector tmp_data.extend(AbiEncode::encode((sender, U256::zero(), put_bytes))); @@ -144,14 +152,10 @@ pub fn make_op_calldata( } /// Wrap the error response payload into calldata for the HybridAccount + HCHelper contracts -pub fn make_err_calldata( - sender: Address, - map_key: ethers::types::H256, - payload : Bytes, -) -> Bytes { +pub fn make_err_calldata(sender: Address, map_key: ethers::types::H256, payload: Bytes) -> Bytes { let mut put_data = [0xfdu8, 0xe8, 0x9b, 0x64].to_vec(); // helper "PutSysResponse(bytes32,bytes)" selector put_data.extend(AbiEncode::encode((map_key, payload))); - let put_bytes : Bytes = put_data.into(); + let put_bytes: Bytes = put_data.into(); let mut tmp_data = [0xb6u8, 0x1d, 0x27, 0xf6].to_vec(); // account "execute" selector tmp_data.extend(AbiEncode::encode((sender, U256::zero(), put_bytes))); @@ -159,27 +163,29 @@ pub fn make_err_calldata( } /// Cleanup to remove any leaked responses at the end of a bundle -pub fn make_rr_calldata( - keys : Vec, -) -> Bytes { -// let mut put_data = [0xcbu8, 0x74, 0x30, 0xae].to_vec(); // helper RemoveResponse(bytes32[]) +pub fn make_rr_calldata(keys: Vec) -> Bytes { + // let mut put_data = [0xcbu8, 0x74, 0x30, 0xae].to_vec(); // helper RemoveResponse(bytes32[]) let mut put_data = 
[0x10u8, 0x40, 0x4d, 0x34].to_vec(); // helper RemoveResponses(bytes32[]) let cfg = HC_CONFIG.lock().unwrap(); put_data.extend(AbiEncode::encode(keys)); - let put_bytes : Bytes = put_data.into(); + let put_bytes: Bytes = put_data.into(); let mut tmp_data = [0xb6u8, 0x1d, 0x27, 0xf6].to_vec(); // account "execute" selector - tmp_data.extend(AbiEncode::encode((cfg.helper_addr, U256::zero(), put_bytes))); + tmp_data.extend(AbiEncode::encode(( + cfg.helper_addr, + U256::zero(), + put_bytes, + ))); tmp_data.into() } /// Check for a trigger string in the revert data -pub fn check_trigger(rev : &Bytes) -> bool { - const MIN_REQ_LEN:usize = 8 + 20 + 32 + 4; // trigger prefix + endpoint_addr + user_key + 4-byte selector +pub fn check_trigger(rev: &Bytes) -> bool { + const MIN_REQ_LEN: usize = 8 + 20 + 32 + 4; // trigger prefix + endpoint_addr + user_key + 4-byte selector println!("HC trigger check in {:?}", rev); - const TRIGGER : [u8; 8] = [0x5f, 0x48, 0x43, 0x5f, 0x54, 0x52, 0x49, 0x47]; + const TRIGGER: [u8; 8] = [0x5f, 0x48, 0x43, 0x5f, 0x54, 0x52, 0x49, 0x47]; if rev.len() >= MIN_REQ_LEN && &rev[0..8] == TRIGGER { println!("HC HC triggered"); @@ -189,39 +195,42 @@ pub fn check_trigger(rev : &Bytes) -> bool { } /// Key used to store response in the HCHelper mapping -pub fn hc_map_key(revert_data : &Bytes) -> H256 { - let sub_key:H256 = keccak256(&revert_data[28..]).into(); - let map_key:H256 = keccak256([&revert_data[8..28],&sub_key.to_fixed_bytes()].concat()).into(); +pub fn hc_map_key(revert_data: &Bytes) -> H256 { + let sub_key: H256 = keccak256(&revert_data[28..]).into(); + let map_key: H256 = keccak256([&revert_data[8..28], &sub_key.to_fixed_bytes()].concat()).into(); map_key } /// Calculates the HCHelper storage slot key for a ResponseCache entry -pub fn hc_storage_key(map_key:H256) -> H256 { - let slot_idx = "0x0000000000000000000000000000000000000000000000000000000000000000".parse::().unwrap(); - let storage_key:H256 = 
keccak256([Bytes::from(map_key.to_fixed_bytes()), slot_idx].concat()).into(); +pub fn hc_storage_key(map_key: H256) -> H256 { + let slot_idx = "0x0000000000000000000000000000000000000000000000000000000000000000" + .parse::() + .unwrap(); + let storage_key: H256 = + keccak256([Bytes::from(map_key.to_fixed_bytes()), slot_idx].concat()).into(); storage_key } /// Partial key, to be combined with msg.sender -pub fn hc_sub_key(revert_data : &Bytes) -> H256 { - let sub_key:H256 = keccak256(&revert_data[28..]).into(); +pub fn hc_sub_key(revert_data: &Bytes) -> H256 { + let sub_key: H256 = keccak256(&revert_data[28..]).into(); sub_key } /// Endpoint address (address of HybridAccount which called HCHelper) -pub fn hc_ep_addr(revert_data : &Bytes) -> Address { +pub fn hc_ep_addr(revert_data: &Bytes) -> Address { let ep_addr = Address::from_slice(&revert_data[8..28]); ep_addr } /// Extract the function selector called by the HC operation -pub fn hc_selector(revert_data : &Bytes) -> [u8;4] { - let sel_bytes:[u8; 4] = revert_data[60..64].to_vec().try_into().unwrap(); +pub fn hc_selector(revert_data: &Bytes) -> [u8; 4] { + let sel_bytes: [u8; 4] = revert_data[60..64].to_vec().try_into().unwrap(); sel_bytes } /// Extract the request payload -pub fn hc_req_payload(revert_data : &Bytes) -> Vec { +pub fn hc_req_payload(revert_data: &Bytes) -> Vec { revert_data[64..].to_vec() } @@ -237,29 +246,38 @@ fn make_external_op( oo_nonce: U256, cfg: &HcCfg, ) -> UserOperationV0_6 { + let tmp_bytes: Bytes = Bytes::from(response_payload.to_vec()); - let tmp_bytes:Bytes = Bytes::from(response_payload.to_vec()); - - let err_code:u32 = if op_success {0} else {1}; + let err_code: u32 = if op_success { 0 } else { 1 }; let merged_response = AbiEncode::encode((src_addr, nonce, err_code, tmp_bytes)); - let call_data = make_op_calldata(cfg.helper_addr, sub_key, Bytes::from(merged_response.to_vec())); - let call_gas = 705*response_payload.len() + 170000; - - println!("HC external_op call_data len {:?} 
{:?} gas {:?} {:?}", response_payload.len(), call_data.len(), call_gas, call_data); - - let mut new_op:UserOperationV0_6 = UserOperationV0_6{ + let call_data = make_op_calldata( + cfg.helper_addr, + sub_key, + Bytes::from(merged_response.to_vec()), + ); + let call_gas = 705 * response_payload.len() + 170000; + + println!( + "HC external_op call_data len {:?} {:?} gas {:?} {:?}", + response_payload.len(), + call_data.len(), + call_gas, + call_data + ); + + let mut new_op: UserOperationV0_6 = UserOperationV0_6 { sender: ep_addr, - nonce: oo_nonce.into(), - init_code: Bytes::new(), - call_data: call_data.clone(), - call_gas_limit: U256::from(call_gas), - verification_gas_limit: U256::from(0x10000), - pre_verification_gas: U256::from(0x10000), - max_fee_per_gas: U256::zero(), - max_priority_fee_per_gas: U256::zero(), - paymaster_and_data: Bytes::new(), - signature: Bytes::new(), + nonce: oo_nonce.into(), + init_code: Bytes::new(), + call_data: call_data.clone(), + call_gas_limit: U256::from(call_gas), + verification_gas_limit: U256::from(0x10000), + pre_verification_gas: U256::from(0x10000), + max_fee_per_gas: U256::zero(), + max_priority_fee_per_gas: U256::zero(), + paymaster_and_data: Bytes::new(), + signature: Bytes::new(), }; new_op.signature = sig_hex.parse::().unwrap(); @@ -283,21 +301,45 @@ pub async fn external_op( ha_owner: Address, nn: U256, ) -> HcErr { - let mut new_op = make_external_op(src_addr,nonce,op_success,response_payload,sub_key,ep_addr,sig_hex.clone(),oo_nonce,cfg); + let mut new_op = make_external_op( + src_addr, + nonce, + op_success, + response_payload, + sub_key, + ep_addr, + sig_hex.clone(), + oo_nonce, + cfg, + ); let check_hash = new_op.hash(cfg.entry_point, cfg.chain_id); - let check_sig: ethers::types::Signature = ethers::types::Signature::from_str(&sig_hex).expect("Signature decode"); - let check_msg: ethers::types::RecoveryMessage = Data(check_hash.to_fixed_bytes().to_vec()); + let check_sig: ethers::types::Signature = + 
ethers::types::Signature::from_str(&sig_hex).expect("Signature decode"); + let check_msg: ethers::types::RecoveryMessage = Data(check_hash.to_fixed_bytes().to_vec()); - let mut hc_err = HcErr{code: 0, message:"".to_string()}; + let mut hc_err = HcErr { + code: 0, + message: "".to_string(), + }; if check_sig.verify(check_msg, ha_owner).is_err() { println!("HC Bad offchain signature"); - hc_err = HcErr{code: 3, message:"HC03: Bad offchain signature".to_string()}; + hc_err = HcErr { + code: 3, + message: "HC03: Bad offchain signature".to_string(), + }; new_op = make_err_op(hc_err.clone(), sub_key, src_addr, nn, oo_nonce, cfg); } - let ent:HcEntry = HcEntry{ sub_key:sub_key, map_key:map_key, user_op:new_op.clone(), ts:SystemTime::now(), oc_gas:U256::zero(), needed_pvg: U256::zero() }; + let ent: HcEntry = HcEntry { + sub_key: sub_key, + map_key: map_key, + user_op: new_op.clone(), + ts: SystemTime::now(), + oc_gas: U256::zero(), + needed_pvg: U256::zero(), + }; HC_MAP.lock().unwrap().insert(op_key, ent); hc_err @@ -311,24 +353,28 @@ fn make_err_op( oo_nonce: U256, cfg: &HcCfg, ) -> UserOperationV0_6 { - - let response_payload:Bytes = AbiEncode::encode((src_addr, nn, err_hc.code, err_hc.message)).into(); - - let call_data = make_err_calldata(cfg.helper_addr, sub_key, Bytes::from(response_payload.to_vec())); + let response_payload: Bytes = + AbiEncode::encode((src_addr, nn, err_hc.code, err_hc.message)).into(); + + let call_data = make_err_calldata( + cfg.helper_addr, + sub_key, + Bytes::from(response_payload.to_vec()), + ); println!("HC err_op call_data {:?}", call_data); - let new_op:UserOperationV0_6 = UserOperationV0_6{ + let new_op: UserOperationV0_6 = UserOperationV0_6 { sender: cfg.sys_account, - nonce: oo_nonce.into(), - init_code: Bytes::new(), - call_data: call_data.clone(), - call_gas_limit:U256::from(0x40000), - verification_gas_limit: U256::from(0x10000), - pre_verification_gas: U256::from(0x10000), - max_fee_per_gas: U256::zero(), - 
max_priority_fee_per_gas: U256::zero(), - paymaster_and_data: Bytes::new(), - signature: Bytes::new(), + nonce: oo_nonce.into(), + init_code: Bytes::new(), + call_data: call_data.clone(), + call_gas_limit: U256::from(0x40000), + verification_gas_limit: U256::from(0x10000), + pre_verification_gas: U256::from(0x10000), + max_fee_per_gas: U256::zero(), + max_priority_fee_per_gas: U256::zero(), + paymaster_and_data: Bytes::new(), + signature: Bytes::new(), }; new_op @@ -336,8 +382,8 @@ fn make_err_op( /// Encapsulate an error code into a UserOperation pub async fn err_op( - op_key:H256, - entry_point:Address, + op_key: H256, + entry_point: Address, err_hc: HcErr, sub_key: H256, src_addr: Address, @@ -346,10 +392,13 @@ pub async fn err_op( map_key: H256, cfg: &HcCfg, ) { - println!("HC hybrid_compute err_op op_key {:?} err_str {:?}", op_key, err_hc.message); + println!( + "HC hybrid_compute err_op op_key {:?} err_str {:?}", + op_key, err_hc.message + ); assert!(err_hc.code >= 2); let mut new_op = make_err_op(err_hc, sub_key, src_addr, nn, oo_nonce, cfg); - let key_bytes: Bytes = cfg.sys_privkey.as_fixed_bytes().into(); + let key_bytes: Bytes = cfg.sys_privkey.as_fixed_bytes().into(); let wallet = LocalWallet::from_bytes(&key_bytes).unwrap(); let hh = new_op.hash(entry_point, cfg.chain_id); @@ -358,34 +407,37 @@ pub async fn err_op( new_op.signature = signature.as_ref().unwrap().to_vec().into(); println!("HC err_op signed {:?} {:?}", signature, new_op.signature); - let ent:HcEntry = HcEntry{ sub_key:sub_key, map_key:map_key, user_op:new_op.clone(), ts:SystemTime::now(), oc_gas:U256::zero(), needed_pvg:U256::zero()}; + let ent: HcEntry = HcEntry { + sub_key: sub_key, + map_key: map_key, + user_op: new_op.clone(), + ts: SystemTime::now(), + oc_gas: U256::zero(), + needed_pvg: U256::zero(), + }; HC_MAP.lock().unwrap().insert(op_key, ent); } /// Encapsulate a RemoveResponses into a UserOperation -pub async fn rr_op( - cfg: &HcCfg, - oo_nonce: U256, - keys: Vec, -) -> 
UserOperationV0_6 { +pub async fn rr_op(cfg: &HcCfg, oo_nonce: U256, keys: Vec) -> UserOperationV0_6 { let call_data = make_rr_calldata(keys); println!("HC rr_op call_data {:?}", call_data); - let mut new_op:UserOperationV0_6 = UserOperationV0_6{ + let mut new_op: UserOperationV0_6 = UserOperationV0_6 { sender: cfg.sys_account, - nonce: oo_nonce.into(), - init_code: Bytes::new(), - call_data: call_data.clone(), - call_gas_limit: U256::from(0x6000), - verification_gas_limit: U256::from(0x10000), - pre_verification_gas: U256::from(0x10000), - max_fee_per_gas: U256::zero(), - max_priority_fee_per_gas: U256::zero(), - paymaster_and_data: Bytes::new(), - signature: Bytes::new(), + nonce: oo_nonce.into(), + init_code: Bytes::new(), + call_data: call_data.clone(), + call_gas_limit: U256::from(0x6000), + verification_gas_limit: U256::from(0x10000), + pre_verification_gas: U256::from(0x10000), + max_fee_per_gas: U256::zero(), + max_priority_fee_per_gas: U256::zero(), + paymaster_and_data: Bytes::new(), + signature: Bytes::new(), }; - let key_bytes: Bytes = cfg.sys_privkey.as_fixed_bytes().into(); + let key_bytes: Bytes = cfg.sys_privkey.as_fixed_bytes().into(); let wallet = LocalWallet::from_bytes(&key_bytes).unwrap(); let hh = new_op.hash(cfg.entry_point, cfg.chain_id); @@ -425,7 +477,10 @@ pub fn get_hc_map_key(key: H256) -> H256 { } /// Retrieve a stateDiff object containing the encoded payload -pub fn get_hc_op_statediff(op_hash: H256, mut s2: ethers::types::spoof::State) -> ethers::types::spoof::State { +pub fn get_hc_op_statediff( + op_hash: H256, + mut s2: ethers::types::spoof::State, +) -> ethers::types::spoof::State { if HC_MAP.lock().unwrap().get(&op_hash).is_none() { return s2; } @@ -443,11 +498,11 @@ pub fn get_hc_op_statediff(op_hash: H256, mut s2: ethers::types::spoof::State) - let mut i = 0; while i < payload.len() { - let next_chunk:H256 = H256::from_slice(&payload[i..32+i]); - s2.account(cfg.helper_addr).store(key, next_chunk); - let u_key:U256 = 
key.into_uint()+1; - key = H256::from_uint(&u_key); - i += 32; + let next_chunk: H256 = H256::from_slice(&payload[i..32 + i]); + s2.account(cfg.helper_addr).store(key, next_chunk); + let u_key: U256 = key.into_uint() + 1; + key = H256::from_uint(&u_key); + i += 32; } s2 } @@ -458,7 +513,14 @@ pub fn hc_set_pvg(key: H256, needed_pvg: U256, oc_gas: U256) { let ent = map.get(&key).unwrap(); //assert!(ent.needed_pvg == U256::zero()); // This is now allowed as an error flag // FIXME - should be a better way to do this. - let new_ent = HcEntry{ sub_key:ent.sub_key, map_key:ent.map_key, user_op:ent.user_op.clone(), ts:ent.ts, needed_pvg:needed_pvg, oc_gas:oc_gas}; + let new_ent = HcEntry { + sub_key: ent.sub_key, + map_key: ent.map_key, + user_op: ent.user_op.clone(), + ts: ent.ts, + needed_pvg: needed_pvg, + oc_gas: oc_gas, + }; map.remove(&key); map.insert(key, new_ent); } @@ -478,7 +540,6 @@ pub fn expire_hc_cache() { map.retain(|_, ent| ent.ts > exp_time); } - #[cfg(test)] mod test { use super::*; @@ -486,27 +547,53 @@ mod test { #[test] fn test_init() { init( - "0x0000000000000000000000000000000000000001".parse::

().unwrap(), - "0x0000000000000000000000000000000000000002".parse::
().unwrap(), - "0x0000000000000000000000000000000000000003".parse::
().unwrap(), - "0x1111111111111111111111111111111111111111111111111111111111111111".parse::().unwrap(), - "0x0000000000000000000000000000000000000004".parse::
().unwrap(), + "0x0000000000000000000000000000000000000001" + .parse::
() + .unwrap(), + "0x0000000000000000000000000000000000000002" + .parse::
() + .unwrap(), + "0x0000000000000000000000000000000000000003" + .parse::
() + .unwrap(), + "0x1111111111111111111111111111111111111111111111111111111111111111" + .parse::() + .unwrap(), + "0x0000000000000000000000000000000000000004" + .parse::
() + .unwrap(), 123, "http://test.local/rpc".to_string(), ); - set_signer("0x0000000000000000000000000000000000000005".parse::
().unwrap()); - - let expected:HcCfg = HcCfg { - helper_addr: "0x0000000000000000000000000000000000000001".parse::
().unwrap(), - sys_account: "0x0000000000000000000000000000000000000002".parse::
().unwrap(), - sys_owner: "0x0000000000000000000000000000000000000003".parse::
().unwrap(), - sys_privkey: "0x1111111111111111111111111111111111111111111111111111111111111111".parse::().unwrap(), - entry_point: "0x0000000000000000000000000000000000000004".parse::
().unwrap(), - chain_id: 123, - node_http: "http://test.local/rpc".to_string(), - from_addr: "0x0000000000000000000000000000000000000005".parse::
().unwrap(), + set_signer( + "0x0000000000000000000000000000000000000005" + .parse::
() + .unwrap(), + ); + + let expected: HcCfg = HcCfg { + helper_addr: "0x0000000000000000000000000000000000000001" + .parse::
() + .unwrap(), + sys_account: "0x0000000000000000000000000000000000000002" + .parse::
() + .unwrap(), + sys_owner: "0x0000000000000000000000000000000000000003" + .parse::
() + .unwrap(), + sys_privkey: "0x1111111111111111111111111111111111111111111111111111111111111111" + .parse::() + .unwrap(), + entry_point: "0x0000000000000000000000000000000000000004" + .parse::
() + .unwrap(), + chain_id: 123, + node_http: "http://test.local/rpc".to_string(), + from_addr: "0x0000000000000000000000000000000000000005" + .parse::
() + .unwrap(), }; - let cfg:HcCfg = HC_CONFIG.lock().unwrap().clone(); + let cfg: HcCfg = HC_CONFIG.lock().unwrap().clone(); assert_eq!(expected, cfg); } @@ -523,29 +610,37 @@ mod test { #[test] fn test_req_parse() { let rev_data = "0x5f48435f545249479c6df0d4c9d8f527221b59c66ad5279c16a1dbc221e8f4e33617575840a20013d516f1be1937bb52bbd7d525d996fd557d3d597f97e0d7ba00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001".parse::().unwrap(); - let e_map_key = "0xa12faae2eedc0b231c96ab3c88c0b7e1e5dbc6fd02c462e79751c1eff7484efb".parse::().unwrap(); - let e_sub_key = "0x16d7f606293dca5dbbe97735b2913e6dade6e3f216310b12148cb67a6fd86947".parse::().unwrap(); - let e_ep_addr = "0x9c6df0d4c9d8f527221b59c66ad5279c16a1dbc2".parse::
().unwrap(); + let e_map_key = "0xa12faae2eedc0b231c96ab3c88c0b7e1e5dbc6fd02c462e79751c1eff7484efb" + .parse::() + .unwrap(); + let e_sub_key = "0x16d7f606293dca5dbbe97735b2913e6dade6e3f216310b12148cb67a6fd86947" + .parse::() + .unwrap(); + let e_ep_addr = "0x9c6df0d4c9d8f527221b59c66ad5279c16a1dbc2" + .parse::
() + .unwrap(); let e_sel = [151, 224, 215, 186]; let e_payload = "0x00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001".parse::().unwrap(); - let map_key = hc_map_key(&rev_data); - assert_eq!(e_map_key, map_key); - let sub_key = hc_sub_key(&rev_data); - assert_eq!(e_sub_key, sub_key); - let ep_addr = hc_ep_addr(&rev_data); - assert_eq!(e_ep_addr, ep_addr); - let sel = hc_selector(&rev_data); - assert_eq!(e_sel, sel); - let payload = hc_req_payload(&rev_data); - assert_eq!(e_payload, payload); + let map_key = hc_map_key(&rev_data); + assert_eq!(e_map_key, map_key); + let sub_key = hc_sub_key(&rev_data); + assert_eq!(e_sub_key, sub_key); + let ep_addr = hc_ep_addr(&rev_data); + assert_eq!(e_ep_addr, ep_addr); + let sel = hc_selector(&rev_data); + assert_eq!(e_sel, sel); + let payload = hc_req_payload(&rev_data); + assert_eq!(e_payload, payload); } #[test] fn test_op_gen_external() { - let cfg:HcCfg = HC_CONFIG.lock().unwrap().clone(); + let cfg: HcCfg = HC_CONFIG.lock().unwrap().clone(); - let payload = "0x0000000000000000000000000000000000000000000000000000000000000002".parse::().unwrap(); + let payload = "0x0000000000000000000000000000000000000000000000000000000000000002" + .parse::() + .unwrap(); let op = make_external_op( "0x1000000000000000000000000000000000000001".parse::
().unwrap(), U256::from(100), @@ -578,28 +673,49 @@ mod test { #[test] fn test_op_gen_error() { let cfg = HcCfg { - helper_addr: "0x0000000000000000000000000000000000000001".parse::
().unwrap(), - sys_account: "0x0000000000000000000000000000000000000002".parse::
().unwrap(), - sys_owner: "0x0000000000000000000000000000000000000003".parse::
().unwrap(), - sys_privkey: "0x1111111111111111111111111111111111111111111111111111111111111111".parse::().unwrap(), - entry_point: "0x0000000000000000000000000000000000000004".parse::
().unwrap(), - chain_id: 123, - node_http: "http://test.local/rpc".to_string(), - from_addr: "0x0000000000000000000000000000000000000005".parse::
().unwrap(), + helper_addr: "0x0000000000000000000000000000000000000001" + .parse::
() + .unwrap(), + sys_account: "0x0000000000000000000000000000000000000002" + .parse::
() + .unwrap(), + sys_owner: "0x0000000000000000000000000000000000000003" + .parse::
() + .unwrap(), + sys_privkey: "0x1111111111111111111111111111111111111111111111111111111111111111" + .parse::() + .unwrap(), + entry_point: "0x0000000000000000000000000000000000000004" + .parse::
() + .unwrap(), + chain_id: 123, + node_http: "http://test.local/rpc".to_string(), + from_addr: "0x0000000000000000000000000000000000000005" + .parse::
() + .unwrap(), }; let op = make_err_op( - HcErr{code:4, message:"unit test".to_string()}, - "0x2222222222222222222222222222222222222222222222222222222222222222".parse::().unwrap(), - "0x2000000000000000000000000000000000000002".parse::
().unwrap(), + HcErr { + code: 4, + message: "unit test".to_string(), + }, + "0x2222222222222222222222222222222222222222222222222222222222222222" + .parse::() + .unwrap(), + "0x2000000000000000000000000000000000000002" + .parse::
() + .unwrap(), U256::from(100), U256::from(222), &cfg, ); let e_calldata = "0xb61d27f60000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000124fde89b642222222222222222222222222222222222222222222222222222222222222222000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000020000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000064000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000009756e69742074657374000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000".parse::().unwrap(); - let expected:UserOperationV0_6 = UserOperationV0_6{ - sender: "0x0000000000000000000000000000000000000002".parse::
().unwrap(), + let expected: UserOperationV0_6 = UserOperationV0_6 { + sender: "0x0000000000000000000000000000000000000002" + .parse::
() + .unwrap(), nonce: U256::from(222), init_code: Bytes::new(), call_data: e_calldata, diff --git a/crates/types/src/user_operation/v0_6.rs b/crates/types/src/user_operation/v0_6.rs index 0a8cee1a..8a502118 100644 --- a/crates/types/src/user_operation/v0_6.rs +++ b/crates/types/src/user_operation/v0_6.rs @@ -62,11 +62,11 @@ impl UserOperationTrait for UserOperation { ])) .into() } - + fn hc_hash(&self) -> H256 { - keccak256(encode(&[ - Token::FixedBytes(keccak256(self.pack_for_hc_hash()).to_vec()), - ])) + keccak256(encode(&[Token::FixedBytes( + keccak256(self.pack_for_hc_hash()).to_vec(), + )])) .into() } diff --git a/crates/types/src/user_operation/v0_7.rs b/crates/types/src/user_operation/v0_7.rs index 0e160010..d9bf8b86 100644 --- a/crates/types/src/user_operation/v0_7.rs +++ b/crates/types/src/user_operation/v0_7.rs @@ -111,7 +111,7 @@ impl UserOperationTrait for UserOperation { } fn hc_hash(&self) -> H256 { - H256::zero() // Not yet implemented + H256::zero() // Not yet implemented } fn id(&self) -> UserOperationId { From 16412c3ecabfd7e678456591379e4188157b91f7 Mon Sep 17 00:00:00 2001 From: Michael Montour Date: Fri, 27 Sep 2024 13:45:32 -0700 Subject: [PATCH 07/13] More "cargo fmt" fixes. 
Changes to be committed: modified: bin/rundler/src/cli/mod.rs modified: crates/builder/src/bundle_proposer.rs modified: crates/builder/src/bundle_sender.rs modified: crates/builder/src/task.rs modified: crates/provider/src/ethers/entry_point/v0_6.rs modified: crates/rpc/src/eth/api.rs modified: crates/sim/src/estimation/mod.rs modified: crates/sim/src/simulation/v0_6/tracer.rs modified: crates/types/src/hybrid_compute.rs --- bin/rundler/src/cli/mod.rs | 3 +-- crates/builder/src/bundle_proposer.rs | 2 +- crates/builder/src/bundle_sender.rs | 2 +- crates/builder/src/task.rs | 4 +--- .../provider/src/ethers/entry_point/v0_6.rs | 3 +-- crates/rpc/src/eth/api.rs | 24 +++++++++---------- crates/sim/src/estimation/mod.rs | 3 +-- crates/sim/src/simulation/v0_6/tracer.rs | 4 +--- crates/types/src/hybrid_compute.rs | 16 +++++++------ 9 files changed, 28 insertions(+), 33 deletions(-) diff --git a/bin/rundler/src/cli/mod.rs b/bin/rundler/src/cli/mod.rs index 2dadaf27..1564f84c 100644 --- a/bin/rundler/src/cli/mod.rs +++ b/bin/rundler/src/cli/mod.rs @@ -24,6 +24,7 @@ mod rpc; mod tracing; use builder::BuilderCliArgs; +use ethers::types::{Address, H256}; use node::NodeCliArgs; use pool::PoolCliArgs; use rpc::RpcCliArgs; @@ -31,8 +32,6 @@ use rundler_rpc::{EthApiSettings, RundlerApiSettings}; use rundler_sim::{ EstimationSettings, PrecheckSettings, PriorityFeeMode, SimulationSettings, MIN_CALL_GAS_LIMIT, }; - -use ethers::types::{Address, H256}; use rundler_types::hybrid_compute; /// Main entry point for the CLI diff --git a/crates/builder/src/bundle_proposer.rs b/crates/builder/src/bundle_proposer.rs index 51898bbb..251a7b41 100644 --- a/crates/builder/src/bundle_proposer.rs +++ b/crates/builder/src/bundle_proposer.rs @@ -37,6 +37,7 @@ use rundler_sim::{ }; use rundler_types::{ chain::ChainSpec, + hybrid_compute, pool::{Pool, PoolOperation, SimulationViolation}, Entity, EntityInfo, EntityInfos, EntityType, EntityUpdate, EntityUpdateType, GasFees, Timestamp, UserOperation, 
UserOperationVariant, UserOpsPerAggregator, BUNDLE_BYTE_OVERHEAD, @@ -47,7 +48,6 @@ use tokio::{sync::broadcast, try_join}; use tracing::{error, info, warn}; use crate::emit::{BuilderEvent, ConditionNotMetReason, OpRejectionReason, SkipReason}; -use rundler_types::hybrid_compute; /// Extra buffer percent to add on the bundle transaction gas estimate to be sure it will be enough const BUNDLE_TRANSACTION_GAS_OVERHEAD_PERCENT: u64 = 5; diff --git a/crates/builder/src/bundle_sender.rs b/crates/builder/src/bundle_sender.rs index 424bbfac..5ec47b7f 100644 --- a/crates/builder/src/bundle_sender.rs +++ b/crates/builder/src/bundle_sender.rs @@ -21,10 +21,10 @@ use futures_util::StreamExt; use mockall::automock; use rundler_provider::{BundleHandler, EntryPoint}; use rundler_sim::ExpectedStorage; -use rundler_types::hybrid_compute; use rundler_types::{ builder::BundlingMode, chain::ChainSpec, + hybrid_compute, pool::{NewHead, Pool}, EntityUpdate, UserOperation, }; diff --git a/crates/builder/src/task.rs b/crates/builder/src/task.rs index 92d02772..3a8a7197 100644 --- a/crates/builder/src/task.rs +++ b/crates/builder/src/task.rs @@ -29,7 +29,7 @@ use rundler_sim::{ }; use rundler_task::Task; use rundler_types::{ - chain::ChainSpec, pool::Pool, v0_6, v0_7, EntryPointVersion, UserOperation, + chain::ChainSpec, hybrid_compute, pool::Pool, v0_6, v0_7, EntryPointVersion, UserOperation, UserOperationVariant, }; use rundler_utils::{emit::WithEntryPoint, handle}; @@ -52,8 +52,6 @@ use crate::{ transaction_tracker::{self, TransactionTrackerImpl}, }; -use rundler_types::hybrid_compute; - /// Builder task arguments #[derive(Debug)] pub struct Args { diff --git a/crates/provider/src/ethers/entry_point/v0_6.rs b/crates/provider/src/ethers/entry_point/v0_6.rs index 6ebb9fb3..8ad55bbe 100644 --- a/crates/provider/src/ethers/entry_point/v0_6.rs +++ b/crates/provider/src/ethers/entry_point/v0_6.rs @@ -23,8 +23,6 @@ use ethers::{ H256, U256, }, }; -use rundler_types::hybrid_compute; - use 
rundler_types::{ chain::ChainSpec, contracts::v0_6::{ @@ -36,6 +34,7 @@ use rundler_types::{ UserOpsPerAggregator as UserOpsPerAggregatorV0_6, }, }, + hybrid_compute, v0_6::UserOperation, GasFees, UserOpsPerAggregator, ValidationError, ValidationOutput, ValidationRevert, }; diff --git a/crates/rpc/src/eth/api.rs b/crates/rpc/src/eth/api.rs index 8bd863c2..2fd36f05 100644 --- a/crates/rpc/src/eth/api.rs +++ b/crates/rpc/src/eth/api.rs @@ -14,14 +14,20 @@ use std::{collections::HashMap, future::Future, pin::Pin}; use ethers::{ - types::{spoof, Address, H256, U64}, + types::{spoof, Address, Bytes, H256, U256, U64}, utils::{hex, to_checksum}, }; use futures_util::future; +use jsonrpsee::{ + core::{client::ClientT, params::ObjectParams, JsonValue}, + http_client::HttpClientBuilder, +}; use rundler_types::{ - chain::ChainSpec, contracts::v0_6::hc_helper::HCHelper as HH2, - contracts::v0_6::simple_account::SimpleAccount, pool::Pool, UserOperation, - UserOperationOptionalGas, UserOperationVariant, + chain::ChainSpec, + contracts::v0_6::{hc_helper::HCHelper as HH2, simple_account::SimpleAccount}, + hybrid_compute, + pool::Pool, + UserOperation, UserOperationOptionalGas, UserOperationVariant, }; use rundler_utils::log::LogOnError; use tracing::Level; @@ -30,15 +36,9 @@ use super::{ error::{EthResult, EthRpcError}, router::EntryPointRouter, }; -use crate::types::{RpcGasEstimate, RpcUserOperationByHash, RpcUserOperationReceipt}; - -use crate::types::RpcGasEstimateV0_6; -use ethers::types::{Bytes, U256}; -use jsonrpsee::{ - core::{client::ClientT, params::ObjectParams, JsonValue}, - http_client::HttpClientBuilder, +use crate::types::{ + RpcGasEstimate, RpcGasEstimateV0_6, RpcUserOperationByHash, RpcUserOperationReceipt, }; -use rundler_types::hybrid_compute; /// Settings for the `eth_` API #[derive(Copy, Clone, Debug)] diff --git a/crates/sim/src/estimation/mod.rs b/crates/sim/src/estimation/mod.rs index c5163e2e..64808dd6 100644 --- a/crates/sim/src/estimation/mod.rs +++ 
b/crates/sim/src/estimation/mod.rs @@ -11,13 +11,12 @@ // You should have received a copy of the GNU General Public License along with Rundler. // If not, see https://www.gnu.org/licenses/. -use ethers::types::{Bytes, U128}; +use ethers::types::{Bytes, U128, U256}; #[cfg(feature = "test-utils")] use mockall::automock; use rundler_types::{GasEstimate, ValidationRevert}; use crate::precheck::MIN_CALL_GAS_LIMIT; -use ethers::types::U256; mod estimate_verification_gas; pub use estimate_verification_gas::{VerificationGasEstimator, VerificationGasEstimatorImpl}; diff --git a/crates/sim/src/simulation/v0_6/tracer.rs b/crates/sim/src/simulation/v0_6/tracer.rs index f1cf77c8..70706e36 100644 --- a/crates/sim/src/simulation/v0_6/tracer.rs +++ b/crates/sim/src/simulation/v0_6/tracer.rs @@ -18,12 +18,10 @@ use ethers::types::{ BlockId, GethDebugTracerType, GethDebugTracingCallOptions, GethDebugTracingOptions, GethTrace, }; use rundler_provider::{Provider, SimulationProvider}; -use rundler_types::v0_6::UserOperation; -use rundler_types::UserOperation as UserOperation2; +use rundler_types::{hybrid_compute, v0_6::UserOperation, UserOperation as UserOperation2}; use serde::Deserialize; use crate::simulation::context::TracerOutput; -use rundler_types::hybrid_compute; impl TryFrom for TracerOutput { type Error = anyhow::Error; diff --git a/crates/types/src/hybrid_compute.rs b/crates/types/src/hybrid_compute.rs index d1edfc69..abe87887 100644 --- a/crates/types/src/hybrid_compute.rs +++ b/crates/types/src/hybrid_compute.rs @@ -11,20 +11,22 @@ ERR_CONNECT = 6 Unable to connect to RPC server (incl. 500-class HTTP error). Considered to be a temporary failure. 
*/ +use std::{ + collections::HashMap, + str::FromStr, + sync::Mutex, + time::{Duration, SystemTime}, +}; + use ethers::{ abi::{AbiDecode, AbiEncode}, signers::{LocalWallet, Signer}, types::{Address, BigEndianHash, Bytes, RecoveryMessage::Data, H256, U256}, utils::keccak256, }; - -use crate::user_operation::UserOperation; -use crate::v0_6::UserOperation as UserOperationV0_6; - -use std::{collections::HashMap, str::FromStr, sync::Mutex}; - use once_cell::sync::Lazy; -use std::time::{Duration, SystemTime}; + +use crate::{user_operation::UserOperation, v0_6::UserOperation as UserOperationV0_6}; #[derive(Clone, Debug)] /// Error code From ceae77e75ccbe0103d4b35823e7234719973509d Mon Sep 17 00:00:00 2001 From: Michael Montour Date: Fri, 27 Sep 2024 14:26:38 -0700 Subject: [PATCH 08/13] Some "cargo clippy" fixes. Changes to be committed: modified: crates/builder/src/bundle_proposer.rs modified: crates/rpc/src/eth/api.rs modified: crates/rpc/src/eth/router.rs modified: crates/types/src/hybrid_compute.rs --- crates/builder/src/bundle_proposer.rs | 4 +-- crates/rpc/src/eth/api.rs | 45 ++++++++++----------------- crates/rpc/src/eth/router.rs | 4 +-- crates/types/src/hybrid_compute.rs | 30 +++++++++--------- 4 files changed, 37 insertions(+), 46 deletions(-) diff --git a/crates/builder/src/bundle_proposer.rs b/crates/builder/src/bundle_proposer.rs index 251a7b41..25519364 100644 --- a/crates/builder/src/bundle_proposer.rs +++ b/crates/builder/src/bundle_proposer.rs @@ -259,7 +259,7 @@ where .into_iter() .flatten() .collect::>(); - if ops_with_simulations.len() > 0 { + if !ops_with_simulations.is_empty() { println!( "HC bundle_proposer before assemble_context len {:?}", ops_with_simulations.len() @@ -671,7 +671,7 @@ where }); } - if cleanup_keys.len() > 0 { + if !cleanup_keys.is_empty() { println!("HC cleanup_keys {:?}", cleanup_keys); let cfg = hybrid_compute::HC_CONFIG.lock().unwrap().clone(); let c_nonce = self diff --git a/crates/rpc/src/eth/api.rs 
b/crates/rpc/src/eth/api.rs index 2fd36f05..12466dcc 100644 --- a/crates/rpc/src/eth/api.rs +++ b/crates/rpc/src/eth/api.rs @@ -130,13 +130,10 @@ where .await; println!("HC result_v {:?}", result_v); - match result_v { - Err(EthRpcError::ExecutionReverted(ref msg)) => { - if *msg == "_HC_VRFY".to_string() { - return true; - } + if let Err(EthRpcError::ExecutionReverted(ref msg)) = result_v { + if *msg == "_HC_VRFY" { + return true; } - _ => {} } false @@ -394,21 +391,18 @@ where if let Err(EthRpcError::ExecutionReverted(ref r2_err)) = r2a { // FIXME println!("HC op_tmp_2 gas estimation failed (RevertInValidation)"); - let msg = "HC04: Offchain validation failed: ".to_string() + &r2_err; + let msg = "HC04: Offchain validation failed: ".to_string() + r2_err; return Err(EthRpcError::Internal(anyhow::anyhow!(msg))); }; - let r2: RpcGasEstimateV0_6; - match r2a? { - RpcGasEstimate::V0_6(abc) => { - r2 = abc; - } + let r2: RpcGasEstimateV0_6 = match r2a? { + RpcGasEstimate::V0_6(estimate) => estimate, _ => { return Err(EthRpcError::Internal(anyhow::anyhow!( "HC04 offchain_op gas estimation failed" ))); } - } + }; // The current formula used to estimate gas usage in the offchain_rpc service // sometimes underestimates the true cost. For now all we can do is error here. @@ -421,8 +415,7 @@ where let offchain_gas = r2.pre_verification_gas + r2.verification_gas_limit + r2.call_gas_limit; - let mut cleanup_keys: Vec = Vec::new(); - cleanup_keys.push(map_key); + let cleanup_keys: Vec = vec![map_key]; let c_nonce = self .router .get_nonce(&entry_point, self.hc.sys_account, U256::zero()) @@ -453,17 +446,14 @@ where at_price, ) .await; - let r4: RpcGasEstimateV0_6; - match r4a? { - RpcGasEstimate::V0_6(abc) => { - r4 = abc; - } + let r4: RpcGasEstimateV0_6 = match r4a? 
{ + RpcGasEstimate::V0_6(estimate) => estimate, _ => { return Err(EthRpcError::Internal(anyhow::anyhow!( "HC04 cleanup_op gas estimation failed" ))); } - } + }; let cleanup_gas = r4.pre_verification_gas + r4.verification_gas_limit + r4.call_gas_limit; @@ -498,14 +488,14 @@ where return Err(EthRpcError::Internal(anyhow::anyhow!(err_msg))); } - return Ok(RpcGasEstimateV0_6 { + Ok(RpcGasEstimateV0_6 { pre_verification_gas: (needed_pvg + PVG_PAD), verification_gas_limit: r3.verification_gas_limit, call_gas_limit: r3.call_gas_limit, } - .into()); + .into()) } else { - return result2; + result2 } } @@ -530,8 +520,8 @@ where println!("HC api.rs estimate_gas result1 {:?}", result); match result { - Ok(ref estimate) => match estimate { - RpcGasEstimate::V0_6(estimate6) => { + Ok(ref estimate) => { + if let RpcGasEstimate::V0_6(estimate6) = estimate { return Ok(RpcGasEstimateV0_6 { pre_verification_gas: estimate6.pre_verification_gas, verification_gas_limit: estimate6.verification_gas_limit + VG_PAD, @@ -539,8 +529,7 @@ where } .into()); } - _ => {} - }, + } Err(EthRpcError::ExecutionRevertedWithBytes(ref r)) => { if hybrid_compute::check_trigger(&r.revert_data) { let bn = 0; //self.provider.get_block_number().await.unwrap(); diff --git a/crates/rpc/src/eth/router.rs b/crates/rpc/src/eth/router.rs index 1e643075..d2a7146e 100644 --- a/crates/rpc/src/eth/router.rs +++ b/crates/rpc/src/eth/router.rs @@ -325,8 +325,8 @@ where async fn get_nonce(&self, addr: Address, key: U256) -> anyhow::Result { let output = self.entry_point.get_nonce(addr, key).await; - if output.is_ok() { - return Ok(output.unwrap()); + if let Ok(nonce) = output { + return Ok(nonce); } Err(anyhow::anyhow!("get_nonce() failed")) } diff --git a/crates/types/src/hybrid_compute.rs b/crates/types/src/hybrid_compute.rs index abe87887..9f5ef87a 100644 --- a/crates/types/src/hybrid_compute.rs +++ b/crates/types/src/hybrid_compute.rs @@ -133,7 +133,7 @@ pub fn init( cfg.sys_privkey = sys_privkey; cfg.entry_point = 
entry_point; cfg.chain_id = chain_id; - cfg.node_http = node_http.clone(); + cfg.node_http.clone_from(&node_http); } /// Set the EOA address which the bundler is using. Erigon, but not geth, needs this for tx simulation @@ -189,11 +189,11 @@ pub fn check_trigger(rev: &Bytes) -> bool { println!("HC trigger check in {:?}", rev); const TRIGGER: [u8; 8] = [0x5f, 0x48, 0x43, 0x5f, 0x54, 0x52, 0x49, 0x47]; - if rev.len() >= MIN_REQ_LEN && &rev[0..8] == TRIGGER { + if rev.len() >= MIN_REQ_LEN && rev[0..8] == TRIGGER { println!("HC HC triggered"); return true; } - return false; + false } /// Key used to store response in the HCHelper mapping @@ -221,8 +221,7 @@ pub fn hc_sub_key(revert_data: &Bytes) -> H256 { /// Endpoint address (address of HybridAccount which called HCHelper) pub fn hc_ep_addr(revert_data: &Bytes) -> Address { - let ep_addr = Address::from_slice(&revert_data[8..28]); - ep_addr + Address::from_slice(&revert_data[8..28]) } /// Extract the function selector called by the HC operation @@ -237,6 +236,7 @@ pub fn hc_req_payload(revert_data: &Bytes) -> Vec { } /// Internal function to generate a UserOperation for an offchain response +#[allow(clippy::too_many_arguments)] // FIXME later fn make_external_op( src_addr: Address, nonce: U256, @@ -270,7 +270,7 @@ fn make_external_op( let mut new_op: UserOperationV0_6 = UserOperationV0_6 { sender: ep_addr, - nonce: oo_nonce.into(), + nonce: oo_nonce, init_code: Bytes::new(), call_data: call_data.clone(), call_gas_limit: U256::from(call_gas), @@ -288,6 +288,7 @@ fn make_external_op( } /// Processes an external hybrid compute op. 
+#[allow(clippy::too_many_arguments)] // FIXME later pub async fn external_op( op_key: H256, src_addr: Address, @@ -335,8 +336,8 @@ pub async fn external_op( } let ent: HcEntry = HcEntry { - sub_key: sub_key, - map_key: map_key, + sub_key, + map_key, user_op: new_op.clone(), ts: SystemTime::now(), oc_gas: U256::zero(), @@ -367,7 +368,7 @@ fn make_err_op( let new_op: UserOperationV0_6 = UserOperationV0_6 { sender: cfg.sys_account, - nonce: oo_nonce.into(), + nonce: oo_nonce, init_code: Bytes::new(), call_data: call_data.clone(), call_gas_limit: U256::from(0x40000), @@ -383,6 +384,7 @@ fn make_err_op( } /// Encapsulate an error code into a UserOperation +#[allow(clippy::too_many_arguments)] // FIXME later pub async fn err_op( op_key: H256, entry_point: Address, @@ -410,8 +412,8 @@ pub async fn err_op( println!("HC err_op signed {:?} {:?}", signature, new_op.signature); let ent: HcEntry = HcEntry { - sub_key: sub_key, - map_key: map_key, + sub_key, + map_key, user_op: new_op.clone(), ts: SystemTime::now(), oc_gas: U256::zero(), @@ -427,7 +429,7 @@ pub async fn rr_op(cfg: &HcCfg, oo_nonce: U256, keys: Vec) -> UserOperatio let mut new_op: UserOperationV0_6 = UserOperationV0_6 { sender: cfg.sys_account, - nonce: oo_nonce.into(), + nonce: oo_nonce, init_code: Bytes::new(), call_data: call_data.clone(), call_gas_limit: U256::from(0x6000), @@ -520,8 +522,8 @@ pub fn hc_set_pvg(key: H256, needed_pvg: U256, oc_gas: U256) { map_key: ent.map_key, user_op: ent.user_op.clone(), ts: ent.ts, - needed_pvg: needed_pvg, - oc_gas: oc_gas, + needed_pvg, + oc_gas, }; map.remove(&key); map.insert(key, new_ent); From 729f7efbd8d39fbe2c70947a4ae56cae85cd0092 Mon Sep 17 00:00:00 2001 From: Michael Montour Date: Fri, 27 Sep 2024 14:46:20 -0700 Subject: [PATCH 09/13] More "clippy" fixes. 
Changes to be committed: modified: crates/types/src/hybrid_compute.rs --- crates/types/src/hybrid_compute.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/types/src/hybrid_compute.rs b/crates/types/src/hybrid_compute.rs index 9f5ef87a..6129ff6a 100644 --- a/crates/types/src/hybrid_compute.rs +++ b/crates/types/src/hybrid_compute.rs @@ -606,9 +606,9 @@ mod test { let t_no = "0x5f41415f545249479c6df0d4c9d8f527221b59c66ad5279c16a1dbc221e8f4e33617575840a20013d516f1be1937bb52bbd7d525d996fd557d3d597f97e0d7ba00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001".parse::().unwrap(); let t_yes = "0x5f48435f545249479c6df0d4c9d8f527221b59c66ad5279c16a1dbc221e8f4e33617575840a20013d516f1be1937bb52bbd7d525d996fd557d3d597f97e0d7ba00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001".parse::().unwrap(); let t_short = "0x5f48435f545249479c6df0d4c9d8f527221b59c66ad5279c16a1dbc221e8f4e33617575840a20013d516f1be1937bb52bbd7d525d996fd557d3d597f97e0d7".parse::().unwrap(); - assert_eq!(check_trigger(&t_no), false); - assert_eq!(check_trigger(&t_yes), true); - assert_eq!(check_trigger(&t_short), false); + assert!(!check_trigger(&t_no)); + assert!(check_trigger(&t_yes)); + assert!(!check_trigger(&t_short)); } #[test] From 4135df6b82759dd2f11594b667b417c2270ec220 Mon Sep 17 00:00:00 2001 From: Michael Montour Date: Fri, 27 Sep 2024 19:20:45 -0700 Subject: [PATCH 10/13] fix: remove actions-rs to resolve deprecation warnings. 
Changes to be committed: modified: .github/workflows/ci.yaml --- .github/workflows/ci.yaml | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 161ce127..820d5401 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -5,6 +5,9 @@ on: - main pull_request: +env: + GH_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }} + name: ci jobs: lint: @@ -32,29 +35,21 @@ jobs: # Build to generate the ABI bindings. - name: cargo build - uses: actions-rs/cargo@v1 - with: - command: build - args: --all --all-features + run: | + cargo build --all --all-features - name: cargo check - uses: actions-rs/cargo@v1 - with: - command: check - args: --all --all-features + run: | + cargo check --all --all-features - name: cargo +nightly fmt - uses: actions-rs/cargo@v1 - with: - command: fmt - toolchain: nightly - args: --all --check + run: | + cargo +nightly fmt --all --check - name: cargo clippy - uses: actions-rs/clippy-check@v1 - with: - token: ${{ secrets.GITHUB_TOKEN }} - args: --all --all-features --tests -- -D warnings + run: | + cargo clippy --all --all-features --tests -- \ + -D warnings - name: buf setup uses: bufbuild/buf-setup-action@v1 From 84cde4a732fc37d578b77cdcf4839745f3048e30 Mon Sep 17 00:00:00 2001 From: Michael Montour Date: Fri, 27 Sep 2024 19:50:38 -0700 Subject: [PATCH 11/13] fix: point to a bobanetwork bundler-test-executor repo Changes to be committed: modified: .github/workflows/compliance.yaml --- .github/workflows/compliance.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/compliance.yaml b/.github/workflows/compliance.yaml index f0e58712..f7db090a 100644 --- a/.github/workflows/compliance.yaml +++ b/.github/workflows/compliance.yaml @@ -24,8 +24,8 @@ jobs: uses: actions/checkout@v4 with: path: bundler-test-executors - repository: alchemyplatform/bundler-test-executor - ref: releases/v0.6 + repository: 
bobanetwork/bundler-test-executor + ref: hc-dev - name: Build rundler image locally run: docker buildx build ./rundler -t alchemyplatform/rundler:latest From d65cc10bfe18ba3f937b74105bd8f8288bde370c Mon Sep 17 00:00:00 2001 From: Michael Montour Date: Mon, 30 Sep 2024 11:12:11 -0700 Subject: [PATCH 12/13] fix: Docker containers were broken. Changes to be committed: modified: hybrid-compute/Dockerfile.offchain-rpc modified: hybrid-compute/docker-compose.yml --- hybrid-compute/Dockerfile.offchain-rpc | 3 ++- hybrid-compute/docker-compose.yml | 5 ++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/hybrid-compute/Dockerfile.offchain-rpc b/hybrid-compute/Dockerfile.offchain-rpc index e1197eec..26a4f5ef 100644 --- a/hybrid-compute/Dockerfile.offchain-rpc +++ b/hybrid-compute/Dockerfile.offchain-rpc @@ -1,6 +1,7 @@ FROM python:3.8-slim RUN apt update RUN apt install -y wamerican git -RUN pip3 install --default-timeout=100 web3 git+https://github.com/bobanetwork/jsonrpclib.git redis python-dotenv +RUN pip3 install --default-timeout=100 web3 git+https://github.com/bobanetwork/jsonrpclib.git jsonrpcclient redis python-dotenv COPY ./offchain / +COPY ./aa_utils /aa_utils CMD [ "python", "-u", "./offchain.py" ] diff --git a/hybrid-compute/docker-compose.yml b/hybrid-compute/docker-compose.yml index 28f3df12..27b0cdc9 100644 --- a/hybrid-compute/docker-compose.yml +++ b/hybrid-compute/docker-compose.yml @@ -24,13 +24,16 @@ services: HC_SYS_ACCOUNT: ${HC_SYS_ACCOUNT} # Deployed contract address HC_SYS_OWNER: ${HC_SYS_OWNER} # Owner/Signer for HC_SYS_ACCOUNT HC_SYS_PRIVKEY: ${HC_SYS_PRIVKEY} # Owner/Signer for HC_SYS_ACCOUNT - ENTRY_POINTS: ${ENTRY_POINTS} # Deployed contract address BUILDER_PRIVATE_KEY: ${BUILDER_PRIVKEY} # Key for bundler EOA NODE_HTTP: ${NODE_HTTP} # Replica/Sequencer URL CHAIN_ID: ${CHAIN_ID} # Chain ID RPC_PORT: 3300 # RPC port to submit into Bundler ports: - 3300:3300 + command: > + node + --disable_entry_point_v0_7 + 
--builder.dropped_status_unsupported offchain-rpc: build: From b87ce6953a1658b43addbd87c31961fe415fa620 Mon Sep 17 00:00:00 2001 From: Michael Montour Date: Tue, 1 Oct 2024 15:11:59 -0700 Subject: [PATCH 13/13] fix: reverted an unneeded change in the AA Account contracts Changes to be committed: modified: crates/types/contracts/lib/account-abstraction-versions/v0_6 --- crates/types/contracts/lib/account-abstraction-versions/v0_6 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/types/contracts/lib/account-abstraction-versions/v0_6 b/crates/types/contracts/lib/account-abstraction-versions/v0_6 index 46a3d4b7..c79c0f59 160000 --- a/crates/types/contracts/lib/account-abstraction-versions/v0_6 +++ b/crates/types/contracts/lib/account-abstraction-versions/v0_6 @@ -1 +1 @@ -Subproject commit 46a3d4b75f448a92687b4c7102e525705d64b754 +Subproject commit c79c0f5910d2db18cff494883e38e3c2a9d2a6b1