From d9bc404641ce135d4959827b874a60c6cf7c7002 Mon Sep 17 00:00:00 2001 From: Guillaume Ballet <3272758+gballet@users.noreply.github.com> Date: Thu, 6 May 2021 18:58:39 +0200 Subject: [PATCH] Verkle tree-based state with overlay transition Squash the main verkle PR ahead of rebase don't call Bytes() in GetTreeKey (#137) trie: avoid endianness conversion in GetTreeKey (#140) * trie/utils: add concrete expected value in trie key generation test Signed-off-by: Ignacio Hagopian * mod: update to latest go-verkle Signed-off-by: Ignacio Hagopian * trie/utils: avoid endianness conversions Signed-off-by: Ignacio Hagopian * apply review changes & update to official go-verkle version Signed-off-by: Ignacio Hagopian Signed-off-by: Ignacio Hagopian upgrade go-verkle to CoW version and get TestProcessVerkle to build (#138) updating ci to use self-hosted machine (#143) fix: storage offset in non-header group + reuse of value buffer (#145) dedup call to ChunkifyCode, same as replay branch (#156) * dedup call to ChunkifyCode, same as replay branch * fix some linter issues fix code offset in tree update (#157) fix REVERT in state processor test execution (#158) * fix code offset in tree update * fix REVERT in test execution save on key hashing: lump code size update with first code chunk group (#159) fix code chunk key calculation and storage key calculation (#161) * fix codeKey calculation * Remove * fix storageOffset * fix the fix to the fix to the offset fix * Remove copy/pasted, unused code in test * fix linter --------- Co-authored-by: Guillaume Ballet <3272758+gballet@users.noreply.github.com> fix: infinite loop when calling extcodecopy on empty code (#151) upgrade to latest go-verkle fix: only update code in the tree if it's dirty (#174) fix: read-touch the code size and Keccak of the origin (#175) List of changes for converting a sepolia database (#182) * naive conversion rebased on top of beverly hills * changes for the sepolia shadow fork conversion * fixes to please the 
linter * fixes to please the linter Unified point cache (#180) * Unified point cache * Use cache for Try*Account * alter Trie interface to use caching for slots (#181) * alter Trie interface to use caching for slots * fix: use a lock to protect the point cache (#185) * use fastest non-master go-verkle version & pull trie/Verkle.go changes to use new api (#184) * mod: update to fastest go-verkle version today Signed-off-by: Ignacio Hagopian * trie/verkle: use new batch serialization api Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian Co-authored-by: Ignacio Hagopian * fix: TryDelete signature in unit tests --------- Signed-off-by: Ignacio Hagopian Co-authored-by: Ignacio Hagopian trie/utils: fix potential overflow (#191) * trie/utils: fix potential overflow Signed-off-by: Ignacio Hagopian * trie/utils: receive storage key as a byte slice Signed-off-by: Ignacio Hagopian * revert formatter changes Signed-off-by: Ignacio Hagopian * trie/utils: fix mod 256 Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian trie/utils: fix incorrect bigint assignment (#193) Signed-off-by: Ignacio Hagopian upgrade precomp link to fix CI fix: add missing code size&keccak leaves in empty accounts (#192) fixes to use the latest go-verkle@master (#197) * fixes to use the latest go-verkle@master * linter fixes * linter fixes for tests * fix: use jsign's go-verkle fix refactor: remove unused (*StateDB).GetXLittleEndian methods (#204) fix gas accounting issue in state_processor_test.go (#207) update go-verkle not to use StatelessNode anymore (#206) * update go-verkle not to use StatelessNode anymore * update go-verkle to latest refactor: move verkle gas accounting to its own block in TransitionDB (#208) fix a panic in deserializeVerkleProof if GetProofItems returns a nil ProofElements use the cachingDB instead of a custom VerkleDB (#209) * use the cachingDB instead of a custom VerkleDB * fix stack trace in 
LES remove holiman from CODEOWNERS as he gets too many emails read from tree in state object if the snapshot is nil (#205) add missing error checks for the root node type (#214) implement OpenStorageTrie for verkle trees (#210) * implement OpenStorageTrie for verkle trees * add a few comments for future maintenance * fix linter issue fix: copy balance leaf to new buffer in TryGetAccount (#217) implement some heretofore unimplemented iterator methods (#219) params: move verkle params to their own file (#228) fix: proper number of chunk evals (#215) overlay transition (#244) * overlay transition Fix some bugs identified in the code review Co-authored-by: Ignacio Hagopian Include base -> overlay key-values migration logic (#199) * mod: add go-verkle version with key-value migration new apis Signed-off-by: Ignacio Hagopian * core/stateprocessor: use constant for max number of migrated key-values Signed-off-by: Ignacio Hagopian * core: add base->overlay key-values migration logic Signed-off-by: Ignacio Hagopian * core: fix some compiler errors Signed-off-by: Ignacio Hagopian * trie: consider removing transition trie api in the future Signed-off-by: Ignacio Hagopian * mod: use latest go-verkle Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian fix some unit tests errors get conversion block from file fix compilation issues fix initialization issue in migrator fix: changes needed to run the first 28 blocks important stuff: fix the banner fix: use nonce instead of balance in nonce leaf (#202) fixes for performing the overlay transition (#203) * fixes for performing the overlay transition * fixes for the full replay * fix: deletion-and-recreation of EoA * fixes to replay 2M+ blocks * upgrade to go-verkle@master * fix: proper number of chunk evals * rewrite conversion loop to fix known issues changes to make replay work with the overlay method (#216) * fixes for performing the overlay transition fixes for the full replay fix: deletion-and-recreation of 
EoA fixes to replay 2M+ blocks upgrade to go-verkle@master fix: proper number of chunk evals rewrite conversion loop to fix known issues changes to make replay work with the overlay method fixes to replay 2M+ blocks update to latest go-verkle@master * use a PBSS-like scheme for internal nodes (#221) * use a PBSS-like scheme for internal nodes * a couple of fixes coming from debugging replay * fix: use an error to notify the transition tree that a deleted account was found in the overlay tree (#222) * fixes for pbss replay (#227) * fixes for pbss replay * trie/verkle: use capped batch size (#229) * trie/verkle: use capped batch size Signed-off-by: Ignacio Hagopian * trie/verkle: avoid path variable allocation per db.Put Signed-off-by: Ignacio Hagopian * don't keep more than 32 state root conversions in RAM (#230) --------- Signed-off-by: Ignacio Hagopian Co-authored-by: Guillaume Ballet <3272758+gballet@users.noreply.github.com> * cleanup some code * mod: update go-verkle Signed-off-by: Ignacio Hagopian * re-enable snapshot (#231) * re-enable cancun block / snapshot (#226) * clear storage conversion key upon translating account (#234) * clear storage conversion key upon translating account * mod: use latest go-verkle Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian Co-authored-by: Ignacio Hagopian * fix: self-deadlock with translated root map mutex (#236) * return compressed commitment as root commitment (#237) --------- Signed-off-by: Ignacio Hagopian Co-authored-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian Co-authored-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian Co-authored-by: Ignacio Hagopian fix first panic in *TransitionTrie.Copy() upgrade go-verkle to latest master mod: update go-verkle (#239) Signed-off-by: Ignacio Hagopian core: print state root every 100 blocks (#240) Signed-off-by: Ignacio Hagopian fix: only Commit the account trie (#242) fixes to get TestProcessVerkle to work with the overlay 
branch (#238) * fixes to get TestProcessVerkle to work with the overlay branch * fix all panics in verkle state processor test * fix proof verification move transition management to cachingDB * fix: mark the verkle transition as started if it's ended without being started * fix the verkle state processing test * fix linter errors * Add a function to clear verkle params for replay * fix: handle TransitionTrie in OpenStorageTrie * fix linter issue * fix the deleted account error (#247) * code cleanup (#248) * fix: don't error on a missing conversion.txt (#249) * Overlay Tree preimages exporting and usage (#246) * export overlay preimages tool Signed-off-by: Ignacio Hagopian * use preimages flat file in overlay tree migration logic Signed-off-by: Ignacio Hagopian * cmd/geth: add --roothash to overlay tree preimage exporting command Signed-off-by: Ignacio Hagopian * cleanup Signed-off-by: Ignacio Hagopian * review feedback Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian * fix: reduce the PR footprint (#250) * fix: don't fail when preimages.bin is missing (#251) * fix: don't fail when preimages.bin is missing * fix: don't open the preimages file when outside of transition --------- Signed-off-by: Ignacio Hagopian Co-authored-by: Ignacio Hagopian review changes remove replay-specific code move UpdateContractCode to updateStateObject fix: generated rlp decoders fix: activate Shanghai in test add withdrawals to witness fix the first half of verkle test fix: open an empty storage trie after transition activate verkle on IsVerkle, not IsCancun deactivate snapshot for tests enable proof of stake in verkle test save last MPT root for transition remove unnecessary snapshot Cap in flush fix test: include EIP-3860 implement missing odrDatabase function fix incorrect equality condition in HasAccount fixes to replay ~500 blocks fix to replay more blocks fix preimage issue in conversion code cleanup: remove a lot of TODOs & friends more code cleanup fix: 
alignment of values whose len is < 32 New access-witness module (#235) Use plain addresses/slot numbers instead of hashing them in the witness remove unused map param in MakeVerkleMultiProof (#261) core/state: rewrite a new optimized keyValueMigrator (#256) * trie/utils: add helper to calculate code tree indices * core/state: rewrite optimized version of keyValueMigrator Signed-off-by: Ignacio Hagopian * trie/verkle: remove uint256 allocs (#257) Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian core: move overlay conversion code to its own file (#266) add pre-pbss rebase branches to CI (#270) * add post-pbss rebase branches to CI * fix go version in CI * fix linter issues * upgrade go version to 1.21.1 to avoid github deploy error quell zero-tree message port kaustinen code on top of shapella-rebased branch activate proof generation on fork + remove code dups use go-verkle's post-state API to verify proofs (#262) use prague as the verkle activation fork (#263) upgrade to latest go-ipa activate verkle transition in "miner" (#265) fix: do not force cancunTime upon verkle activation workaround: do not use root translation in replay workaround: deactivate overlay transition for now fixes from trying to get the devnet to work (#267) this line was left out from the previous commit upgrade to go-verkle with fixed newvalue serialization fix: ensure point cache isn't nil in copy (#268) fix: dependency cycle in tests (#269) upgrade to latest go-verkle fix: write trie preimage data to db (#274) fix: zero-root in produced block + sync (#275) upgrade go-ipa fix build fix typo include review feedback add switch to add proofs to blocks (#278) add fee recipient to witness (#279) touch all fields in withdrawal account header (#277) fix: add ProofInBlocks to chain config (#280) remove StateDB as an extra param to Prepare (#281) fix: need commitment in tests (#282) fix linter message fix: a couple of CI issues fix more CI complaints achieving perfection 
workaround: disable check for root presence (#283) fix: activate verkle at genesis (#284) fix: ensure read-only values are resolved in post trie (#285) upgrade deps to get proof generation absence/presence bugfix fix: make sure AccessWitness isn't nil when calling RPC methods (#287) fix: incorrect access copy (#288) fix: return serialized root in state root, not its mapping to a scalar field (#289) add workaround to use --override.prague (#292) Shanghai time workaround (#294) * add workaround to use --override.prague * fix typo in comments * fix: enable overrides at genesis time remove pre-state tree warmup (#293) * remove pre-state tree warmup Signed-off-by: Ignacio Hagopian * update go-verkle Signed-off-by: Ignacio Hagopian --------- Signed-off-by: Ignacio Hagopian --- .github/CODEOWNERS | 2 +- .github/workflows/go.yml | 48 +++ beacon/engine/gen_ed.go | 74 ++--- beacon/engine/types.go | 74 ++--- cmd/geth/chaincmd.go | 49 +++- cmd/geth/config.go | 6 +- cmd/geth/main.go | 3 +- cmd/geth/verkle.go | 255 +++++++++++++++- cmd/utils/cmd.go | 70 +++++ cmd/utils/flags.go | 9 +- consensus/beacon/consensus.go | 83 +++++- consensus/ethash/consensus.go | 13 + core/block_validator.go | 1 + core/blockchain.go | 63 ++++ core/chain_makers.go | 138 +++++++++ core/error.go | 5 + core/genesis.go | 81 ++++-- core/genesis_test.go | 2 +- core/overlay_transition.go | 245 ++++++++++++++++ core/state/access_witness.go | 254 ++++++++++++++++ core/state/database.go | 260 ++++++++++++++++- core/state/iterator.go | 5 +- core/state/snapshot/snapshot.go | 15 + core/state/state_object.go | 4 +- core/state/statedb.go | 63 +++- core/state/statedb_test.go | 31 +- core/state/sync_test.go | 5 +- core/state/trie_prefetcher.go | 2 +- core/state_processor.go | 199 +++++++++++++ core/state_processor_test.go | 125 ++++++++ core/state_transition.go | 40 +++ core/types/block.go | 32 ++ core/types/state_account.go | 5 + core/vm/common.go | 12 + core/vm/contract.go | 9 +- core/vm/contracts.go | 2 + 
core/vm/evm.go | 47 ++- core/vm/gas_table.go | 82 +++++- core/vm/instructions.go | 98 ++++++- core/vm/interface.go | 4 + core/vm/interpreter.go | 12 + core/vm/jump_table.go | 2 + core/vm/jump_table_export.go | 4 +- core/vm/operations_acl.go | 19 +- eth/backend.go | 4 +- eth/catalyst/api.go | 11 + eth/ethconfig/config.go | 2 +- eth/ethconfig/gen_config.go | 10 +- eth/tracers/api.go | 4 - eth/tracers/js/tracer_test.go | 3 + go.mod | 18 +- go.sum | 39 ++- internal/ethapi/api.go | 7 +- les/client.go | 4 +- les/server_requests.go | 2 +- light/odr_test.go | 2 +- light/trie.go | 71 ++++- miner/worker.go | 29 +- miner/worker_test.go | 19 ++ params/config.go | 22 +- params/verkle_params.go | 36 +++ trie/database.go | 44 +++ trie/secure_trie.go | 4 + trie/transition.go | 193 ++++++++++++ trie/utils/verkle.go | 300 +++++++++++++++++++ trie/utils/verkle_test.go | 95 ++++++ trie/verkle.go | 501 ++++++++++++++++++++++++++++++++ trie/verkle_iterator.go | 218 ++++++++++++++ trie/verkle_iterator_test.go | 68 +++++ trie/verkle_test.go | 381 ++++++++++++++++++++++++ 70 files changed, 4410 insertions(+), 229 deletions(-) create mode 100644 .github/workflows/go.yml create mode 100644 core/overlay_transition.go create mode 100644 core/state/access_witness.go create mode 100644 params/verkle_params.go create mode 100644 trie/transition.go create mode 100644 trie/utils/verkle.go create mode 100644 trie/utils/verkle_test.go create mode 100644 trie/verkle.go create mode 100644 trie/verkle_iterator.go create mode 100644 trie/verkle_iterator_test.go create mode 100644 trie/verkle_test.go diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index faf922df0161..f38d522b7edb 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -6,7 +6,7 @@ accounts/scwallet @gballet accounts/abi @gballet @MariusVanDerWijden cmd/clef @holiman consensus @karalabe -core/ @karalabe @holiman @rjl493456442 +core/ @karalabe @rjl493456442 eth/ @karalabe @holiman @rjl493456442 eth/catalyst/ @gballet eth/tracers/ 
@s1na diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml new file mode 100644 index 000000000000..61542140b004 --- /dev/null +++ b/.github/workflows/go.yml @@ -0,0 +1,48 @@ +name: Go lint and test + +on: + push: + branches: [ master ] + pull_request: + branches: [ master, verkle-trie-proof-in-block-rebased, verkle-trie-post-merge, beverly-hills-head, 'verkle/replay-change-with-tree-group-tryupdate', beverly-hills-just-before-pbss, kaustinen-with-shapella ] + workflow_dispatch: + +jobs: + build: + runs-on: self-hosted + steps: + - uses: actions/checkout@v2 + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.21.1 + - name: Build + run: go build -v ./... + + lint: + runs-on: self-hosted + steps: + - uses: actions/checkout@v2 + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.21.1 + - name: Download golangci-lint + run: wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s latest + - name: Lint + run: ./bin/golangci-lint run + - name: Vet + run: go vet + + test: + runs-on: self-hosted + steps: + - uses: actions/checkout@v2 + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.21.1 + - name: Download precomputed points + run: wget -nv https://github.com/gballet/go-verkle/releases/download/banderwagonv3/precomp -Otrie/utils/precomp + - name: Test + run: go test ./... diff --git a/beacon/engine/gen_ed.go b/beacon/engine/gen_ed.go index 6893d64a1626..2a92e7420407 100644 --- a/beacon/engine/gen_ed.go +++ b/beacon/engine/gen_ed.go @@ -17,23 +17,24 @@ var _ = (*executableDataMarshaling)(nil) // MarshalJSON marshals as JSON. 
func (e ExecutableData) MarshalJSON() ([]byte, error) { type ExecutableData struct { - ParentHash common.Hash `json:"parentHash" gencodec:"required"` - FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` - StateRoot common.Hash `json:"stateRoot" gencodec:"required"` - ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` - LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"` - Random common.Hash `json:"prevRandao" gencodec:"required"` - Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"` - GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` - ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"` - BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` - BlockHash common.Hash `json:"blockHash" gencodec:"required"` - Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` - Withdrawals []*types.Withdrawal `json:"withdrawals"` - BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` - ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` + ParentHash common.Hash `json:"parentHash" gencodec:"required"` + FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` + StateRoot common.Hash `json:"stateRoot" gencodec:"required"` + ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` + LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"` + Random common.Hash `json:"prevRandao" gencodec:"required"` + Number hexutil.Uint64 `json:"blockNumber" gencodec:"required"` + GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` + ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"` + BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` + BlockHash common.Hash 
`json:"blockHash" gencodec:"required"` + Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` + Withdrawals []*types.Withdrawal `json:"withdrawals"` + BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` + ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` + ExecutionWitness *types.ExecutionWitness `json:"executionWitness"` } var enc ExecutableData enc.ParentHash = e.ParentHash @@ -58,29 +59,31 @@ func (e ExecutableData) MarshalJSON() ([]byte, error) { enc.Withdrawals = e.Withdrawals enc.BlobGasUsed = (*hexutil.Uint64)(e.BlobGasUsed) enc.ExcessBlobGas = (*hexutil.Uint64)(e.ExcessBlobGas) + enc.ExecutionWitness = e.ExecutionWitness return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. func (e *ExecutableData) UnmarshalJSON(input []byte) error { type ExecutableData struct { - ParentHash *common.Hash `json:"parentHash" gencodec:"required"` - FeeRecipient *common.Address `json:"feeRecipient" gencodec:"required"` - StateRoot *common.Hash `json:"stateRoot" gencodec:"required"` - ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"` - LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"` - Random *common.Hash `json:"prevRandao" gencodec:"required"` - Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"` - GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"` - ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"` - BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` - BlockHash *common.Hash `json:"blockHash" gencodec:"required"` - Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` - Withdrawals []*types.Withdrawal `json:"withdrawals"` - BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` - ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` + ParentHash *common.Hash `json:"parentHash" gencodec:"required"` + FeeRecipient 
*common.Address `json:"feeRecipient" gencodec:"required"` + StateRoot *common.Hash `json:"stateRoot" gencodec:"required"` + ReceiptsRoot *common.Hash `json:"receiptsRoot" gencodec:"required"` + LogsBloom *hexutil.Bytes `json:"logsBloom" gencodec:"required"` + Random *common.Hash `json:"prevRandao" gencodec:"required"` + Number *hexutil.Uint64 `json:"blockNumber" gencodec:"required"` + GasLimit *hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + Timestamp *hexutil.Uint64 `json:"timestamp" gencodec:"required"` + ExtraData *hexutil.Bytes `json:"extraData" gencodec:"required"` + BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` + BlockHash *common.Hash `json:"blockHash" gencodec:"required"` + Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` + Withdrawals []*types.Withdrawal `json:"withdrawals"` + BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` + ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` + ExecutionWitness *types.ExecutionWitness `json:"executionWitness"` } var dec ExecutableData if err := json.Unmarshal(input, &dec); err != nil { @@ -154,5 +157,8 @@ func (e *ExecutableData) UnmarshalJSON(input []byte) error { if dec.ExcessBlobGas != nil { e.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas) } + if dec.ExecutionWitness != nil { + e.ExecutionWitness = dec.ExecutionWitness + } return nil } diff --git a/beacon/engine/types.go b/beacon/engine/types.go index f1801edd1a93..0066e9181756 100644 --- a/beacon/engine/types.go +++ b/beacon/engine/types.go @@ -64,6 +64,8 @@ type ExecutableData struct { Withdrawals []*types.Withdrawal `json:"withdrawals"` BlobGasUsed *uint64 `json:"blobGasUsed"` ExcessBlobGas *uint64 `json:"excessBlobGas"` + + ExecutionWitness *types.ExecutionWitness `json:"executionWitness"` } // JSON type overrides for executableData. 
@@ -208,24 +210,25 @@ func ExecutableDataToBlock(params ExecutableData, versionedHashes []common.Hash) withdrawalsRoot = &h } header := &types.Header{ - ParentHash: params.ParentHash, - UncleHash: types.EmptyUncleHash, - Coinbase: params.FeeRecipient, - Root: params.StateRoot, - TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)), - ReceiptHash: params.ReceiptsRoot, - Bloom: types.BytesToBloom(params.LogsBloom), - Difficulty: common.Big0, - Number: new(big.Int).SetUint64(params.Number), - GasLimit: params.GasLimit, - GasUsed: params.GasUsed, - Time: params.Timestamp, - BaseFee: params.BaseFeePerGas, - Extra: params.ExtraData, - MixDigest: params.Random, - WithdrawalsHash: withdrawalsRoot, - ExcessBlobGas: params.ExcessBlobGas, - BlobGasUsed: params.BlobGasUsed, + ParentHash: params.ParentHash, + UncleHash: types.EmptyUncleHash, + Coinbase: params.FeeRecipient, + Root: params.StateRoot, + TxHash: types.DeriveSha(types.Transactions(txs), trie.NewStackTrie(nil)), + ReceiptHash: params.ReceiptsRoot, + Bloom: types.BytesToBloom(params.LogsBloom), + Difficulty: common.Big0, + Number: new(big.Int).SetUint64(params.Number), + GasLimit: params.GasLimit, + GasUsed: params.GasUsed, + Time: params.Timestamp, + BaseFee: params.BaseFeePerGas, + Extra: params.ExtraData, + MixDigest: params.Random, + WithdrawalsHash: withdrawalsRoot, + ExcessBlobGas: params.ExcessBlobGas, + BlobGasUsed: params.BlobGasUsed, + ExecutionWitness: params.ExecutionWitness, } block := types.NewBlockWithHeader(header).WithBody(txs, nil /* uncles */).WithWithdrawals(params.Withdrawals) if block.Hash() != params.BlockHash { @@ -238,23 +241,24 @@ func ExecutableDataToBlock(params ExecutableData, versionedHashes []common.Hash) // fields from the given block. It assumes the given block is post-merge block. 
func BlockToExecutableData(block *types.Block, fees *big.Int, blobs []kzg4844.Blob, commitments []kzg4844.Commitment, proofs []kzg4844.Proof) *ExecutionPayloadEnvelope { data := &ExecutableData{ - BlockHash: block.Hash(), - ParentHash: block.ParentHash(), - FeeRecipient: block.Coinbase(), - StateRoot: block.Root(), - Number: block.NumberU64(), - GasLimit: block.GasLimit(), - GasUsed: block.GasUsed(), - BaseFeePerGas: block.BaseFee(), - Timestamp: block.Time(), - ReceiptsRoot: block.ReceiptHash(), - LogsBloom: block.Bloom().Bytes(), - Transactions: encodeTransactions(block.Transactions()), - Random: block.MixDigest(), - ExtraData: block.Extra(), - Withdrawals: block.Withdrawals(), - BlobGasUsed: block.BlobGasUsed(), - ExcessBlobGas: block.ExcessBlobGas(), + BlockHash: block.Hash(), + ParentHash: block.ParentHash(), + FeeRecipient: block.Coinbase(), + StateRoot: block.Root(), + Number: block.NumberU64(), + GasLimit: block.GasLimit(), + GasUsed: block.GasUsed(), + BaseFeePerGas: block.BaseFee(), + Timestamp: block.Time(), + ReceiptsRoot: block.ReceiptHash(), + LogsBloom: block.Bloom().Bytes(), + Transactions: encodeTransactions(block.Transactions()), + Random: block.MixDigest(), + ExtraData: block.Extra(), + Withdrawals: block.Withdrawals(), + BlobGasUsed: block.BlobGasUsed(), + ExcessBlobGas: block.ExcessBlobGas(), + ExecutionWitness: block.ExecutionWitness(), } blobsBundle := BlobsBundleV1{ Commitments: make([]hexutil.Bytes, 0), diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 41591ac138a8..55c22f7322f3 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -49,7 +49,7 @@ var ( Name: "init", Usage: "Bootstrap and initialize a new genesis block", ArgsUsage: "", - Flags: flags.Merge([]cli.Flag{utils.CachePreimagesFlag}, utils.DatabasePathFlags), + Flags: flags.Merge([]cli.Flag{utils.CachePreimagesFlag, utils.OverridePrague}, utils.DatabasePathFlags), Description: ` The init command initializes a new genesis block and definition for the network. 
This is a destructive action and changes the network in which you will be @@ -144,6 +144,17 @@ It's deprecated, please use "geth db import" instead. Description: ` The export-preimages command exports hash preimages to an RLP encoded stream. It's deprecated, please use "geth db export" instead. +`, + } + exportOverlayPreimagesCommand = &cli.Command{ + Action: exportOverlayPreimages, + Name: "export-overlay-preimages", + Usage: "Export the preimage in overlay tree migration order", + ArgsUsage: "", + Flags: flags.Merge([]cli.Flag{utils.TreeRootFlag}, utils.DatabasePathFlags), + Description: ` +The export-overlay-preimages command exports hash preimages to a flat file, in exactly +the expected order for the overlay tree migration. `, } dumpCommand = &cli.Command{ @@ -190,6 +201,12 @@ func initGenesis(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() + var overrides core.ChainOverrides + if ctx.IsSet(utils.OverridePrague.Name) { + v := ctx.Uint64(utils.OverridePrague.Name) + overrides.OverridePrague = &v + } + for _, name := range []string{"chaindata", "lightchaindata"} { chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false) if err != nil { @@ -197,8 +214,9 @@ func initGenesis(ctx *cli.Context) error { } triedb := trie.NewDatabaseWithConfig(chaindb, &trie.Config{ Preimages: ctx.Bool(utils.CachePreimagesFlag.Name), + Verkle: true, }) - _, hash, err := core.SetupGenesisBlock(chaindb, triedb, genesis) + _, hash, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides) if err != nil { utils.Fatalf("Failed to write genesis block: %v", err) } @@ -399,6 +417,33 @@ func exportPreimages(ctx *cli.Context) error { return nil } +// exportOverlayPreimages dumps the preimage data to a flat file. 
+func exportOverlayPreimages(ctx *cli.Context) error { + if ctx.Args().Len() < 1 { + utils.Fatalf("This command requires an argument.") + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + chain, _ := utils.MakeChain(ctx, stack, true) + + var root common.Hash + if ctx.String(utils.TreeRootFlag.Name) != "" { + rootBytes := common.FromHex(ctx.String(utils.StartKeyFlag.Name)) + if len(rootBytes) != common.HashLength { + return fmt.Errorf("invalid root hash length") + } + root = common.BytesToHash(rootBytes) + } + + start := time.Now() + if err := utils.ExportOverlayPreimages(chain, ctx.Args().First(), root); err != nil { + utils.Fatalf("Export error: %v\n", err) + } + fmt.Printf("Export done in %v\n", time.Since(start)) + return nil +} + func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) { db := utils.MakeChainDatabase(ctx, stack, true) var header *types.Header diff --git a/cmd/geth/config.go b/cmd/geth/config.go index 1a3de04bd4d2..bf01c6f91857 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -171,9 +171,9 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { v := ctx.Uint64(utils.OverrideCancun.Name) cfg.Eth.OverrideCancun = &v } - if ctx.IsSet(utils.OverrideVerkle.Name) { - v := ctx.Uint64(utils.OverrideVerkle.Name) - cfg.Eth.OverrideVerkle = &v + if ctx.IsSet(utils.OverridePrague.Name) { + v := ctx.Uint64(utils.OverridePrague.Name) + cfg.Eth.OverridePrague = &v } backend, eth := utils.RegisterEthService(stack, &cfg.Eth) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 568aec4b8b7a..1d7bb33dc381 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -66,7 +66,7 @@ var ( utils.USBFlag, utils.SmartCardDaemonPathFlag, utils.OverrideCancun, - utils.OverrideVerkle, + utils.OverridePrague, utils.EnablePersonal, utils.TxPoolLocalsFlag, utils.TxPoolNoLocalsFlag, @@ -207,6 +207,7 @@ func init() { exportCommand, importPreimagesCommand, exportPreimagesCommand, + 
exportOverlayPreimagesCommand, removedbCommand, dumpCommand, dumpGenesisCommand, diff --git a/cmd/geth/verkle.go b/cmd/geth/verkle.go index 9ba2b4167164..d1953697b9bd 100644 --- a/cmd/geth/verkle.go +++ b/cmd/geth/verkle.go @@ -18,17 +18,26 @@ package main import ( "bytes" + "encoding/binary" "encoding/hex" "errors" "fmt" "os" + "runtime" + "time" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state/snapshot" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + tutils "github.com/ethereum/go-ethereum/trie/utils" "github.com/gballet/go-verkle" + "github.com/holiman/uint256" cli "github.com/urfave/cli/v2" ) @@ -40,6 +49,20 @@ var ( Usage: "A set of experimental verkle tree management commands", Description: "", Subcommands: []*cli.Command{ + { + Name: "to-verkle", + Usage: "use the snapshot to compute a translation of a MPT into a verkle tree", + ArgsUsage: "", + Action: convertToVerkle, + Flags: flags.Merge([]cli.Flag{}, utils.NetworkFlags, utils.DatabasePathFlags), + Description: ` +geth verkle to-verkle +This command takes a snapshot and inserts its values in a fresh verkle tree. + +The argument is interpreted as the root hash. If none is provided, the latest +block is used. + `, + }, { Name: "verify", Usage: "verify the conversion of a MPT into a verkle tree", @@ -67,6 +90,228 @@ in which key1, key2, ... are expanded. 
} ) +func convertToVerkle(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + chaindb := utils.MakeChainDatabase(ctx, stack, false) + if chaindb == nil { + return errors.New("nil chaindb") + } + headBlock := rawdb.ReadHeadBlock(chaindb) + if headBlock == nil { + log.Error("Failed to load head block") + return errors.New("no head block") + } + if ctx.NArg() > 1 { + log.Error("Too many arguments given") + return errors.New("too many arguments") + } + var ( + root common.Hash + err error + ) + if ctx.NArg() == 1 { + root, err = parseRoot(ctx.Args().First()) + if err != nil { + log.Error("Failed to resolve state root", "error", err) + return err + } + log.Info("Start traversing the state", "root", root) + } else { + root = headBlock.Root() + log.Info("Start traversing the state", "root", root, "number", headBlock.NumberU64()) + } + + var ( + accounts int + lastReport time.Time + start = time.Now() + vRoot = verkle.New().(*verkle.InternalNode) + ) + + saveverkle := func(path []byte, node verkle.VerkleNode) { + node.Commit() + s, err := node.Serialize() + if err != nil { + panic(err) + } + if err := chaindb.Put(path, s); err != nil { + panic(err) + } + } + + snaptree, err := snapshot.New(snapshot.Config{CacheSize: 256}, chaindb, trie.NewDatabase(chaindb), root) + if err != nil { + return err + } + accIt, err := snaptree.AccountIterator(root, common.Hash{}) + if err != nil { + return err + } + defer accIt.Release() + + // root.FlushAtDepth(depth, saveverkle) + + // Process all accounts sequentially + for accIt.Next() { + accounts += 1 + acc, err := types.FullAccount(accIt.Account()) + if err != nil { + log.Error("Invalid account encountered during traversal", "error", err) + return err + } + + // Store the basic account data + var ( + nonce, balance, version, size [32]byte + newValues = make([][]byte, 256) + ) + newValues[0] = version[:] + newValues[1] = balance[:] + newValues[2] = nonce[:] + newValues[4] = version[:] // memory-saving 
trick: by default, an account has 0 size + binary.LittleEndian.PutUint64(nonce[:8], acc.Nonce) + for i, b := range acc.Balance.Bytes() { + balance[len(acc.Balance.Bytes())-1-i] = b + } + addr := rawdb.ReadPreimage(chaindb, accIt.Hash()) + if addr == nil { + return fmt.Errorf("could not find preimage for address %x %v %v", accIt.Hash(), acc, accIt.Error()) + } + addrPoint := tutils.EvaluateAddressPoint(addr) + stem := tutils.GetTreeKeyVersionWithEvaluatedAddress(addrPoint) + + // Store the account code if present + if !bytes.Equal(acc.CodeHash, types.EmptyRootHash[:]) { + code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash)) + chunks := trie.ChunkifyCode(code) + + for i := 0; i < 128 && i < len(chunks)/32; i++ { + newValues[128+i] = chunks[32*i : 32*(i+1)] + } + + for i := 128; i < len(chunks)/32; { + values := make([][]byte, 256) + chunkkey := tutils.GetTreeKeyCodeChunkWithEvaluatedAddress(addrPoint, uint256.NewInt(uint64(i))) + j := i + for ; (j-i) < 256 && j < len(chunks)/32; j++ { + values[(j-128)%256] = chunks[32*j : 32*(j+1)] + } + i = j + + // Otherwise, store the previous group in the tree with a + // stem insertion. 
+ vRoot.InsertStem(chunkkey[:31], values, chaindb.Get) + } + + // Write the code size in the account header group + binary.LittleEndian.PutUint64(size[:8], uint64(len(code))) + } + newValues[3] = acc.CodeHash[:] + newValues[4] = size[:] + + // Save every slot into the tree + if acc.Root != types.EmptyRootHash { + var translatedStorage = map[string][][]byte{} + + storageIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{}) + if err != nil { + log.Error("Failed to open storage trie", "root", acc.Root, "error", err) + return err + } + for storageIt.Next() { + // The value is RLP-encoded, decode it + var ( + value []byte // slot value after RLP decoding + safeValue [32]byte // 32-byte aligned value + ) + if err := rlp.DecodeBytes(storageIt.Slot(), &value); err != nil { + return fmt.Errorf("error decoding bytes %x: %w", storageIt.Slot(), err) + } + copy(safeValue[32-len(value):], value) + + slotnr := rawdb.ReadPreimage(chaindb, storageIt.Hash()) + if slotnr == nil { + return fmt.Errorf("could not find preimage for slot %x", storageIt.Hash()) + } + + // if the slot belongs to the header group, store it there - and skip + // calculating the slot key. 
+ slotnrbig := uint256.NewInt(0).SetBytes(slotnr) + if slotnrbig.Cmp(uint256.NewInt(64)) < 0 { + newValues[64+slotnr[31]] = safeValue[:] + continue + } + + // Slot not in the header group, get its tree key + slotkey := tutils.GetTreeKeyStorageSlotWithEvaluatedAddress(addrPoint, slotnr) + + // Create the group if need be + values := translatedStorage[string(slotkey[:31])] + if values == nil { + values = make([][]byte, 256) + } + + // Store value in group + values[slotkey[31]] = safeValue[:] + translatedStorage[string(slotkey[:31])] = values + + // Dump the stuff to disk if we ran out of space + var mem runtime.MemStats + runtime.ReadMemStats(&mem) + if mem.Alloc > 25*1024*1024*1024 { + fmt.Println("Memory usage exceeded threshold, calling mitigation function") + for s, vs := range translatedStorage { + var k [31]byte + copy(k[:], []byte(s)) + // reminder that InsertStem will merge leaves + // if they exist. + vRoot.InsertStem(k[:31], vs, chaindb.Get) + } + translatedStorage = make(map[string][][]byte) + vRoot.FlushAtDepth(2, saveverkle) + } + } + for s, vs := range translatedStorage { + var k [31]byte + copy(k[:], []byte(s)) + vRoot.InsertStem(k[:31], vs, chaindb.Get) + } + storageIt.Release() + if storageIt.Error() != nil { + log.Error("Failed to traverse storage trie", "root", acc.Root, "error", storageIt.Error()) + return storageIt.Error() + } + } + // Finish with storing the complete account header group inside the tree. 
+ vRoot.InsertStem(stem[:31], newValues, chaindb.Get) + + if time.Since(lastReport) > time.Second*8 { + log.Info("Traversing state", "accounts", accounts, "elapsed", common.PrettyDuration(time.Since(start))) + lastReport = time.Now() + } + + var mem runtime.MemStats + runtime.ReadMemStats(&mem) + if mem.Alloc > 25*1024*1024*1024 { + fmt.Println("Memory usage exceeded threshold, calling mitigation function") + vRoot.FlushAtDepth(2, saveverkle) + } + } + if accIt.Error() != nil { + log.Error("Failed to compute commitment", "root", root, "error", accIt.Error()) + return accIt.Error() + } + log.Info("Wrote all leaves", "accounts", accounts, "elapsed", common.PrettyDuration(time.Since(start))) + + vRoot.Commit() + vRoot.Flush(saveverkle) + + log.Info("Conversion complete", "root commitment", fmt.Sprintf("%x", vRoot.Commit().Bytes()), "accounts", accounts, "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} + // recurse into each child to ensure they can be loaded from the db. The tree isn't rebuilt // (only its nodes are loaded) so there is no need to flush them, the garbage collector should // take care of that for us. 
@@ -74,7 +319,7 @@ func checkChildren(root verkle.VerkleNode, resolver verkle.NodeResolverFn) error switch node := root.(type) { case *verkle.InternalNode: for i, child := range node.Children() { - childC := child.Commit().Bytes() + childC := child.Commitment().Bytes() childS, err := resolver(childC[:]) if bytes.Equal(childC[:], zero[:]) { @@ -84,7 +329,7 @@ func checkChildren(root verkle.VerkleNode, resolver verkle.NodeResolverFn) error return fmt.Errorf("could not find child %x in db: %w", childC, err) } // depth is set to 0, the tree isn't rebuilt so it's not a problem - childN, err := verkle.ParseNode(childS, 0, childC[:]) + childN, err := verkle.ParseNode(childS, 0) if err != nil { return fmt.Errorf("decode error child %x in db: %w", child.Commitment().Bytes(), err) } @@ -144,7 +389,7 @@ func verifyVerkle(ctx *cli.Context) error { if err != nil { return err } - root, err := verkle.ParseNode(serializedRoot, 0, rootC[:]) + root, err := verkle.ParseNode(serializedRoot, 0) if err != nil { return err } @@ -193,7 +438,7 @@ func expandVerkle(ctx *cli.Context) error { if err != nil { return err } - root, err := verkle.ParseNode(serializedRoot, 0, rootC[:]) + root, err := verkle.ParseNode(serializedRoot, 0) if err != nil { return err } @@ -203,7 +448,7 @@ func expandVerkle(ctx *cli.Context) error { root.Get(key, chaindb.Get) } - if err := os.WriteFile("dump.dot", []byte(verkle.ToDot(root)), 0600); err != nil { + if err := os.WriteFile("dump.dot", []byte(verkle.ToDot(root)), 0o600); err != nil { log.Error("Failed to dump file", "err", err) } else { log.Info("Tree was dumped to file", "file", "dump.dot") diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go index 16b126057218..de25fd1a146d 100644 --- a/cmd/utils/cmd.go +++ b/cmd/utils/cmd.go @@ -176,6 +176,7 @@ func ImportChain(chain *core.BlockChain, fn string) error { return err } } + stream := rlp.NewStream(reader, 0) // Run actual the import. 
@@ -374,6 +375,75 @@ func ExportPreimages(db ethdb.Database, fn string) error { return nil } +// ExportOverlayPreimages exports all known hash preimages into the specified file, +// in the same order as expected by the overlay tree migration. +func ExportOverlayPreimages(chain *core.BlockChain, fn string, root common.Hash) error { + log.Info("Exporting preimages", "file", fn) + + fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) + if err != nil { + return err + } + defer fh.Close() + + writer := bufio.NewWriter(fh) + defer writer.Flush() + + statedb, err := chain.State() + if err != nil { + return fmt.Errorf("failed to open statedb: %w", err) + } + + if root == (common.Hash{}) { + root = chain.CurrentBlock().Root + } + + accIt, err := statedb.Snaps().AccountIterator(root, common.Hash{}) + if err != nil { + return err + } + defer accIt.Release() + + count := 0 + for accIt.Next() { + acc, err := types.FullAccount(accIt.Account()) + if err != nil { + return fmt.Errorf("invalid account encountered during traversal: %s", err) + } + addr := rawdb.ReadPreimage(statedb.Database().DiskDB(), accIt.Hash()) + if len(addr) != 20 { + return fmt.Errorf("invalid address preimage length, want 20 bytes, got %d", len(addr)) + } + if _, err := writer.Write(addr); err != nil { + return fmt.Errorf("failed to write addr preimage: %w", err) + } + + if acc.HasStorage() { + stIt, err := statedb.Snaps().StorageIterator(root, accIt.Hash(), common.Hash{}) + if err != nil { + return fmt.Errorf("failed to create storage iterator: %w", err) + } + for stIt.Next() { + slotnr := rawdb.ReadPreimage(statedb.Database().DiskDB(), stIt.Hash()) + if len(slotnr) != 32 { + return fmt.Errorf("invalid slot preimage length, want 32 bytes, got %d", len(slotnr)) + } + if _, err := writer.Write(slotnr); err != nil { + return fmt.Errorf("failed to write slotnr preimage: %w", err) + } + } + stIt.Release() + } + count++ + if count%100000 == 0 { + log.Info("Last exported account", "account", accIt.Hash()) + } + } + + log.Info("Exported preimages", "file", fn) +
return nil +} + // exportHeader is used in the export/import flow. When we do an export, // the first element we output is the exportHeader. // Whenever a backwards-incompatible change is made, the Version header diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index e0c7a42670e1..b927d0f94f83 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -216,6 +216,11 @@ var ( Usage: "Max number of elements (0 = no limit)", Value: 0, } + TreeRootFlag = &cli.StringFlag{ + Name: "roothash", + Usage: "Root hash of the tree (if empty, use the latest)", + Value: "", + } defaultSyncMode = ethconfig.Defaults.SyncMode SyncModeFlag = &flags.TextMarshalerFlag{ @@ -263,8 +268,8 @@ var ( Usage: "Manually specify the Cancun fork timestamp, overriding the bundled setting", Category: flags.EthCategory, } - OverrideVerkle = &cli.Uint64Flag{ - Name: "override.verkle", + OverridePrague = &cli.Uint64Flag{ + Name: "override.prague", Usage: "Manually specify the Verkle fork timestamp, overriding the bundled setting", Category: flags.EthCategory, } diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go index 1ad4358cfff8..4627b4d837bc 100644 --- a/consensus/beacon/consensus.go +++ b/consensus/beacon/consensus.go @@ -30,6 +30,9 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" + "github.com/holiman/uint256" ) // Proof-of-stake protocol constants. @@ -352,8 +355,19 @@ func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types. amount := new(big.Int).SetUint64(w.Amount) amount = amount.Mul(amount, big.NewInt(params.GWei)) state.AddBalance(w.Address, amount) - } - // No block reward which is issued by consensus layer instead. 
+ + // The returned gas is not charged + state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.VersionLeafKey) + state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.BalanceLeafKey) + state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.NonceLeafKey) + state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.CodeKeccakLeafKey) + state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.CodeSizeLeafKey) + } + state.Witness().TouchAddressOnWriteAndComputeGas(header.Coinbase[:], uint256.Int{}, utils.VersionLeafKey) + state.Witness().TouchAddressOnWriteAndComputeGas(header.Coinbase[:], uint256.Int{}, utils.BalanceLeafKey) + state.Witness().TouchAddressOnWriteAndComputeGas(header.Coinbase[:], uint256.Int{}, utils.NonceLeafKey) + state.Witness().TouchAddressOnWriteAndComputeGas(header.Coinbase[:], uint256.Int{}, utils.CodeKeccakLeafKey) + state.Witness().TouchAddressOnWriteAndComputeGas(header.Coinbase[:], uint256.Int{}, utils.CodeSizeLeafKey) } // FinalizeAndAssemble implements consensus.Engine, setting the final state and @@ -379,8 +393,71 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea // Assign the final state root to header. 
header.Root = state.IntermediateRoot(true) + var ( + p *verkle.VerkleProof + k verkle.StateDiff + keys = state.Witness().Keys() + ) + if chain.Config().IsPrague(header.Number, header.Time) && chain.Config().ProofInBlocks { + // Open the pre-tree to prove the pre-state against + parent := chain.GetHeaderByNumber(header.Number.Uint64() - 1) + if parent == nil { + return nil, fmt.Errorf("nil parent header for block %d", header.Number) + } + + preTrie, err := state.Database().OpenTrie(parent.Root) + if err != nil { + return nil, fmt.Errorf("error opening pre-state tree root: %w", err) + } + + var okpre, okpost bool + var vtrpre, vtrpost *trie.VerkleTrie + switch pre := preTrie.(type) { + case *trie.VerkleTrie: + vtrpre, okpre = preTrie.(*trie.VerkleTrie) + vtrpost, okpost = state.GetTrie().(*trie.VerkleTrie) + case *trie.TransitionTrie: + vtrpre = pre.Overlay() + okpre = true + post, _ := state.GetTrie().(*trie.TransitionTrie) + vtrpost = post.Overlay() + okpost = true + default: + panic("invalid tree type") + } + if okpre && okpost { + // TODO: see if this can be captured at the witness + // level, like it used to. + for _, key := range keys { + // WORKAROUND: the post trie would normally not + // need to be searched for keys, as all of them + // were resolved during block execution. + // But since the prefetcher isn't currently used + // with verkle, the values that are read but not + // written to, are not resolved as they are read + // straight from the snapshot. They must be read + // in order to build the proof. + _, err = vtrpost.GetWithHashedKey(key) + if err != nil { + panic(err) + } + } + + if len(keys) > 0 { + p, k, err = trie.ProveAndSerialize(vtrpre, vtrpost, keys, vtrpre.FlatdbNodeResolver) + if err != nil { + return nil, fmt.Errorf("error generating verkle proof for block %d: %w", header.Number, err) + } + } + } + } + // Assemble and return the final block. 
- return types.NewBlockWithWithdrawals(header, txs, uncles, receipts, withdrawals, trie.NewStackTrie(nil)), nil + block := types.NewBlockWithWithdrawals(header, txs, uncles, receipts, withdrawals, trie.NewStackTrie(nil)) + if chain.Config().IsPrague(header.Number, header.Time) && chain.Config().ProofInBlocks { + block.SetVerkleProof(p, k) + } + return block, nil } // Seal generates a new sealing request for the given input block and pushes diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 8eb9863da1e2..44aec25c1216 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -33,6 +33,8 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/holiman/uint256" "golang.org/x/crypto/sha3" ) @@ -564,10 +566,21 @@ func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header r.Sub(r, header.Number) r.Mul(r, blockReward) r.Div(r, big8) + + // This should not happen, but it's useful for replay tests + if config.IsPrague(header.Number, header.Time) { + state.Witness().TouchAddressOnReadAndComputeGas(uncle.Coinbase.Bytes(), uint256.Int{}, utils.BalanceLeafKey) + } state.AddBalance(uncle.Coinbase, r) r.Div(blockReward, big32) reward.Add(reward, r) } + if config.IsPrague(header.Number, header.Time) { + state.Witness().TouchAddressOnReadAndComputeGas(header.Coinbase.Bytes(), uint256.Int{}, utils.BalanceLeafKey) + state.Witness().TouchAddressOnReadAndComputeGas(header.Coinbase.Bytes(), uint256.Int{}, utils.VersionLeafKey) + state.Witness().TouchAddressOnReadAndComputeGas(header.Coinbase.Bytes(), uint256.Int{}, utils.NonceLeafKey) + state.Witness().TouchAddressOnReadAndComputeGas(header.Coinbase.Bytes(), uint256.Int{}, utils.CodeKeccakLeafKey) + } state.AddBalance(header.Coinbase, reward) } diff --git a/core/block_validator.go b/core/block_validator.go index 
3c9ac3dc49d5..b1ceab9d5c6c 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -102,6 +102,7 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) { return consensus.ErrUnknownAncestor } + // The parent block is known but its state has been pruned away. return consensus.ErrPrunedAncestor } return nil diff --git a/core/blockchain.go b/core/blockchain.go index 3952c31b688f..27a74de822bf 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -18,11 +18,15 @@ package core import ( + "bufio" "errors" "fmt" "io" + "math" "math/big" + "os" "runtime" + "strconv" "strings" "sync" "sync/atomic" @@ -305,6 +309,12 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis } // Make sure the state associated with the block is available head := bc.CurrentBlock() + + // Declare the end of the verkle transition if need be + if bc.chainConfig.Rules(head.Number, false /* XXX */, head.Time).IsPrague { + bc.stateCache.EndVerkleTransition() + } + if !bc.HasState(head.Root) { // Head state is missing, before the state recovery, find out the // disk layer point of snapshot(if it's enabled). Make sure the @@ -401,6 +411,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis Recovery: recover, NoBuild: bc.cacheConfig.SnapshotNoBuild, AsyncBuild: !bc.cacheConfig.SnapshotWait, + Verkle: chainConfig.IsPrague(head.Number, head.Time), } bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root) } @@ -1336,6 +1347,7 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. if err := blockBatch.Write(); err != nil { log.Crit("Failed to write block into disk", "err", err) } + state.Database().TrieDB().WritePreimages() // Commit all cached state changes into underlying memory database. 
root, err := state.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number())) if err != nil { @@ -1508,6 +1520,30 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) { return bc.insertChain(chain, true) } +func findVerkleConversionBlock() (uint64, error) { + if _, err := os.Stat("conversion.txt"); os.IsNotExist(err) { + return math.MaxUint64, nil + } + + f, err := os.Open("conversion.txt") + if err != nil { + log.Error("Failed to open conversion.txt", "err", err) + return 0, err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + scanner.Scan() + conversionBlock, err := strconv.ParseUint(scanner.Text(), 10, 64) + if err != nil { + log.Error("Failed to parse conversionBlock", "err", err) + return 0, err + } + log.Info("Found conversion block info", "conversionBlock", conversionBlock) + + return conversionBlock, nil +} + // insertChain is the internal implementation of InsertChain, which assumes that // 1) chains are contiguous, and 2) The chain mutex is held. 
// @@ -1522,6 +1558,11 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) return 0, nil } + conversionBlock, err := findVerkleConversionBlock() + if err != nil { + return 0, err + } + // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss) SenderCacher.RecoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number(), chain[0].Time()), chain) @@ -1703,6 +1744,11 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error) if parent == nil { parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1) } + + if parent.Number.Uint64() == conversionBlock { + bc.StartVerkleTransition(parent.Root, emptyVerkleRoot, bc.Config(), &parent.Time) + bc.stateCache.SetLastMerkleRoot(parent.Root) + } statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps) if err != nil { return it.index, err @@ -2332,6 +2378,8 @@ func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool { return false } +var emptyVerkleRoot common.Hash + // indexBlocks reindexes or unindexes transactions depending on user configuration func (bc *BlockChain) indexBlocks(tail *uint64, head uint64, done chan struct{}) { defer func() { close(done) }() @@ -2483,3 +2531,18 @@ func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) { func (bc *BlockChain) GetTrieFlushInterval() time.Duration { return time.Duration(bc.flushInterval.Load()) } + +func (bc *BlockChain) StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, pragueTime *uint64) { + bc.stateCache.StartVerkleTransition(originalRoot, translatedRoot, chainConfig, pragueTime) +} +func (bc *BlockChain) ReorgThroughVerkleTransition() { + bc.stateCache.ReorgThroughVerkleTransition() +} + +func (bc *BlockChain) EndVerkleTransition() { + bc.stateCache.EndVerkleTransition() +} + +func (bc *BlockChain) AddRootTranslation(originalRoot, translatedRoot common.Hash) { + 
bc.stateCache.AddRootTranslation(originalRoot, translatedRoot) +} diff --git a/core/chain_makers.go b/core/chain_makers.go index 4e8e80b92b56..5d8ade8a0fb0 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -26,11 +26,13 @@ import ( "github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" + "github.com/gballet/go-verkle" ) // BlockGen creates blocks for testing. @@ -355,10 +357,90 @@ func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, if err != nil { panic(err) } + if genesis.Config != nil && genesis.Config.IsPrague(genesis.ToBlock().Number(), genesis.ToBlock().Time()) { + blocks, receipts, _, _ := GenerateVerkleChain(genesis.Config, genesis.ToBlock(), engine, db, n, gen) + return db, blocks, receipts + } blocks, receipts := GenerateChain(genesis.Config, genesis.ToBlock(), engine, db, n, gen) return db, blocks, receipts } +func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts, []*verkle.VerkleProof, []verkle.StateDiff) { + if config == nil { + config = params.TestChainConfig + } + proofs := make([]*verkle.VerkleProof, 0, n) + keyvals := make([]verkle.StateDiff, 0, n) + blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n) + chainreader := &generatedLinearChainReader{ + config: config, + // GenerateVerkleChain should only be called with the genesis block + // as parent. 
+ genesis: parent, + chain: blocks, + } + genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) { + b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine} + b.header = makeHeader(chainreader, parent, statedb, b.engine) + preState := statedb.Copy() + fmt.Println("prestate", preState.GetTrie().(*trie.VerkleTrie).ToDot()) + + // Mutate the state and block according to any hard-fork specs + if daoBlock := config.DAOForkBlock; daoBlock != nil { + limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange) + if b.header.Number.Cmp(daoBlock) >= 0 && b.header.Number.Cmp(limit) < 0 { + if config.DAOForkSupport { + b.header.Extra = common.CopyBytes(params.DAOForkBlockExtra) + } + } + } + if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(b.header.Number) == 0 { + misc.ApplyDAOHardFork(statedb) + } + // Execute any user modifications to the block + if gen != nil { + gen(i, b) + } + if b.engine != nil { + // Finalize and seal the block + block, err := b.engine.FinalizeAndAssemble(chainreader, b.header, statedb, b.txs, b.uncles, b.receipts, b.withdrawals) + if err != nil { + panic(err) + } + + // Write state changes to db + root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number)) + if err != nil { + panic(fmt.Sprintf("state write error: %v", err)) + } + if err := statedb.Database().TrieDB().Commit(root, false); err != nil { + panic(fmt.Sprintf("trie write error: %v", err)) + } + + proofs = append(proofs, block.ExecutionWitness().VerkleProof) + keyvals = append(keyvals, block.ExecutionWitness().StateDiff) + + return block, b.receipts + } + return nil, nil + } + var snaps *snapshot.Tree + for i := 0; i < n; i++ { + triedb := state.NewDatabaseWithConfig(db, nil) + triedb.EndVerkleTransition() + statedb, err := state.New(parent.Root(), triedb, snaps) + if err != nil { + panic(fmt.Sprintf("could not find state for block %d: 
err=%v, parent root=%x", i, err, parent.Root())) + } + block, receipt := genblock(i, parent, statedb) + blocks[i] = block + receipts[i] = receipt + parent = block + snaps = statedb.Snaps() + } + return blocks, receipts, proofs, keyvals +} + func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.StateDB, engine consensus.Engine) *types.Header { var time uint64 if parent.Time() == 0 { @@ -441,3 +523,59 @@ func (cr *fakeChainReader) GetHeaderByHash(hash common.Hash) *types.Header func (cr *fakeChainReader) GetHeader(hash common.Hash, number uint64) *types.Header { return nil } func (cr *fakeChainReader) GetBlock(hash common.Hash, number uint64) *types.Block { return nil } func (cr *fakeChainReader) GetTd(hash common.Hash, number uint64) *big.Int { return nil } + +type generatedLinearChainReader struct { + config *params.ChainConfig + genesis *types.Block + chain []*types.Block +} + +func (v *generatedLinearChainReader) Config() *params.ChainConfig { + return v.config +} + +func (v *generatedLinearChainReader) CurrentHeader() *types.Header { + return nil +} + +func (v *generatedLinearChainReader) GetHeader(_ common.Hash, number uint64) *types.Header { + if number == 0 { + return v.genesis.Header() + } + return v.chain[number-1].Header() +} + +func (v *generatedLinearChainReader) GetHeaderByNumber(number uint64) *types.Header { + if number == 0 { + return v.genesis.Header() + } + return v.chain[number-1].Header() +} + +func (v *generatedLinearChainReader) GetHeaderByHash(hash common.Hash) *types.Header { + if hash == v.genesis.Hash() { + return v.genesis.Header() + } + + for _, block := range v.chain { + if block.Hash() == hash { + return block.Header() + } + } + + return nil +} + +func (v *generatedLinearChainReader) GetBlock(_ common.Hash, number uint64) *types.Block { + if number == 0 { + return v.genesis + } + return v.chain[number-1] +} + +func (v *generatedLinearChainReader) GetTd(_ common.Hash, number uint64) *big.Int { + if number == 0 { 
+ return v.genesis.Difficulty() + } + return v.chain[number-1].Difficulty() +} diff --git a/core/error.go b/core/error.go index 4214ed207a91..2d9fa5463ce7 100644 --- a/core/error.go +++ b/core/error.go @@ -67,6 +67,11 @@ var ( // than init code size limit. ErrMaxInitCodeSizeExceeded = errors.New("max initcode size exceeded") + // ErrInsufficientBalanceWitness is returned if the transaction sender has enough + // funds to cover the transfer, but not enough to pay for witness access/modification + // costs for the transaction + ErrInsufficientBalanceWitness = errors.New("insufficient funds to cover witness access costs for transaction") + // ErrInsufficientFunds is returned if the total cost of executing a transaction // is higher than the balance of the user's account. ErrInsufficientFunds = errors.New("insufficient funds for gas * price + value") diff --git a/core/genesis.go b/core/genesis.go index 33716d0b61df..74294afb7272 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -121,10 +121,13 @@ func (ga *GenesisAlloc) UnmarshalJSON(data []byte) error { } // deriveHash computes the state root according to the genesis specification. -func (ga *GenesisAlloc) deriveHash() (common.Hash, error) { +func (ga *GenesisAlloc) deriveHash(cfg *params.ChainConfig, timestamp uint64) (common.Hash, error) { // Create an ephemeral in-memory database for computing hash, // all the derived states will be discarded to not pollute disk. db := state.NewDatabase(rawdb.NewMemoryDatabase()) + if cfg.IsPrague(big.NewInt(int64(0)), timestamp) { + db.EndVerkleTransition() + } statedb, err := state.New(types.EmptyRootHash, db, nil) if err != nil { return common.Hash{}, err @@ -143,11 +146,17 @@ func (ga *GenesisAlloc) deriveHash() (common.Hash, error) { // flush is very similar with deriveHash, but the main difference is // all the generated states will be persisted into the given database. // Also, the genesis state specification will be flushed as well. 
-func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error { +func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash, cfg *params.ChainConfig) error { statedb, err := state.New(types.EmptyRootHash, state.NewDatabaseWithNodeDB(db, triedb), nil) if err != nil { return err } + + // End the verkle conversion at genesis if the fork block is 0 + if triedb.IsVerkle() { + statedb.Database().EndVerkleTransition() + } + for addr, account := range *ga { statedb.AddBalance(addr, account.Balance) statedb.SetCode(addr, account.Code) @@ -171,6 +180,7 @@ func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhas if err != nil { return err } + rawdb.WriteGenesisStateSpec(db, blockhash, blob) return nil } @@ -179,11 +189,16 @@ func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhas // hash and commits it into the provided trie database. func CommitGenesisState(db ethdb.Database, triedb *trie.Database, blockhash common.Hash) error { var alloc GenesisAlloc + var config *params.ChainConfig blob := rawdb.ReadGenesisStateSpec(db, blockhash) if len(blob) != 0 { if err := alloc.UnmarshalJSON(blob); err != nil { return err } + config = rawdb.ReadChainConfig(db, blockhash) + if config == nil { + return errors.New("genesis config missing from db") + } } else { // Genesis allocation is missing and there are several possibilities: // the node is legacy which doesn't persist the genesis allocation or @@ -201,11 +216,12 @@ func CommitGenesisState(db ethdb.Database, triedb *trie.Database, blockhash comm } if genesis != nil { alloc = genesis.Alloc + config = genesis.Config } else { return errors.New("not found") } } - return alloc.flush(db, triedb, blockhash) + return alloc.flush(db, triedb, blockhash, config) } // GenesisAccount is an account in the state of the genesis block. 
@@ -273,7 +289,7 @@ func (e *GenesisMismatchError) Error() string { // ChainOverrides contains the changes to chain config. type ChainOverrides struct { OverrideCancun *uint64 - OverrideVerkle *uint64 + OverridePrague *uint64 } // SetupGenesisBlock writes or updates the genesis block in db. @@ -302,8 +318,8 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen if overrides != nil && overrides.OverrideCancun != nil { config.CancunTime = overrides.OverrideCancun } - if overrides != nil && overrides.OverrideVerkle != nil { - config.VerkleTime = overrides.OverrideVerkle + if overrides != nil && overrides.OverridePrague != nil { + config.PragueTime = overrides.OverridePrague } } } @@ -316,34 +332,35 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen } else { log.Info("Writing custom genesis block") } + applyOverrides(genesis.Config) block, err := genesis.Commit(db, triedb) if err != nil { return genesis.Config, common.Hash{}, err } - applyOverrides(genesis.Config) return genesis.Config, block.Hash(), nil } // We have the genesis block in database(perhaps in ancient database) // but the corresponding state is missing. - header := rawdb.ReadHeader(db, stored, 0) - if header.Root != types.EmptyRootHash && !rawdb.HasLegacyTrieNode(db, header.Root) { - if genesis == nil { - genesis = DefaultGenesisBlock() - } - // Ensure the stored genesis matches with the given one. 
- hash := genesis.ToBlock().Hash() - if hash != stored { - return genesis.Config, hash, &GenesisMismatchError{stored, hash} - } - block, err := genesis.Commit(db, triedb) - if err != nil { - return genesis.Config, hash, err - } - applyOverrides(genesis.Config) - return genesis.Config, block.Hash(), nil - } + // header := rawdb.ReadHeader(db, stored, 0) + // if header.Root != types.EmptyRootHash && !rawdb.HasLegacyTrieNode(db, header.Root) { + // if genesis == nil { + // genesis = DefaultGenesisBlock() + // } + // // Ensure the stored genesis matches with the given one. + // hash := genesis.ToBlock().Hash() + // if hash != stored { + // return genesis.Config, hash, &GenesisMismatchError{stored, hash} + // } + // block, err := genesis.Commit(db, triedb) + // if err != nil { + // return genesis.Config, hash, err + // } + // applyOverrides(genesis.Config) + // return genesis.Config, block.Hash(), nil + // } // Check whether the genesis block is already written. if genesis != nil { + applyOverrides(genesis.Config) hash := genesis.ToBlock().Hash() if hash != stored { return genesis.Config, hash, &GenesisMismatchError{stored, hash} @@ -352,9 +369,12 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, triedb *trie.Database, gen // Get the existing chain configuration. newcfg := genesis.configOrDefault(stored) applyOverrides(newcfg) - if err := newcfg.CheckConfigForkOrder(); err != nil { - return newcfg, common.Hash{}, err - } + // WORKAROUND it looks like this is broken, because overriding + // pragueTime will cause an error here, claiming that shanghaiTime + // wasn't set (it is). 
+ // if err := newcfg.CheckConfigForkOrder(); err != nil { + // return newcfg, common.Hash{}, err + // } storedcfg := rawdb.ReadChainConfig(db, stored) if storedcfg == nil { log.Warn("Found genesis block without chain config") @@ -438,7 +458,7 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig { // ToBlock returns the genesis block according to genesis specification. func (g *Genesis) ToBlock() *types.Block { - root, err := g.Alloc.deriveHash() + root, err := g.Alloc.deriveHash(g.Config, g.Timestamp) if err != nil { panic(err) } @@ -510,7 +530,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block // All the checks has passed, flush the states derived from the genesis // specification as well as the specification itself into the provided // database. - if err := g.Alloc.flush(db, triedb, block.Hash()); err != nil { + if err := g.Alloc.flush(db, triedb, block.Hash(), g.Config); err != nil { return nil, err } rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty()) @@ -529,7 +549,8 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block // Note the state changes will be committed in hash-based scheme, use Commit // if path-scheme is preferred. 
func (g *Genesis) MustCommit(db ethdb.Database) *types.Block { - block, err := g.Commit(db, trie.NewDatabase(db)) + triedb := trie.NewDatabaseWithConfig(db, &trie.Config{Verkle: g.Config != nil && g.Config.IsPrague(big.NewInt(int64(g.Number)), g.Timestamp)}) + block, err := g.Commit(db, triedb) if err != nil { panic(err) } diff --git a/core/genesis_test.go b/core/genesis_test.go index 723d1e476bf1..77a214ebe785 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -219,7 +219,7 @@ func TestReadWriteGenesisAlloc(t *testing.T) { {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}}, {2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}}, } - hash, _ = alloc.deriveHash() + hash, _ = alloc.deriveHash(¶ms.ChainConfig{}, 0) ) blob, _ := json.Marshal(alloc) rawdb.WriteGenesisStateSpec(db, hash, blob) diff --git a/core/overlay_transition.go b/core/overlay_transition.go new file mode 100644 index 000000000000..35c09d22d938 --- /dev/null +++ b/core/overlay_transition.go @@ -0,0 +1,245 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package core + +import ( + "bufio" + "bytes" + "fmt" + "io" + "os" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" +) + +// OverlayVerkleTransition contains the overlay conversion logic +func OverlayVerkleTransition(statedb *state.StateDB) error { + migrdb := statedb.Database() + + // verkle transition: if the conversion process is in progress, move + // N values from the MPT into the verkle tree. + if migrdb.InTransition() { + var ( + now = time.Now() + tt = statedb.GetTrie().(*trie.TransitionTrie) + mpt = tt.Base() + vkt = tt.Overlay() + hasPreimagesBin = false + preimageSeek = migrdb.GetCurrentPreimageOffset() + fpreimages *bufio.Reader + ) + + // TODO: avoid opening the preimages file here and make it part of, potentially, statedb.Database(). + filePreimages, err := os.Open("preimages.bin") + if err != nil { + // fallback on reading the db + log.Warn("opening preimage file", "error", err) + } else { + defer filePreimages.Close() + if _, err := filePreimages.Seek(preimageSeek, io.SeekStart); err != nil { + return fmt.Errorf("seeking preimage file: %s", err) + } + fpreimages = bufio.NewReader(filePreimages) + hasPreimagesBin = true + } + + accIt, err := statedb.Snaps().AccountIterator(mpt.Hash(), migrdb.GetCurrentAccountHash()) + if err != nil { + return err + } + defer accIt.Release() + accIt.Next() + + // If we're about to start with the migration process, we have to read the first account hash preimage. 
+ if migrdb.GetCurrentAccountAddress() == nil { + var addr common.Address + if hasPreimagesBin { + if _, err := io.ReadFull(fpreimages, addr[:]); err != nil { + return fmt.Errorf("reading preimage file: %s", err) + } + } else { + addr = common.BytesToAddress(rawdb.ReadPreimage(migrdb.DiskDB(), accIt.Hash())) + if len(addr) != 20 { + return fmt.Errorf("addr len is zero is not 32: %d", len(addr)) + } + } + migrdb.SetCurrentAccountAddress(addr) + if migrdb.GetCurrentAccountHash() != accIt.Hash() { + return fmt.Errorf("preimage file does not match account hash: %s != %s", crypto.Keccak256Hash(addr[:]), accIt.Hash()) + } + preimageSeek += int64(len(addr)) + } + + const maxMovedCount = 10000 + // mkv will be assisting in the collection of up to maxMovedCount key values to be migrated to the VKT. + // It has internal caches to do efficient MPT->VKT key calculations, which will be discarded after + // this function. + mkv := newKeyValueMigrator() + // move maxCount accounts into the verkle tree, starting with the + // slots from the previous account. + count := 0 + + // if less than maxCount slots were moved, move to the next account + for count < maxMovedCount { + acc, err := types.FullAccount(accIt.Account()) + if err != nil { + log.Error("Invalid account encountered during traversal", "error", err) + return err + } + vkt.SetStorageRootConversion(*migrdb.GetCurrentAccountAddress(), acc.Root) + + // Start with processing the storage, because once the account is + // converted, the `stateRoot` field loses its meaning. Which means + // that it opens the door to a situation in which the storage isn't + // converted, but it can not be found since the account was and so + // there is no way to find the MPT storage from the information found + // in the verkle account. + // Note that this issue can still occur if the account gets written + // to during normal block execution. A mitigation strategy has been + // introduced with the `*StorageRootConversion` fields in VerkleDB. 
+ if acc.HasStorage() { + stIt, err := statedb.Snaps().StorageIterator(mpt.Hash(), accIt.Hash(), migrdb.GetCurrentSlotHash()) + if err != nil { + return err + } + stIt.Next() + + // fdb.StorageProcessed will be initialized to `true` if the + // entire storage for an account was not entirely processed + // by the previous block. This is used as a signal to resume + // processing the storage for that account where we left off. + // If the entire storage was processed, then the iterator was + // created in vain, but it's ok as this will not happen often. + for ; !migrdb.GetStorageProcessed() && count < maxMovedCount; count++ { + var ( + value []byte // slot value after RLP decoding + safeValue [32]byte // 32-byte aligned value + ) + if err := rlp.DecodeBytes(stIt.Slot(), &value); err != nil { + return fmt.Errorf("error decoding bytes %x: %w", stIt.Slot(), err) + } + copy(safeValue[32-len(value):], value) + + var slotnr []byte + if hasPreimagesBin { + var s [32]byte + slotnr = s[:] + if _, err := io.ReadFull(fpreimages, slotnr); err != nil { + return fmt.Errorf("reading preimage file: %s", err) + } + } else { + slotnr = rawdb.ReadPreimage(migrdb.DiskDB(), stIt.Hash()) + if len(slotnr) != 32 { + return fmt.Errorf("slotnr len is zero is not 32: %d", len(slotnr)) + } + } + if crypto.Keccak256Hash(slotnr[:]) != stIt.Hash() { + return fmt.Errorf("preimage file does not match storage hash: %s!=%s", crypto.Keccak256Hash(slotnr), stIt.Hash()) + } + preimageSeek += int64(len(slotnr)) + + mkv.addStorageSlot(migrdb.GetCurrentAccountAddress().Bytes(), slotnr, safeValue[:]) + + // advance the storage iterator + migrdb.SetStorageProcessed(!stIt.Next()) + if !migrdb.GetStorageProcessed() { + migrdb.SetCurrentSlotHash(stIt.Hash()) + } + } + stIt.Release() + } + + // If the maximum number of leaves hasn't been reached, then + // it means that the storage has finished processing (or none + // was available for this account) and that the account itself + // can be processed. 
+ if count < maxMovedCount { + count++ // count increase for the account itself + + mkv.addAccount(migrdb.GetCurrentAccountAddress().Bytes(), acc) + vkt.ClearStrorageRootConversion(*migrdb.GetCurrentAccountAddress()) + + // Store the account code if present + if !bytes.Equal(acc.CodeHash, types.EmptyCodeHash[:]) { + code := rawdb.ReadCode(statedb.Database().DiskDB(), common.BytesToHash(acc.CodeHash)) + chunks := trie.ChunkifyCode(code) + + mkv.addAccountCode(migrdb.GetCurrentAccountAddress().Bytes(), uint64(len(code)), chunks) + } + + // reset storage iterator marker for next account + migrdb.SetStorageProcessed(false) + migrdb.SetCurrentSlotHash(common.Hash{}) + + // Move to the next account, if available - or end + // the transition otherwise. + if accIt.Next() { + var addr common.Address + if hasPreimagesBin { + if _, err := io.ReadFull(fpreimages, addr[:]); err != nil { + return fmt.Errorf("reading preimage file: %s", err) + } + } else { + addr = common.BytesToAddress(rawdb.ReadPreimage(migrdb.DiskDB(), accIt.Hash())) + if len(addr) != 20 { + return fmt.Errorf("account address len is zero is not 20: %d", len(addr)) + } + } + // fmt.Printf("account switch: %s != %s\n", crypto.Keccak256Hash(addr[:]), accIt.Hash()) + if crypto.Keccak256Hash(addr[:]) != accIt.Hash() { + return fmt.Errorf("preimage file does not match account hash: %s != %s", crypto.Keccak256Hash(addr[:]), accIt.Hash()) + } + preimageSeek += int64(len(addr)) + migrdb.SetCurrentAccountAddress(addr) + } else { + // case when the account iterator has + // reached the end but count < maxCount + migrdb.EndVerkleTransition() + break + } + } + } + migrdb.SetCurrentPreimageOffset(preimageSeek) + + log.Info("Collected key values from base tree", "count", count, "duration", time.Since(now), "last account", statedb.Database().GetCurrentAccountHash()) + + // Take all the collected key-values and prepare the new leaf values. 
+ // This fires a background routine that will start doing the work that + // migrateCollectedKeyValues() will use to insert into the tree. + // + // TODO: Now both prepare() and migrateCollectedKeyValues() are next to each other, but + // after we fix an existing bug, we can call prepare() before the block execution and + // let it do the work in the background. After the block execution and finalization + // finish, we can call migrateCollectedKeyValues() which should already find everything ready. + mkv.prepare() + now = time.Now() + if err := mkv.migrateCollectedKeyValues(tt.Overlay()); err != nil { + return fmt.Errorf("could not migrate key values: %w", err) + } + log.Info("Inserted key values in overlay tree", "count", count, "duration", time.Since(now)) + } + + return nil +} diff --git a/core/state/access_witness.go b/core/state/access_witness.go new file mode 100644 index 000000000000..8b03cf371a60 --- /dev/null +++ b/core/state/access_witness.go @@ -0,0 +1,254 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package state + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/holiman/uint256" +) + +// mode specifies how a tree location has been accessed +// for the byte value: +// * the first bit is set if the branch has been edited +// * the second bit is set if the branch has been read +type mode byte + +const ( + AccessWitnessReadFlag = mode(1) + AccessWitnessWriteFlag = mode(2) +) + +var zeroTreeIndex uint256.Int + +// AccessWitness lists the locations of the state that are being accessed +// during the production of a block. +type AccessWitness struct { + branches map[branchAccessKey]mode + chunks map[chunkAccessKey]mode + + pointCache *utils.PointCache +} + +func NewAccessWitness(pointCache *utils.PointCache) *AccessWitness { + return &AccessWitness{ + branches: make(map[branchAccessKey]mode), + chunks: make(map[chunkAccessKey]mode), + pointCache: pointCache, + } +} + +// Merge is used to merge the witness that got generated during the execution +// of a tx, with the accumulation of witnesses that were generated during the +// execution of all the txs preceding this one in a given block. +func (aw *AccessWitness) Merge(other *AccessWitness) { + for k := range other.branches { + aw.branches[k] |= other.branches[k] + } + for k, chunk := range other.chunks { + aw.chunks[k] |= chunk + } +} + +// Keys returns, predictably, the list of keys that were touched during the +// buildup of the access witness. +func (aw *AccessWitness) Keys() [][]byte { + // TODO: consider if parallelizing this is worth it, probably depending on len(aw.chunks). 
+ keys := make([][]byte, 0, len(aw.chunks)) + for chunk := range aw.chunks { + basePoint := aw.pointCache.GetTreeKeyHeader(chunk.addr[:]) + key := utils.GetTreeKeyWithEvaluatedAddess(basePoint, &chunk.treeIndex, chunk.leafKey) + keys = append(keys, key) + } + return keys +} + +func (aw *AccessWitness) Copy() *AccessWitness { + naw := &AccessWitness{ + branches: make(map[branchAccessKey]mode), + chunks: make(map[chunkAccessKey]mode), + pointCache: aw.pointCache, + } + naw.Merge(aw) + return naw +} + +func (aw *AccessWitness) TouchAndChargeProofOfAbsence(addr []byte) uint64 { + var gas uint64 + gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.VersionLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.BalanceLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.CodeKeccakLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.NonceLeafKey) + return gas +} + +func (aw *AccessWitness) TouchAndChargeMessageCall(addr []byte) uint64 { + var gas uint64 + gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.VersionLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey) + return gas +} + +func (aw *AccessWitness) TouchAndChargeValueTransfer(callerAddr, targetAddr []byte) uint64 { + var gas uint64 + gas += aw.TouchAddressOnWriteAndComputeGas(callerAddr, zeroTreeIndex, utils.BalanceLeafKey) + gas += aw.TouchAddressOnWriteAndComputeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey) + return gas +} + +// TouchAndChargeContractCreateInit charges access costs to initiate +// a contract creation +func (aw *AccessWitness) TouchAndChargeContractCreateInit(addr []byte, createSendsValue bool) uint64 { + var gas uint64 + gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.VersionLeafKey) + gas += 
aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.NonceLeafKey) + gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.CodeKeccakLeafKey) + if createSendsValue { + gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.BalanceLeafKey) + } + return gas +} + +// TouchAndChargeContractCreateCompleted charges access costs after +// the completion of a contract creation to populate the created account in +// the tree +func (aw *AccessWitness) TouchAndChargeContractCreateCompleted(addr []byte) uint64 { + var gas uint64 + gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.VersionLeafKey) + gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.BalanceLeafKey) + gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.CodeSizeLeafKey) + gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.CodeKeccakLeafKey) + gas += aw.TouchAddressOnWriteAndComputeGas(addr, zeroTreeIndex, utils.NonceLeafKey) + return gas +} + +func (aw *AccessWitness) TouchTxOriginAndComputeGas(originAddr []byte) uint64 { + var gas uint64 + gas += aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.VersionLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.CodeSizeLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(originAddr, zeroTreeIndex, utils.CodeKeccakLeafKey) + gas += aw.TouchAddressOnWriteAndComputeGas(originAddr, zeroTreeIndex, utils.NonceLeafKey) + gas += aw.TouchAddressOnWriteAndComputeGas(originAddr, zeroTreeIndex, utils.BalanceLeafKey) + return gas +} + +func (aw *AccessWitness) TouchTxExistingAndComputeGas(targetAddr []byte, sendsValue bool) uint64 { + var gas uint64 + gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.VersionLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.CodeSizeLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, 
utils.CodeKeccakLeafKey) + gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.NonceLeafKey) + if sendsValue { + gas += aw.TouchAddressOnWriteAndComputeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey) + } else { + gas += aw.TouchAddressOnReadAndComputeGas(targetAddr, zeroTreeIndex, utils.BalanceLeafKey) + } + return gas +} + +func (aw *AccessWitness) TouchAddressOnWriteAndComputeGas(addr []byte, treeIndex uint256.Int, subIndex byte) uint64 { + return aw.touchAddressAndChargeGas(addr, treeIndex, subIndex, true) +} + +func (aw *AccessWitness) TouchAddressOnReadAndComputeGas(addr []byte, treeIndex uint256.Int, subIndex byte) uint64 { + return aw.touchAddressAndChargeGas(addr, treeIndex, subIndex, false) +} + +func (aw *AccessWitness) touchAddressAndChargeGas(addr []byte, treeIndex uint256.Int, subIndex byte, isWrite bool) uint64 { + stemRead, selectorRead, stemWrite, selectorWrite, selectorFill := aw.touchAddress(addr, treeIndex, subIndex, isWrite) + + var gas uint64 + if stemRead { + gas += params.WitnessBranchReadCost + } + if selectorRead { + gas += params.WitnessChunkReadCost + } + if stemWrite { + gas += params.WitnessBranchWriteCost + } + if selectorWrite { + gas += params.WitnessChunkWriteCost + } + if selectorFill { + gas += params.WitnessChunkFillCost + } + + return gas +} + +// touchAddress adds any missing access event to the witness. +func (aw *AccessWitness) touchAddress(addr []byte, treeIndex uint256.Int, subIndex byte, isWrite bool) (bool, bool, bool, bool, bool) { + branchKey := newBranchAccessKey(addr, treeIndex) + chunkKey := newChunkAccessKey(branchKey, subIndex) + + // Read access. + var branchRead, chunkRead bool + if _, hasStem := aw.branches[branchKey]; !hasStem { + branchRead = true + aw.branches[branchKey] = AccessWitnessReadFlag + } + if _, hasSelector := aw.chunks[chunkKey]; !hasSelector { + chunkRead = true + aw.chunks[chunkKey] = AccessWitnessReadFlag + } + + // Write access. 
+ var branchWrite, chunkWrite, chunkFill bool + if isWrite { + if (aw.branches[branchKey] & AccessWitnessWriteFlag) == 0 { + branchWrite = true + aw.branches[branchKey] |= AccessWitnessWriteFlag + } + + chunkValue := aw.chunks[chunkKey] + if (chunkValue & AccessWitnessWriteFlag) == 0 { + chunkWrite = true + aw.chunks[chunkKey] |= AccessWitnessWriteFlag + } + + // TODO: charge chunk filling costs if the leaf was previously empty in the state + } + + return branchRead, chunkRead, branchWrite, chunkWrite, chunkFill +} + +type branchAccessKey struct { + addr common.Address + treeIndex uint256.Int +} + +func newBranchAccessKey(addr []byte, treeIndex uint256.Int) branchAccessKey { + var sk branchAccessKey + copy(sk.addr[:], addr) + sk.treeIndex = treeIndex + return sk +} + +type chunkAccessKey struct { + branchAccessKey + leafKey byte +} + +func newChunkAccessKey(branchKey branchAccessKey, leafKey byte) chunkAccessKey { + var lk chunkAccessKey + lk.branchAccessKey = branchKey + lk.leafKey = leafKey + return lk +} diff --git a/core/state/database.go b/core/state/database.go index a3b6322ae313..af8cc1a36e30 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -26,8 +26,11 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" ) const ( @@ -44,7 +47,7 @@ type Database interface { OpenTrie(root common.Hash) (Trie, error) // OpenStorageTrie opens the storage trie of an account. - OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash) (Trie, error) + OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, main Trie) (Trie, error) // CopyTrie returns an independent copy of the given trie. 
CopyTrie(Trie) Trie @@ -60,6 +63,38 @@ type Database interface { // TrieDB retrieves the low level trie database used for data storage. TrieDB() *trie.Database + + StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, cancunTime *uint64) + + ReorgThroughVerkleTransition() + + EndVerkleTransition() + + InTransition() bool + + Transitioned() bool + + SetCurrentSlotHash(hash common.Hash) + + GetCurrentAccountAddress() *common.Address + + SetCurrentAccountAddress(common.Address) + + GetCurrentAccountHash() common.Hash + + GetCurrentSlotHash() common.Hash + + SetStorageProcessed(bool) + + GetStorageProcessed() bool + + GetCurrentPreimageOffset() int64 + + SetCurrentPreimageOffset(int64) + + AddRootTranslation(originalRoot, translatedRoot common.Hash) + + SetLastMerkleRoot(root common.Hash) } // Trie is a Ethereum Merkle Patricia trie. @@ -130,6 +165,9 @@ type Trie interface { // nodes of the longest existing prefix of the key (at least the root), ending // with the node that proves the absence of the key. Prove(key []byte, proofDb ethdb.KeyValueWriter) error + + // IsVerkle returns true if the trie is verkle-tree based + IsVerkle() bool } // NewDatabase creates a backing store for state. 
The returned database is safe for @@ -148,6 +186,7 @@ func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database { codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), triedb: trie.NewDatabaseWithConfig(db, config), + addrToPoint: utils.NewPointCache(), } } @@ -158,7 +197,56 @@ func NewDatabaseWithNodeDB(db ethdb.Database, triedb *trie.Database) Database { codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize), codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize), triedb: triedb, + addrToPoint: utils.NewPointCache(), + ended: triedb.IsVerkle(), + } +} + +func (db *cachingDB) InTransition() bool { + return db.started && !db.ended +} + +func (db *cachingDB) Transitioned() bool { + return db.ended +} + +// Fork implements the fork +func (db *cachingDB) StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, pragueTime *uint64) { + fmt.Println(` + __________.__ .__ .__ __ .__ .__ ____ + \__ ___| |__ ____ ____ | | ____ ______ | |__ _____ _____/ |_ | |__ _____ ______ __ _ _|__| ____ / ___\ ______ + | | | | \_/ __ \ _/ __ \| | _/ __ \\____ \| | \\__ \ / \ __\ | | \\__ \ / ___/ \ \/ \/ | |/ \ / /_/ / ___/ + | | | Y \ ___/ \ ___/| |_\ ___/| |_> | Y \/ __ \| | | | | Y \/ __ \_\___ \ \ /| | | \\___ /\___ \ + |____| |___| /\___ \___ |____/\___ | __/|___| (____ |___| |__| |___| (____ /_____/ \/\_/ |__|___| /_____//_____/ + |__|`) + db.started = true + db.ended = false + // db.AddTranslation(originalRoot, translatedRoot) + db.baseRoot = originalRoot + // initialize so that the first storage-less accounts are processed + db.StorageProcessed = true + if pragueTime != nil { + chainConfig.PragueTime = pragueTime + } +} + +func (db *cachingDB) ReorgThroughVerkleTransition() { + db.ended, db.started = false, false +} + +func (db *cachingDB) EndVerkleTransition() { + if !db.started { + db.started = true } + + 
fmt.Println(` + __________.__ .__ .__ __ .__ .__ .___ .___ + \__ ___| |__ ____ ____ | | ____ ______ | |__ _____ _____/ |_ | |__ _____ ______ | | _____ ____ __| _/____ __| _/ + | | | | \_/ __ \ _/ __ \| | _/ __ \\____ \| | \\__ \ / \ __\ | | \\__ \ / ___/ | | \__ \ / \ / __ _/ __ \ / __ | + | | | Y \ ___/ \ ___/| |_\ ___/| |_> | Y \/ __ \| | | | | Y \/ __ \_\___ \ | |__/ __ \| | / /_/ \ ___// /_/ | + |____| |___| /\___ \___ |____/\___ | __/|___| (____ |___| |__| |___| (____ /_____/ |____(____ |___| \____ |\___ \____ | + |__|`) + db.ended = true } type cachingDB struct { @@ -166,10 +254,26 @@ type cachingDB struct { codeSizeCache *lru.Cache[common.Hash, int] codeCache *lru.SizeConstrainedCache[common.Hash, []byte] triedb *trie.Database + + // Verkle specific fields + // TODO ensure that this info is in the DB + started, ended bool + LastMerkleRoot common.Hash // root hash of the read-only base tree + + addrToPoint *utils.PointCache + + baseRoot common.Hash // hash of the read-only base tree + CurrentAccountAddress *common.Address // addresss of the last translated account + CurrentSlotHash common.Hash // hash of the last translated storage slot + CurrentPreimageOffset int64 // next byte to read from the preimage file + + // Mark whether the storage for an account has been processed. This is useful if the + // maximum number of leaves of the conversion is reached before the whole storage is + // processed. + StorageProcessed bool } -// OpenTrie opens the main account trie at a specific root hash. -func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { +func (db *cachingDB) openMPTTrie(root common.Hash) (Trie, error) { tr, err := trie.NewStateTrie(trie.StateTrieID(root), db.triedb) if err != nil { return nil, err @@ -177,8 +281,60 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { return tr, nil } -// OpenStorageTrie opens the storage trie of an account. 
-func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash) (Trie, error) { +func (db *cachingDB) openVKTrie(root common.Hash) (Trie, error) { + payload, err := db.DiskDB().Get(trie.FlatDBVerkleNodeKeyPrefix) + if err != nil { + return trie.NewVerkleTrie(verkle.New(), db.triedb, db.addrToPoint, db.ended), nil + } + + r, err := verkle.ParseNode(payload, 0) + if err != nil { + panic(err) + } + return trie.NewVerkleTrie(r, db.triedb, db.addrToPoint, db.ended), err +} + +// OpenTrie opens the main account trie at a specific root hash. +func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { + var ( + mpt Trie + err error + ) + + // TODO separate both cases when I can be certain that it won't + // find a Verkle trie where it expects a Transition trie. + if db.started || db.ended { + // NOTE this is a kaustinen-only change, it will break replay + vkt, err := db.openVKTrie(root) + if err != nil { + return nil, err + } + + // If the verkle conversion has ended, return a single + // verkle trie. + if db.ended { + return vkt, nil + } + + // Otherwise, return a transition trie, with a base MPT + // trie and an overlay, verkle trie. 
+ mpt, err = db.openMPTTrie(db.baseRoot) + if err != nil { + return nil, err + } + + return trie.NewTransitionTree(mpt.(*trie.SecureTrie), vkt.(*trie.VerkleTrie), false), nil + } else { + mpt, err = db.openMPTTrie(root) + if err != nil { + return nil, err + } + } + + return mpt, nil +} + +func (db *cachingDB) openStorageMPTrie(stateRoot common.Hash, address common.Address, root common.Hash, _ Trie) (Trie, error) { tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, crypto.Keccak256Hash(address.Bytes()), root), db.triedb) if err != nil { return nil, err @@ -186,11 +342,54 @@ func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Addre return tr, nil } +// OpenStorageTrie opens the storage trie of an account +func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, self Trie) (Trie, error) { + // TODO this should only return a verkle tree + if db.ended { + mpt, err := db.openStorageMPTrie(types.EmptyRootHash, address, common.Hash{}, self) + if err != nil { + return nil, err + } + // Return a "storage trie" that is an adapter between the storge MPT + // and the unique verkle tree. + switch self := self.(type) { + case *trie.VerkleTrie: + return trie.NewTransitionTree(mpt.(*trie.StateTrie), self, true), nil + case *trie.TransitionTrie: + return trie.NewTransitionTree(mpt.(*trie.StateTrie), self.Overlay(), true), nil + default: + panic("unexpected trie type") + } + } + if db.started { + mpt, err := db.openStorageMPTrie(db.LastMerkleRoot, address, root, nil) + if err != nil { + return nil, err + } + // Return a "storage trie" that is an adapter between the storge MPT + // and the unique verkle tree. 
+ switch self := self.(type) { + case *trie.VerkleTrie: + return trie.NewTransitionTree(mpt.(*trie.SecureTrie), self, true), nil + case *trie.TransitionTrie: + return trie.NewTransitionTree(mpt.(*trie.SecureTrie), self.Overlay(), true), nil + default: + panic("unexpected trie type") + } + } + mpt, err := db.openStorageMPTrie(stateRoot, address, root, nil) + return mpt, err +} + // CopyTrie returns an independent copy of the given trie. func (db *cachingDB) CopyTrie(t Trie) Trie { switch t := t.(type) { case *trie.StateTrie: return t.Copy() + case *trie.TransitionTrie: + return t.Copy() + case *trie.VerkleTrie: + return t.Copy() default: panic(fmt.Errorf("unknown trie type %T", t)) } @@ -246,3 +445,54 @@ func (db *cachingDB) DiskDB() ethdb.KeyValueStore { func (db *cachingDB) TrieDB() *trie.Database { return db.triedb } + +func (db *cachingDB) GetTreeKeyHeader(addr []byte) *verkle.Point { + return db.addrToPoint.GetTreeKeyHeader(addr) +} + +func (db *cachingDB) SetCurrentAccountAddress(addr common.Address) { + db.CurrentAccountAddress = &addr +} + +func (db *cachingDB) GetCurrentAccountHash() common.Hash { + var addrHash common.Hash + if db.CurrentAccountAddress != nil { + addrHash = crypto.Keccak256Hash(db.CurrentAccountAddress[:]) + } + return addrHash +} + +func (db *cachingDB) GetCurrentAccountAddress() *common.Address { + return db.CurrentAccountAddress +} + +func (db *cachingDB) GetCurrentPreimageOffset() int64 { + return db.CurrentPreimageOffset +} + +func (db *cachingDB) SetCurrentPreimageOffset(offset int64) { + db.CurrentPreimageOffset = offset +} + +func (db *cachingDB) SetCurrentSlotHash(hash common.Hash) { + db.CurrentSlotHash = hash +} + +func (db *cachingDB) GetCurrentSlotHash() common.Hash { + return db.CurrentSlotHash +} + +func (db *cachingDB) SetStorageProcessed(processed bool) { + db.StorageProcessed = processed +} + +func (db *cachingDB) GetStorageProcessed() bool { + return db.StorageProcessed +} + +func (db *cachingDB) 
AddRootTranslation(originalRoot, translatedRoot common.Hash) { +} + +func (db *cachingDB) SetLastMerkleRoot(root common.Hash) { + db.LastMerkleRoot = root +} diff --git a/core/state/iterator.go b/core/state/iterator.go index bb9af082061f..bf00fc0e7e1e 100644 --- a/core/state/iterator.go +++ b/core/state/iterator.go @@ -106,10 +106,11 @@ func (it *nodeIterator) step() error { it.state, it.stateIt = nil, nil return nil } - // If the state trie node is an internal entry, leave as is + // If the state trie node is an internal entry, leave as is. if !it.stateIt.Leaf() { return nil } + // Otherwise we've reached an account node, initiate data iteration var account types.StateAccount if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil { @@ -123,7 +124,7 @@ func (it *nodeIterator) step() error { address := common.BytesToAddress(preimage) // Traverse the storage slots belong to the account - dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, address, account.Root) + dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, address, account.Root, nil) if err != nil { return err } diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index efc0fc26afdf..7880a8799753 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -154,6 +154,7 @@ type Config struct { Recovery bool // Indicator that the snapshots is in the recovery mode NoBuild bool // Indicator that the snapshots generation is disallowed AsyncBuild bool // The snapshot generation is allowed to be constructed asynchronously + Verkle bool // True if verkle trees are enabled } // Tree is an Ethereum state snapshot tree. 
It consists of one persistent base @@ -213,6 +214,20 @@ func New(config Config, diskdb ethdb.KeyValueStore, triedb *trie.Database, root if err != nil { log.Warn("Failed to load snapshot", "err", err) if !config.NoBuild { + if config.Verkle { + // TODO update the Rebuild function + // snap.layers = map[common.Hash]snapshot{ + // root: &diskLayer{ + // diskdb: diskdb, + // triedb: triedb, + // root: root, + // cache: fastcache.New(config.CacheSize * 1024 * 1024), + // }, + // } + // return snap, nil + return nil, nil + } + log.Warn("Failed to load snapshot, regenerating", "err", err) snap.Rebuild(root) return snap, nil } diff --git a/core/state/state_object.go b/core/state/state_object.go index cd72f3fb9b91..a250d48ffabb 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -102,6 +102,7 @@ func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *s if acct == nil { acct = types.NewEmptyStateAccount() } + return &stateObject{ db: db, address: address, @@ -145,7 +146,7 @@ func (s *stateObject) getTrie() (Trie, error) { s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root) } if s.trie == nil { - tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root) + tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root, s.db.trie) if err != nil { return nil, err } @@ -222,6 +223,7 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { } value.SetBytes(val) } + s.originStorage[key] = value return value } diff --git a/core/state/statedb.go b/core/state/statedb.go index 5c33e2d7e130..ea1e18613870 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -118,6 +118,9 @@ type StateDB struct { // Transient storage transientStorage transientStorage + // Verkle witness + witness *AccessWitness + // Journal of state modifications. This is the backbone of // Snapshot and RevertToSnapshot. 
journal *journal @@ -170,12 +173,35 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) transientStorage: newTransientStorage(), hasher: crypto.NewKeccakState(), } - if sdb.snaps != nil { - sdb.snap = sdb.snaps.Snapshot(root) + if tr.IsVerkle() { + sdb.witness = sdb.NewAccessWitness() } + // if sdb.snaps != nil { + // if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap == nil { + // } + // } return sdb, nil } +func (s *StateDB) Snaps() *snapshot.Tree { + return s.snaps +} + +func (s *StateDB) NewAccessWitness() *AccessWitness { + return NewAccessWitness(s.db.(*cachingDB).addrToPoint) +} + +func (s *StateDB) Witness() *AccessWitness { + if s.witness == nil { + s.witness = s.NewAccessWitness() + } + return s.witness +} + +func (s *StateDB) SetWitness(aw *AccessWitness) { + s.witness = aw +} + // StartPrefetcher initializes a new trie prefetcher to pull in nodes from the // state trie concurrently while the state is mutated so that when we reach the // commit phase, most of the needed data is already hot. @@ -184,7 +210,7 @@ func (s *StateDB) StartPrefetcher(namespace string) { s.prefetcher.close() s.prefetcher = nil } - if s.snap != nil { + if s.snap != nil && !s.trie.IsVerkle() { s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace) } } @@ -789,6 +815,9 @@ func (s *StateDB) Copy() *StateDB { snaps: s.snaps, snap: s.snap, } + if s.witness != nil { + state.witness = s.witness.Copy() + } // Copy the dirty states, logs, and preimages for addr := range s.journal.dirties { // As documented [here](https://github.com/ethereum/go-ethereum/pull/16485#issuecomment-380438527), @@ -971,7 +1000,11 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // to pull useful data from disk. 
for addr := range s.stateObjectsPending { if obj := s.stateObjects[addr]; !obj.deleted { - obj.updateRoot() + if s.trie.IsVerkle() { + obj.updateTrie() + } else { + obj.updateRoot() + } } } // Now we're about to start to write changes to the trie. The trie is so far @@ -1003,7 +1036,14 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { if metrics.EnabledExpensive { defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now()) } - return s.trie.Hash() + root := s.trie.Hash() + + // Save the root of the MPT so that it can be used during the transition + if !s.Database().InTransition() && !s.Database().Transitioned() { + s.Database().SetLastMerkleRoot(root) + } + + return root } // SetTxContext sets the current transaction hash and index which are @@ -1025,8 +1065,14 @@ func (s *StateDB) clearJournalAndRefund() { // deleteStorage iterates the storage trie belongs to the account and mark all // slots inside as deleted. func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) { + // verkle: a deletion is akin to overwriting with 0s + if s.GetTrie().IsVerkle() { + return false, nil, trienode.NewNodeSet(addrHash), nil + } start := time.Now() - tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root) + tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root, s.trie) + // XXX NOTE: it might just be possible to use an empty trie here, as verkle will not + // delete anything in the tree. if err != nil { return false, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err) } @@ -1153,6 +1199,11 @@ func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.A return incomplete, nil } +// GetTrie returns the account trie. +func (s *StateDB) GetTrie() Trie { + return s.trie +} + // Commit writes the state to the underlying in-memory trie database. 
// Once the state is committed, tries cached in stateDB (including account // trie, storage tries) will no longer be functional. A new state instance diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index 66dda238e11e..e6479076fa98 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -751,7 +751,10 @@ func TestMissingTrieNodes(t *testing.T) { memDb := rawdb.NewMemoryDatabase() db := NewDatabase(memDb) var root common.Hash - state, _ := New(types.EmptyRootHash, db, nil) + state, err := New(types.EmptyRootHash, db, nil) + if err != nil { + panic("nil stte") + } addr := common.BytesToAddress([]byte("so")) { state.SetBalance(addr, big.NewInt(1)) @@ -783,7 +786,7 @@ func TestMissingTrieNodes(t *testing.T) { } // Modify the state state.SetBalance(addr, big.NewInt(2)) - root, err := state.Commit(0, false) + root, err = state.Commit(0, false) if err == nil { t.Fatalf("expected error, got root :%x", root) } @@ -1070,3 +1073,27 @@ func TestResetObject(t *testing.T) { t.Fatalf("Unexpected storage slot value %v", slot) } } + +// Test that an account with more than 128 pieces of code overflows +// correctly into the next group. 
+func TestCodeChunkOverflow(t *testing.T) { + // Create an empty state database + db := rawdb.NewMemoryDatabase() + state, _ := New(common.Hash{}, NewDatabaseWithConfig(db, nil), nil) + + // Update it with some accounts + addr := common.BytesToAddress([]byte{1}) + state.AddBalance(addr, big.NewInt(int64(11))) + state.SetNonce(addr, uint64(42)) + state.SetState(addr, common.BytesToHash([]byte{1, 1, 1}), common.BytesToHash([]byte{1, 1, 1, 1})) + code := make([]byte, 31*256) + for i := range code { + code[i] = 1 + } + state.SetCode(addr, code) + + root := state.IntermediateRoot(false) + if err := state.Database().TrieDB().Commit(root, false); err != nil { + t.Errorf("can not commit trie %v to persistent database", root.Hex()) + } +} diff --git a/core/state/sync_test.go b/core/state/sync_test.go index b065ad8355c3..db0b23a8e1b4 100644 --- a/core/state/sync_test.go +++ b/core/state/sync_test.go @@ -70,7 +70,10 @@ func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) { state.updateStateObject(obj) accounts = append(accounts, acc) } - root, _ := state.Commit(0, false) + root, err := state.Commit(0, false) + if err != nil { + panic(err) + } // Return the generated state return db, sdb, root, accounts diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index 4e8fd1e10f57..6c5c158cc239 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -302,7 +302,7 @@ func (sf *subfetcher) loop() { } sf.trie = trie } else { - trie, err := sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root) + trie, err := sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root, nil /* safe to set to nil for now, as there is no prefetcher for verkle */) if err != nil { log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err) return diff --git a/core/state_processor.go b/core/state_processor.go index fcaf5a8ff3c9..5d10bceb1817 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -17,9 +17,13 @@ package core 
import ( + "encoding/binary" "errors" "fmt" "math/big" + "runtime" + "sync" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" @@ -28,7 +32,12 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" + tutils "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" + "github.com/holiman/uint256" ) // StateProcessor is a basic Processor, which takes care of transitioning @@ -95,15 +104,27 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg if len(withdrawals) > 0 && !p.config.IsShanghai(block.Number(), block.Time()) { return nil, nil, 0, errors.New("withdrawals before shanghai") } + + // Perform the overlay transition, if relevant + if err := OverlayVerkleTransition(statedb); err != nil { + return nil, nil, 0, fmt.Errorf("error performing verkle overlay transition: %w", err) + } + // Finalize the block, applying any consensus engine specific extras (e.g. block rewards) p.engine.Finalize(p.bc, header, statedb, block.Transactions(), block.Uncles(), withdrawals) + if block.NumberU64()%100 == 0 { + stateRoot := statedb.GetTrie().Hash() + log.Info("State root", "number", block.NumberU64(), "hash", stateRoot) + } + return receipts, allLogs, *usedGas, nil } func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, statedb *state.StateDB, blockNumber *big.Int, blockHash common.Hash, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (*types.Receipt, error) { // Create a new context to be used in the EVM environment. txContext := NewEVMTxContext(msg) + txContext.Accesses = statedb.NewAccessWitness() evm.Reset(txContext, statedb) // Apply the transaction to the current state (included in the env). 
@@ -137,6 +158,8 @@ func applyTransaction(msg *Message, config *params.ChainConfig, gp *GasPool, sta receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce()) } + statedb.Witness().Merge(txContext.Accesses) + // Set the receipt logs and create the bloom filter. receipt.Logs = statedb.GetLogs(tx.Hash(), blockNumber.Uint64(), blockHash) receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) @@ -160,3 +183,179 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo vmenv := vm.NewEVM(blockContext, vm.TxContext{BlobHashes: tx.BlobHashes()}, statedb, config, cfg) return applyTransaction(msg, config, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv) } + +var zeroTreeIndex uint256.Int + +// keyValueMigrator is a helper module that collects key-values from the overlay-tree migration for Verkle Trees. +// It assumes that the walk of the base tree is done in address-order, so it exploit that fact to +// collect the key-values in a way that is efficient. +type keyValueMigrator struct { + // leafData contains the values for the future leaf for a particular VKT branch. + leafData []migratedKeyValue + + // When prepare() is called, it will start a background routine that will process the leafData + // saving the result in newLeaves to be used by migrateCollectedKeyValues(). The background + // routine signals that it is done by closing processingReady. + processingReady chan struct{} + newLeaves []verkle.LeafNode + prepareErr error +} + +func newKeyValueMigrator() *keyValueMigrator { + // We do initialize the VKT config since prepare() might indirectly make multiple GetConfig() calls + // in different goroutines when we never called GetConfig() before, causing a race considering the way + // that `config` is designed in go-verkle. + // TODO: jsign as a fix for this in the PR where we move to a file-less precomp, since it allows safe + // concurrent calls to GetConfig(). 
When that gets merged, we can remove this line. + _ = verkle.GetConfig() + return &keyValueMigrator{ + processingReady: make(chan struct{}), + leafData: make([]migratedKeyValue, 0, 10_000), + } +} + +type migratedKeyValue struct { + branchKey branchKey + leafNodeData verkle.BatchNewLeafNodeData +} +type branchKey struct { + addr common.Address + treeIndex uint256.Int +} + +func newBranchKey(addr []byte, treeIndex *uint256.Int) branchKey { + var sk branchKey + copy(sk.addr[:], addr) + sk.treeIndex = *treeIndex + return sk +} + +func (kvm *keyValueMigrator) addStorageSlot(addr []byte, slotNumber []byte, slotValue []byte) { + treeIndex, subIndex := tutils.GetTreeKeyStorageSlotTreeIndexes(slotNumber) + leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, treeIndex)) + leafNodeData.Values[subIndex] = slotValue +} + +func (kvm *keyValueMigrator) addAccount(addr []byte, acc *types.StateAccount) { + leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, &zeroTreeIndex)) + + var version [verkle.LeafValueSize]byte + leafNodeData.Values[tutils.VersionLeafKey] = version[:] + + var balance [verkle.LeafValueSize]byte + for i, b := range acc.Balance.Bytes() { + balance[len(acc.Balance.Bytes())-1-i] = b + } + leafNodeData.Values[tutils.BalanceLeafKey] = balance[:] + + var nonce [verkle.LeafValueSize]byte + binary.LittleEndian.PutUint64(nonce[:8], acc.Nonce) + leafNodeData.Values[tutils.NonceLeafKey] = nonce[:] + + leafNodeData.Values[tutils.CodeKeccakLeafKey] = acc.CodeHash[:] +} + +func (kvm *keyValueMigrator) addAccountCode(addr []byte, codeSize uint64, chunks []byte) { + leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, &zeroTreeIndex)) + + // Save the code size. + var codeSizeBytes [verkle.LeafValueSize]byte + binary.LittleEndian.PutUint64(codeSizeBytes[:8], codeSize) + leafNodeData.Values[tutils.CodeSizeLeafKey] = codeSizeBytes[:] + + // The first 128 chunks are stored in the account header leaf. 
+ for i := 0; i < 128 && i < len(chunks)/32; i++ { + leafNodeData.Values[byte(128+i)] = chunks[32*i : 32*(i+1)] + } + + // Potential further chunks, have their own leaf nodes. + for i := 128; i < len(chunks)/32; { + treeIndex, _ := tutils.GetTreeKeyCodeChunkIndices(uint256.NewInt(uint64(i))) + leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, treeIndex)) + + j := i + for ; (j-i) < 256 && j < len(chunks)/32; j++ { + leafNodeData.Values[byte((j-128)%256)] = chunks[32*j : 32*(j+1)] + } + i = j + } +} + +func (kvm *keyValueMigrator) getOrInitLeafNodeData(bk branchKey) *verkle.BatchNewLeafNodeData { + // Remember that keyValueMigration receives actions ordered by (address, subtreeIndex). + // This means that we can assume that the last element of leafData is the one that we + // are looking for, or that we need to create a new one. + if len(kvm.leafData) == 0 || kvm.leafData[len(kvm.leafData)-1].branchKey != bk { + kvm.leafData = append(kvm.leafData, migratedKeyValue{ + branchKey: bk, + leafNodeData: verkle.BatchNewLeafNodeData{ + Stem: nil, // It will be calculated in the prepare() phase, since it's CPU heavy. + Values: make(map[byte][]byte), + }, + }) + } + return &kvm.leafData[len(kvm.leafData)-1].leafNodeData +} + +func (kvm *keyValueMigrator) prepare() { + // We fire a background routine to process the leafData and save the result in newLeaves. + // The background routine signals that it is done by closing processingReady. + go func() { + // Step 1: We split kvm.leafData in numBatches batches, and we process each batch in a separate goroutine. + // This fills each leafNodeData.Stem with the correct value. 
+ var wg sync.WaitGroup + batchNum := runtime.NumCPU() + batchSize := (len(kvm.leafData) + batchNum - 1) / batchNum + for i := 0; i < len(kvm.leafData); i += batchSize { + start := i + end := i + batchSize + if end > len(kvm.leafData) { + end = len(kvm.leafData) + } + wg.Add(1) + + batch := kvm.leafData[start:end] + go func() { + defer wg.Done() + var currAddr common.Address + var currPoint *verkle.Point + for i := range batch { + if batch[i].branchKey.addr != currAddr { + currAddr = batch[i].branchKey.addr + currPoint = tutils.EvaluateAddressPoint(currAddr[:]) + } + stem := tutils.GetTreeKeyWithEvaluatedAddess(currPoint, &batch[i].branchKey.treeIndex, 0) + stem = stem[:verkle.StemSize] + batch[i].leafNodeData.Stem = stem + } + }() + } + wg.Wait() + + // Step 2: Now that we have all stems (i.e: tree keys) calculated, we can create the new leaves. + nodeValues := make([]verkle.BatchNewLeafNodeData, len(kvm.leafData)) + for i := range kvm.leafData { + nodeValues[i] = kvm.leafData[i].leafNodeData + } + + // Create all leaves in batch mode so we can optimize cryptography operations. + kvm.newLeaves, kvm.prepareErr = verkle.BatchNewLeafNode(nodeValues) + close(kvm.processingReady) + }() +} + +func (kvm *keyValueMigrator) migrateCollectedKeyValues(tree *trie.VerkleTrie) error { + now := time.Now() + <-kvm.processingReady + if kvm.prepareErr != nil { + return fmt.Errorf("failed to prepare key values: %w", kvm.prepareErr) + } + log.Info("Prepared key values from base tree", "duration", time.Since(now)) + + // Insert into the tree. 
+ if err := tree.InsertMigratedLeaves(kvm.newLeaves); err != nil { + return fmt.Errorf("failed to insert migrated leaves: %w", err) + } + + return nil +} diff --git a/core/state_processor_test.go b/core/state_processor_test.go index b4482acf35ba..6cdb2e9a4f33 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -17,8 +17,12 @@ package core import ( + //"bytes" "crypto/ecdsa" + + //"fmt" "math/big" + //"os" "testing" "github.com/ethereum/go-ethereum/common" @@ -33,6 +37,8 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + + //"github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/holiman/uint256" "golang.org/x/crypto/sha3" @@ -417,3 +423,122 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr } return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil)) } + +// A contract creation that calls EXTCODECOPY in the constructor. Used to ensure that the witness +// will not contain that copied data. 
+// Source: https://gist.github.com/gballet/a23db1e1cb4ed105616b5920feb75985 +var ( + code = common.FromHex(`6060604052600a8060106000396000f360606040526008565b00`) + intrinsicContractCreationGas, _ = IntrinsicGas(code, nil, true, true, true, true) + codeWithExtCodeCopy = common.FromHex(`0x60806040526040516100109061017b565b604051809103906000f08015801561002c573d6000803e3d6000fd5b506000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555034801561007857600080fd5b5060008067ffffffffffffffff8111156100955761009461024a565b5b6040519080825280601f01601f1916602001820160405280156100c75781602001600182028036833780820191505090505b50905060008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690506020600083833c81610101906101e3565b60405161010d90610187565b61011791906101a3565b604051809103906000f080158015610133573d6000803e3d6000fd5b50600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550505061029b565b60d58061046783390190565b6102068061053c83390190565b61019d816101d9565b82525050565b60006020820190506101b86000830184610194565b92915050565b6000819050602082019050919050565b600081519050919050565b6000819050919050565b60006101ee826101ce565b826101f8846101be565b905061020381610279565b925060208210156102435761023e7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8360200360080261028e565b831692505b5050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600061028582516101d9565b80915050919050565b600082821b905092915050565b6101bd806102aa6000396000f3fe608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f566852414610030575b600080fd5b61003861004e565b6040516100459190610146565b60405180910390f35b6000600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166381ca91d36040518163ffffffff1660e01b815260040160206040518083038186803b
1580156100b857600080fd5b505afa1580156100cc573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906100f0919061010a565b905090565b60008151905061010481610170565b92915050565b6000602082840312156101205761011f61016b565b5b600061012e848285016100f5565b91505092915050565b61014081610161565b82525050565b600060208201905061015b6000830184610137565b92915050565b6000819050919050565b600080fd5b61017981610161565b811461018457600080fd5b5056fea2646970667358221220a6a0e11af79f176f9c421b7b12f441356b25f6489b83d38cc828a701720b41f164736f6c63430008070033608060405234801561001057600080fd5b5060b68061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063ab5ed15014602d575b600080fd5b60336047565b604051603e9190605d565b60405180910390f35b60006001905090565b6057816076565b82525050565b6000602082019050607060008301846050565b92915050565b600081905091905056fea26469706673582212203a14eb0d5cd07c277d3e24912f110ddda3e553245a99afc4eeefb2fbae5327aa64736f6c63430008070033608060405234801561001057600080fd5b5060405161020638038061020683398181016040528101906100329190610063565b60018160001c6100429190610090565b60008190555050610145565b60008151905061005d8161012e565b92915050565b60006020828403121561007957610078610129565b5b60006100878482850161004e565b91505092915050565b600061009b826100f0565b91506100a6836100f0565b9250827fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff038211156100db576100da6100fa565b5b828201905092915050565b6000819050919050565b6000819050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600080fd5b610137816100e6565b811461014257600080fd5b50565b60b3806101536000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806381ca91d314602d575b600080fd5b60336047565b604051603e9190605a565b60405180910390f35b60005481565b6054816073565b82525050565b6000602082019050606d6000830184604d565b92915050565b600081905091905056fea26469706673582212209bff7098a2f526de1ad499866f27d6d0d6f17b74a413036d6063ca6a0998ca4264736f6c63430008
070033`) + intrinsicCodeWithExtCodeCopyGas, _ = IntrinsicGas(codeWithExtCodeCopy, nil, true, true, true, true) +) + +func TestProcessVerkle(t *testing.T) { + var ( + config = ¶ms.ChainConfig{ + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + BerlinBlock: big.NewInt(0), + LondonBlock: big.NewInt(0), + Ethash: new(params.EthashConfig), + ShanghaiTime: u64(0), + PragueTime: u64(0), + TerminalTotalDifficulty: common.Big0, + TerminalTotalDifficultyPassed: true, + ProofInBlocks: true, + } + signer = types.LatestSigner(config) + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + bcdb = rawdb.NewMemoryDatabase() // Database for the blockchain + gendb = rawdb.NewMemoryDatabase() // Database for the block-generation code, they must be separate as they are path-based. + coinbase = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") + gspec = &Genesis{ + Config: config, + Alloc: GenesisAlloc{ + coinbase: GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + }, + } + ) + // Verkle trees use the snapshot, which must be enabled before the + // data is saved into the tree+database. + genesis := gspec.MustCommit(bcdb) + blockchain, _ := NewBlockChain(bcdb, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil) + defer blockchain.Stop() + + // Commit the genesis block to the block-generation database as it + // is now independent of the blockchain database. 
+ gspec.MustCommit(gendb) + + txCost1 := params.WitnessBranchWriteCost*2 + params.WitnessBranchReadCost*2 + params.WitnessChunkWriteCost*3 + params.WitnessChunkReadCost*10 + params.TxGas + txCost2 := params.WitnessBranchWriteCost + params.WitnessBranchReadCost*2 + params.WitnessChunkWriteCost*2 + params.WitnessChunkReadCost*10 + params.TxGas + contractCreationCost := intrinsicContractCreationGas + uint64(6900 /* from */ +7700 /* creation */ +2939 /* execution costs */) + codeWithExtCodeCopyGas := intrinsicCodeWithExtCodeCopyGas + uint64(6900 /* from */ +7000 /* creation */ +315944 /* execution costs */) + blockGasUsagesExpected := []uint64{ + txCost1*2 + txCost2, + txCost1*2 + txCost2 + contractCreationCost + codeWithExtCodeCopyGas, + } + // TODO utiliser GenerateChainWithGenesis pour le rendre plus pratique + chain, _, proofs, keyvals := GenerateVerkleChain(gspec.Config, genesis, beacon.New(ethash.NewFaker()), gendb, 2, func(i int, gen *BlockGen) { + gen.SetPoS() + // TODO need to check that the tx cost provided is the exact amount used (no remaining left-over) + tx, _ := types.SignTx(types.NewTransaction(uint64(i)*3, common.Address{byte(i), 2, 3}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey) + gen.AddTx(tx) + tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+1, common.Address{}, big.NewInt(999), txCost1, big.NewInt(875000000), nil), signer, testKey) + gen.AddTx(tx) + tx, _ = types.SignTx(types.NewTransaction(uint64(i)*3+2, common.Address{}, big.NewInt(0), txCost2, big.NewInt(875000000), nil), signer, testKey) + gen.AddTx(tx) + + // Add two contract creations in block #2 + if i == 1 { + tx, _ = types.SignTx(types.NewContractCreation(6, big.NewInt(16), 3000000, big.NewInt(875000000), code), signer, testKey) + gen.AddTx(tx) + + tx, _ = types.SignTx(types.NewContractCreation(7, big.NewInt(0), 3000000, big.NewInt(875000000), codeWithExtCodeCopy), signer, testKey) + gen.AddTx(tx) + } + }) + + // Uncomment to extract block #2 + //f, _ := 
os.Create("block2.rlp") + //defer f.Close() + //var buf bytes.Buffer + //rlp.Encode(&buf, chain[1]) + //f.Write(buf.Bytes()) + //fmt.Printf("root= %x\n", chain[0].Root()) + // check the proof for the last block + err := trie.DeserializeAndVerifyVerkleProof(proofs[1], chain[0].Root().Bytes(), keyvals[1]) + if err != nil { + t.Fatal(err) + } + t.Log("verfied verkle proof") + + endnum, err := blockchain.InsertChain(chain) + if err != nil { + t.Fatalf("block %d imported with error: %v", endnum, err) + } + + for i := 0; i < 2; i++ { + b := blockchain.GetBlockByNumber(uint64(i) + 1) + if b == nil { + t.Fatalf("expected block %d to be present in chain", i+1) + } + if b.Hash() != chain[i].Hash() { + t.Fatalf("block #%d not found at expected height", b.NumberU64()) + } + if b.GasUsed() != blockGasUsagesExpected[i] { + t.Fatalf("expected block #%d txs to use %d, got %d\n", b.NumberU64(), blockGasUsagesExpected[i], b.GasUsed()) + } + } +} diff --git a/core/state_transition.go b/core/state_transition.go index f84757be781f..2bdfef1c0552 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" ) @@ -339,6 +340,19 @@ func (st *StateTransition) preCheck() error { return st.buyGas() } +// tryConsumeGas tries to subtract gas from gasPool, setting the result in gasPool +// if subtracting more gas than remains in gasPool, set gasPool = 0 and return false +// otherwise, do the subtraction setting the result in gasPool and return true +func tryConsumeGas(gasPool *uint64, gas uint64) bool { + if *gasPool < gas { + *gasPool = 0 + return false + } + + *gasPool -= gas + return true +} + // TransitionDb will transition the state by applying the current message and // returning the evm execution result with following fields. 
// @@ -389,6 +403,32 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { } st.gasRemaining -= gas + if rules.IsPrague { + targetAddr := msg.To + originAddr := msg.From + + statelessGasOrigin := st.evm.Accesses.TouchTxOriginAndComputeGas(originAddr.Bytes()) + if !tryConsumeGas(&st.gasRemaining, statelessGasOrigin) { + return nil, fmt.Errorf("%w: Insufficient funds to cover witness access costs for transaction: have %d, want %d", ErrInsufficientBalanceWitness, st.gasRemaining, gas) + } + originNonce := st.evm.StateDB.GetNonce(originAddr) + + if msg.To != nil { + statelessGasDest := st.evm.Accesses.TouchTxExistingAndComputeGas(targetAddr.Bytes(), msg.Value.Sign() != 0) + if !tryConsumeGas(&st.gasRemaining, statelessGasDest) { + return nil, fmt.Errorf("%w: Insufficient funds to cover witness access costs for transaction: have %d, want %d", ErrInsufficientBalanceWitness, st.gasRemaining, gas) + } + + // ensure the code size ends up in the access witness + st.evm.StateDB.GetCodeSize(*targetAddr) + } else { + contractAddr := crypto.CreateAddress(originAddr, originNonce) + if !tryConsumeGas(&st.gasRemaining, st.evm.Accesses.TouchAndChargeContractCreateInit(contractAddr.Bytes(), msg.Value.Sign() != 0)) { + return nil, fmt.Errorf("%w: Insufficient funds to cover witness access costs for transaction: have %d, want %d", ErrInsufficientBalanceWitness, st.gasRemaining, gas) + } + } + } + // Check clause 6 if msg.Value.Sign() > 0 && !st.evm.Context.CanTransfer(st.state, msg.From, msg.Value) { return nil, fmt.Errorf("%w: address %v", ErrInsufficientFundsForTransfer, msg.From.Hex()) diff --git a/core/types/block.go b/core/types/block.go index 1b5c7fc8f90e..85778c603c30 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/rlp" + "github.com/gballet/go-verkle" ) // A BlockNonce is a 64-bit hash which proves 
(combined with the @@ -58,6 +59,18 @@ func (n *BlockNonce) UnmarshalText(input []byte) error { return hexutil.UnmarshalFixedText("BlockNonce", input, n[:]) } +type ExecutionWitness struct { + StateDiff verkle.StateDiff `json:"stateDiff"` + VerkleProof *verkle.VerkleProof `json:"verkleProof"` +} + +func (ew *ExecutionWitness) Copy() *ExecutionWitness { + return &ExecutionWitness{ + StateDiff: ew.StateDiff.Copy(), + VerkleProof: ew.VerkleProof.Copy(), + } +} + //go:generate go run github.com/fjl/gencodec -type Header -field-override headerMarshaling -out gen_header_json.go //go:generate go run ../../rlp/rlpgen -type Header -out gen_header_rlp.go @@ -90,6 +103,8 @@ type Header struct { // ExcessBlobGas was added by EIP-4844 and is ignored in legacy headers. ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"` + + ExecutionWitness *ExecutionWitness `json:"executionWitness" rlp:"-"` } // field type overrides for gencodec @@ -297,6 +312,9 @@ func CopyHeader(h *Header) *Header { cpy.BlobGasUsed = new(uint64) *cpy.BlobGasUsed = *h.BlobGasUsed } + if h.ExecutionWitness != nil { + cpy.ExecutionWitness = h.ExecutionWitness.Copy() + } return &cpy } @@ -394,6 +412,8 @@ func (b *Block) BlobGasUsed() *uint64 { return blobGasUsed } +func (b *Block) ExecutionWitness() *ExecutionWitness { return b.header.ExecutionWitness } + // Size returns the true RLP encoded storage size of the block, either by encoding // and returning it, or returning a previously cached value. 
func (b *Block) Size() uint64 { @@ -412,6 +432,18 @@ func (b *Block) SanityCheck() error { return b.header.SanityCheck() } +func (b *Block) SetVerkleProof(vp *verkle.VerkleProof, statediff verkle.StateDiff) { + b.header.ExecutionWitness = &ExecutionWitness{statediff, vp} + if statediff == nil { + b.header.ExecutionWitness.StateDiff = []verkle.StemStateDiff{} + } + if vp == nil { + b.header.ExecutionWitness.VerkleProof = &verkle.VerkleProof{ + IPAProof: &verkle.IPAProof{}, + } + } +} + type writeCounter uint64 func (c *writeCounter) Write(b []byte) (int, error) { diff --git a/core/types/state_account.go b/core/types/state_account.go index 314f4943ecab..0cb751685afe 100644 --- a/core/types/state_account.go +++ b/core/types/state_account.go @@ -58,6 +58,11 @@ func (acct *StateAccount) Copy() *StateAccount { } } +// HasStorage returns true if the account has a non-empty storage tree. +func (acc *StateAccount) HasStorage() bool { + return len(acc.Root) == 32 && acc.Root != EmptyRootHash +} + // SlimAccount is a modified version of an Account, where the root is replaced // with a byte slice. This format can be used to represent full-consensus format // or slim format which replaces the empty root and code hash as nil byte slice. diff --git a/core/vm/common.go b/core/vm/common.go index 90ba4a4ad15b..ba75950e370b 100644 --- a/core/vm/common.go +++ b/core/vm/common.go @@ -63,6 +63,18 @@ func getData(data []byte, start uint64, size uint64) []byte { return common.RightPadBytes(data[start:end], int(size)) } +func getDataAndAdjustedBounds(data []byte, start uint64, size uint64) (codeCopyPadded []byte, actualStart uint64, sizeNonPadded uint64) { + length := uint64(len(data)) + if start > length { + start = length + } + end := start + size + if end > length { + end = length + } + return common.RightPadBytes(data[start:end], int(size)), start, end - start +} + // toWordSize returns the ceiled word size required for memory expansion. 
func toWordSize(size uint64) uint64 { if size > math.MaxUint64-31 { diff --git a/core/vm/contract.go b/core/vm/contract.go index bb0902969ec7..1aa650d7d4f4 100644 --- a/core/vm/contract.go +++ b/core/vm/contract.go @@ -58,6 +58,9 @@ type Contract struct { CodeAddr *common.Address Input []byte + // is the execution frame represented by this object a contract deployment + IsDeployment bool + Gas uint64 value *big.Int } @@ -93,12 +96,12 @@ func (c *Contract) validJumpdest(dest *uint256.Int) bool { if OpCode(c.Code[udest]) != JUMPDEST { return false } - return c.isCode(udest) + return c.IsCode(udest) } -// isCode returns true if the provided PC location is an actual opcode, as +// IsCode returns true if the provided PC location is an actual opcode, as // opposed to a data-segment following a PUSHN operation. -func (c *Contract) isCode(udest uint64) bool { +func (c *Contract) IsCode(udest uint64) bool { // Do we already have an analysis laying around? if c.analysis != nil { return c.analysis.codeSegment(udest) diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 574bb9bef6a8..85f68c0cc273 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -150,6 +150,8 @@ func init() { // ActivePrecompiles returns the precompiles enabled with the current configuration. 
func ActivePrecompiles(rules params.Rules) []common.Address { switch { + case rules.IsPrague: + return PrecompiledAddressesBerlin case rules.IsCancun: return PrecompiledAddressesCancun case rules.IsBerlin: diff --git a/core/vm/evm.go b/core/vm/evm.go index 40e2f3554f46..a04258b0df22 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -21,6 +21,7 @@ import ( "sync/atomic" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" @@ -40,6 +41,8 @@ type ( func (evm *EVM) precompile(addr common.Address) (PrecompiledContract, bool) { var precompiles map[common.Address]PrecompiledContract switch { + case evm.chainRules.IsPrague: + precompiles = PrecompiledContractsBerlin case evm.chainRules.IsCancun: precompiles = PrecompiledContractsCancun case evm.chainRules.IsBerlin: @@ -81,9 +84,10 @@ type BlockContext struct { // All fields can change between transactions. 
type TxContext struct { // Message information - Origin common.Address // Provides information for ORIGIN - GasPrice *big.Int // Provides information for GASPRICE - BlobHashes []common.Hash // Provides information for BLOBHASH + Origin common.Address // Provides information for ORIGIN + GasPrice *big.Int // Provides information for GASPRICE + BlobHashes []common.Hash // Provides information for BLOBHASH + Accesses *state.AccessWitness // Capture all state accesses for this tx } // EVM is the Ethereum Virtual Machine base object and provides @@ -133,6 +137,9 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig chainConfig: chainConfig, chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Random != nil, blockCtx.Time), } + if txCtx.Accesses == nil && chainConfig.IsPrague(blockCtx.BlockNumber, blockCtx.Time) { + evm.Accesses = evm.StateDB.(*state.StateDB).NewAccessWitness() + } evm.interpreter = NewEVMInterpreter(evm) return evm } @@ -140,6 +147,9 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig // Reset resets the EVM with a new transaction context.Reset // This is not threadsafe and should only be done very cautiously. 
func (evm *EVM) Reset(txCtx TxContext, statedb StateDB) { + if txCtx.Accesses == nil && evm.chainRules.IsPrague { + txCtx.Accesses = evm.StateDB.(*state.StateDB).NewAccessWitness() + } evm.TxContext = txCtx evm.StateDB = statedb } @@ -168,6 +178,20 @@ func (evm *EVM) SetBlockContext(blockCtx BlockContext) { evm.chainRules = evm.chainConfig.Rules(num, blockCtx.Random != nil, timestamp) } +// tryConsumeGas tries to subtract gas from gasPool, setting the result in gasPool +// if subtracting more gas than remains in gasPool, set gasPool = 0 and return false +// otherwise, do the subtraction setting the result in gasPool and return true +func tryConsumeGas(gasPool *uint64, gas uint64) bool { + // XXX check this is still needed as a func + if *gasPool < gas { + *gasPool = 0 + return false + } + + *gasPool -= gas + return true +} + // Call executes the contract associated with the addr with the given input as // parameters. It also handles any necessary value transfer required and takes // the necessary steps to create accounts and reverses the state in case of an @@ -185,8 +209,13 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas p, isPrecompile := evm.precompile(addr) debug := evm.Config.Tracer != nil + var creation bool if !evm.StateDB.Exist(addr) { if !isPrecompile && evm.chainRules.IsEIP158 && value.Sign() == 0 { + if evm.chainRules.IsPrague { + // proof of absence + tryConsumeGas(&gas, evm.Accesses.TouchAndChargeProofOfAbsence(caller.Address().Bytes())) + } // Calling a non existing account, don't do anything, but ping the tracer if debug { if evm.depth == 0 { @@ -200,6 +229,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas return nil, gas, nil } evm.StateDB.CreateAccount(addr) + creation = true } evm.Context.Transfer(evm.StateDB, caller.Address(), addr, value) @@ -225,6 +255,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas // Initialise a new contract and set the 
code that is to be used by the EVM. // The contract is a scoped environment for this execution context only. code := evm.StateDB.GetCode(addr) + if len(code) == 0 { ret, err = nil, nil // gas is unchanged } else { @@ -233,6 +264,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas // The depth-check is already done, and precompiles handled above contract := NewContract(caller, AccountRef(addrCopy), value, gas) contract.SetCallCode(&addrCopy, evm.StateDB.GetCodeHash(addrCopy), code) + contract.IsDeployment = creation ret, err = evm.interpreter.Run(contract, input, false) gas = contract.Gas } @@ -445,12 +477,14 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, if evm.chainRules.IsEIP158 { evm.StateDB.SetNonce(address, 1) } + evm.Context.Transfer(evm.StateDB, caller.Address(), address, value) // Initialise a new contract and set the code that is to be used by the EVM. // The contract is a scoped environment for this execution context only. 
contract := NewContract(caller, AccountRef(address), value, gas) contract.SetCodeOptionalHash(&address, codeAndHash) + contract.IsDeployment = true if evm.Config.Tracer != nil { if evm.depth == 0 { @@ -495,6 +529,13 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } } + if err == nil && evm.chainRules.IsPrague { + if !contract.UseGas(evm.Accesses.TouchAndChargeContractCreateCompleted(address.Bytes()[:])) { + evm.StateDB.RevertToSnapshot(snapshot) + err = ErrOutOfGas + } + } + if evm.Config.Tracer != nil { if evm.depth == 0 { evm.Config.Tracer.CaptureEnd(ret, gas-contract.Gas, err) diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index 5153c8b7a3de..f367c3c92978 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -21,7 +21,10 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + trieUtils "github.com/ethereum/go-ethereum/trie/utils" + "github.com/holiman/uint256" ) // memoryGasCost calculates the quadratic gas for memory expansion. 
It does so @@ -95,7 +98,31 @@ var ( gasReturnDataCopy = memoryCopierGas(2) ) +func gasExtCodeSize(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + usedGas := uint64(0) + slot := stack.Back(0) + if evm.chainRules.IsPrague { + usedGas += evm.TxContext.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, trieUtils.CodeSizeLeafKey) + } + + return usedGas, nil +} + +func gasSLoad(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + usedGas := uint64(0) + + if evm.chainRules.IsPrague { + where := stack.Back(0) + treeIndex, subIndex := trieUtils.GetTreeKeyStorageSlotTreeIndexes(where.Bytes()) + usedGas += evm.Accesses.TouchAddressOnReadAndComputeGas(contract.Address().Bytes(), *treeIndex, subIndex) + } + + return usedGas, nil +} + func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + // Apply the witness access costs, err is nil + accessGas, _ := gasSLoad(evm, contract, stack, mem, memorySize) var ( y, x = stack.Back(1), stack.Back(0) current = evm.StateDB.GetState(contract.Address(), x.Bytes32()) @@ -111,12 +138,12 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi // 3. 
From a non-zero to a non-zero (CHANGE) switch { case current == (common.Hash{}) && y.Sign() != 0: // 0 => non 0 - return params.SstoreSetGas, nil + return params.SstoreSetGas + accessGas, nil case current != (common.Hash{}) && y.Sign() == 0: // non 0 => 0 evm.StateDB.AddRefund(params.SstoreRefundGas) - return params.SstoreClearGas, nil + return params.SstoreClearGas + accessGas, nil default: // non 0 => non 0 (or 0 => 0) - return params.SstoreResetGas, nil + return params.SstoreResetGas + accessGas, nil } } @@ -369,6 +396,7 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize transfersValue = !stack.Back(2).IsZero() address = common.Address(stack.Back(1).Bytes20()) ) + if evm.chainRules.IsEIP158 { if transfersValue && evm.StateDB.Empty(address) { gas += params.CallNewAccountGas @@ -395,6 +423,21 @@ func gasCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } + if evm.chainRules.IsPrague { + if _, isPrecompile := evm.precompile(address); !isPrecompile { + gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes()[:])) + if overflow { + return 0, ErrGasUintOverflow + } + } + if transfersValue { + gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeValueTransfer(contract.Address().Bytes()[:], address.Bytes()[:])) + if overflow { + return 0, ErrGasUintOverflow + } + } + } + return gas, nil } @@ -420,6 +463,15 @@ func gasCallCode(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memory if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } + if evm.chainRules.IsPrague { + address := common.Address(stack.Back(1).Bytes20()) + if _, isPrecompile := evm.precompile(address); !isPrecompile { + gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes())) + if overflow { + return 0, ErrGasUintOverflow + } + } + } 
return gas, nil } @@ -436,6 +488,15 @@ func gasDelegateCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } + if evm.chainRules.IsPrague { + address := common.Address(stack.Back(1).Bytes20()) + if _, isPrecompile := evm.precompile(address); !isPrecompile { + gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes())) + if overflow { + return 0, ErrGasUintOverflow + } + } + } return gas, nil } @@ -452,6 +513,15 @@ func gasStaticCall(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memo if gas, overflow = math.SafeAdd(gas, evm.callGasTemp); overflow { return 0, ErrGasUintOverflow } + if evm.chainRules.IsPrague { + address := common.Address(stack.Back(1).Bytes20()) + if _, isPrecompile := evm.precompile(address); !isPrecompile { + gas, overflow = math.SafeAdd(gas, evm.Accesses.TouchAndChargeMessageCall(address.Bytes())) + if overflow { + return 0, ErrGasUintOverflow + } + } + } return gas, nil } @@ -472,6 +542,12 @@ func gasSelfdestruct(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me } } + if evm.chainRules.IsPrague { + // TODO turn this into a panic (when we are sure this method + // will never execute when verkle is enabled) + log.Warn("verkle witness accumulation not supported for selfdestruct") + } + if !evm.StateDB.HasSelfDestructed(contract.Address()) { evm.StateDB.AddRefund(params.SelfdestructRefundGas) } diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 2105201fce42..84edacb1b9bc 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -18,9 +18,12 @@ package vm import ( "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + trieUtils 
"github.com/ethereum/go-ethereum/trie/utils" "github.com/holiman/uint256" ) @@ -341,7 +344,12 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeConte func opExtCodeSize(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { slot := scope.Stack.peek() - slot.SetUint64(uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20()))) + cs := uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20())) + if interpreter.evm.chainRules.IsPrague { + statelessGas := interpreter.evm.Accesses.TouchAddressOnReadAndComputeGas(slot.Bytes(), uint256.Int{}, trieUtils.CodeSizeLeafKey) + scope.Contract.UseGas(statelessGas) + } + slot.SetUint64(cs) return nil, nil } @@ -362,12 +370,49 @@ func opCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ if overflow { uint64CodeOffset = 0xffffffffffffffff } - codeCopy := getData(scope.Contract.Code, uint64CodeOffset, length.Uint64()) - scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) + contractAddr := scope.Contract.Address() + paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(scope.Contract.Code, uint64CodeOffset, length.Uint64()) + if interpreter.evm.chainRules.IsPrague { + scope.Contract.UseGas(touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], copyOffset, nonPaddedCopyLength, uint64(len(scope.Contract.Code)), interpreter.evm.Accesses)) + } + scope.Memory.Set(memOffset.Uint64(), uint64(len(paddedCodeCopy)), paddedCodeCopy) return nil, nil } +// touchCodeChunksRangeOnReadAndChargeGas is a helper function to touch every chunk in a code range and charge witness gas costs +func touchCodeChunksRangeOnReadAndChargeGas(contractAddr []byte, startPC, size uint64, codeLen uint64, accesses *state.AccessWitness) uint64 { + // note that in the case where the copied code is outside the range of the + // contract code but touches the last leaf with contract code in it, + // we don't include the last leaf of code in the AccessWitness. 
The + // reason that we do not need the last leaf is the account's code size + // is already in the AccessWitness so a stateless verifier can see that + // the code from the last leaf is not needed. + if (codeLen == 0 && size == 0) || startPC > codeLen { + return 0 + } + + // endPC is the last PC that must be touched. + endPC := startPC + size - 1 + if startPC+size > codeLen { + endPC = codeLen + } + + var statelessGasCharged uint64 + for chunkNumber := startPC / 31; chunkNumber <= endPC/31; chunkNumber++ { + treeIndex := *uint256.NewInt((chunkNumber + 128) / 256) + subIndex := byte((chunkNumber + 128) % 256) + gas := accesses.TouchAddressOnReadAndComputeGas(contractAddr, treeIndex, subIndex) + var overflow bool + statelessGasCharged, overflow = math.SafeAdd(statelessGasCharged, gas) + if overflow { + panic("overflow when adding gas") + } + } + + return statelessGasCharged +} + func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { var ( stack = scope.Stack @@ -381,8 +426,20 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) uint64CodeOffset = 0xffffffffffffffff } addr := common.Address(a.Bytes20()) - codeCopy := getData(interpreter.evm.StateDB.GetCode(addr), uint64CodeOffset, length.Uint64()) - scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) + if interpreter.evm.chainRules.IsPrague { + code := interpreter.evm.StateDB.GetCode(addr) + contract := &Contract{ + Code: code, + self: AccountRef(addr), + } + paddedCodeCopy, copyOffset, nonPaddedCopyLength := getDataAndAdjustedBounds(code, uint64CodeOffset, length.Uint64()) + gas := touchCodeChunksRangeOnReadAndChargeGas(addr[:], copyOffset, nonPaddedCopyLength, uint64(len(contract.Code)), interpreter.evm.Accesses) + scope.Contract.UseGas(gas) + scope.Memory.Set(memOffset.Uint64(), length.Uint64(), paddedCodeCopy) + } else { + codeCopy := getData(interpreter.evm.StateDB.GetCode(addr), uint64CodeOffset, length.Uint64()) + 
scope.Memory.Set(memOffset.Uint64(), length.Uint64(), codeCopy) + } return nil, nil } @@ -514,6 +571,7 @@ func opSload(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by loc := scope.Stack.peek() hash := common.Hash(loc.Bytes32()) val := interpreter.evm.StateDB.GetState(scope.Contract.Address(), hash) + loc.SetBytes(val.Bytes()) return nil, nil } @@ -583,6 +641,13 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) gas = scope.Contract.Gas ) + if interpreter.evm.chainRules.IsPrague { + contractAddress := crypto.CreateAddress(scope.Contract.Address(), interpreter.evm.StateDB.GetNonce(scope.Contract.Address())) + statelessGas := interpreter.evm.Accesses.TouchAndChargeContractCreateInit(contractAddress.Bytes()[:], value.Sign() != 0) + if !tryConsumeGas(&gas, statelessGas) { + return nil, ErrExecutionReverted + } + } if interpreter.evm.chainRules.IsEIP150 { gas -= gas / 64 } @@ -630,6 +695,15 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) gas = scope.Contract.Gas ) + if interpreter.evm.chainRules.IsPrague { + codeAndHash := &codeAndHash{code: input} + contractAddress := crypto.CreateAddress2(scope.Contract.Address(), salt.Bytes32(), codeAndHash.Hash().Bytes()) + statelessGas := interpreter.evm.Accesses.TouchAndChargeContractCreateInit(contractAddress.Bytes()[:], endowment.Sign() != 0) + if !tryConsumeGas(&gas, statelessGas) { + return nil, ErrExecutionReverted + } + } + // Apply EIP150 gas -= gas / 64 scope.Contract.UseGas(gas) @@ -884,6 +958,14 @@ func opPush1(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by *pc += 1 if *pc < codeLen { scope.Stack.push(integer.SetUint64(uint64(scope.Contract.Code[*pc]))) + + if interpreter.evm.chainRules.IsPrague && *pc%31 == 0 { + // touch next chunk if PUSH1 is at the boundary. 
if so, *pc has + // advanced past this boundary. + contractAddr := scope.Contract.Address() + statelessGas := touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], *pc+1, uint64(1), uint64(len(scope.Contract.Code)), interpreter.evm.Accesses) + scope.Contract.UseGas(statelessGas) + } } else { scope.Stack.push(integer.Clear()) } @@ -905,6 +987,12 @@ func makePush(size uint64, pushByteSize int) executionFunc { endMin = startMin + pushByteSize } + if interpreter.evm.chainRules.IsPrague { + contractAddr := scope.Contract.Address() + statelessGas := touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], uint64(startMin), uint64(pushByteSize), uint64(len(scope.Contract.Code)), interpreter.evm.Accesses) + scope.Contract.UseGas(statelessGas) + } + integer := new(uint256.Int) scope.Stack.push(integer.SetBytes(common.RightPadBytes( scope.Contract.Code[startMin:endMin], pushByteSize))) diff --git a/core/vm/interface.go b/core/vm/interface.go index 26814d3d2f0e..0a02a0181c05 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -20,6 +20,7 @@ import ( "math/big" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" ) @@ -78,6 +79,9 @@ type StateDB interface { AddLog(*types.Log) AddPreimage(common.Hash, []byte) + + Witness() *state.AccessWitness + SetWitness(*state.AccessWitness) } // CallContext provides a basic interface for the EVM calling conventions. The EVM diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 873337850e6f..1bc0e80dfc44 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -56,6 +56,9 @@ func NewEVMInterpreter(evm *EVM) *EVMInterpreter { // If jump table was not initialised we set the default one. 
var table *JumpTable switch { + case evm.chainRules.IsPrague: + // TODO replace with proper instruction set when fork is specified + table = &shanghaiInstructionSet case evm.chainRules.IsCancun: table = &cancunInstructionSet case evm.chainRules.IsShanghai: @@ -165,6 +168,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( } }() } + // The Interpreter main run loop (contextual). This loop runs until either an // explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during // the execution of one of the operations or until the done flag is set by the @@ -174,6 +178,14 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( // Capture pre-execution values for tracing. logged, pcCopy, gasCopy = false, pc, contract.Gas } + + if in.evm.chainRules.IsPrague && !contract.IsDeployment { + // if the PC ends up in a new "chunk" of verkleized code, charge the + // associated costs. + contractAddr := contract.Address() + contract.Gas -= touchCodeChunksRangeOnReadAndChargeGas(contractAddr[:], pc, 1, uint64(len(contract.Code)), in.evm.TxContext.Accesses) + } + + // Get the operation from the jump table and validate the stack to ensure there are // enough stack items available to perform the operation. 
op = contract.GetOp(pc) diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 702b18661545..0a881236f64e 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -470,6 +470,7 @@ func newFrontierInstructionSet() JumpTable { EXTCODESIZE: { execute: opExtCodeSize, constantGas: params.ExtcodeSizeGasFrontier, + dynamicGas: gasExtCodeSize, minStack: minStack(1, 1), maxStack: maxStack(1, 1), }, @@ -550,6 +551,7 @@ func newFrontierInstructionSet() JumpTable { SLOAD: { execute: opSload, constantGas: params.SloadGasFrontier, + dynamicGas: gasSLoad, minStack: minStack(1, 1), maxStack: maxStack(1, 1), }, diff --git a/core/vm/jump_table_export.go b/core/vm/jump_table_export.go index 6ea47d63a281..2ceb75e73273 100644 --- a/core/vm/jump_table_export.go +++ b/core/vm/jump_table_export.go @@ -26,10 +26,8 @@ import ( // the rules. func LookupInstructionSet(rules params.Rules) (JumpTable, error) { switch { - case rules.IsVerkle: - return newCancunInstructionSet(), errors.New("verkle-fork not defined yet") case rules.IsPrague: - return newCancunInstructionSet(), errors.New("prague-fork not defined yet") + return newShanghaiInstructionSet(), errors.New("prague-fork not defined yet") case rules.IsCancun: return newCancunInstructionSet(), nil case rules.IsShanghai: diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index 04c6409ebd86..4d4fe8aed3e6 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -22,6 +22,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie/utils" ) func makeGasSStoreFunc(clearingRefund uint64) gasFunc { @@ -51,6 +52,11 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc { } value := common.Hash(y.Bytes32()) + if evm.chainRules.IsPrague { + treeIndex, subIndex := utils.GetTreeKeyStorageSlotTreeIndexes(x.Bytes()) + cost += 
evm.Accesses.TouchAddressOnWriteAndComputeGas(contract.Address().Bytes(), *treeIndex, subIndex) + } + if current == value { // noop (1) // EIP 2200 original clause: // return params.SloadGasEIP2200, nil @@ -103,14 +109,23 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc { func gasSLoadEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { loc := stack.peek() slot := common.Hash(loc.Bytes32()) + var gasUsed uint64 + + if evm.chainRules.IsPrague { + where := stack.Back(0) + treeIndex, subIndex := utils.GetTreeKeyStorageSlotTreeIndexes(where.Bytes()) + addr := contract.Address() + gasUsed += evm.Accesses.TouchAddressOnReadAndComputeGas(addr.Bytes(), *treeIndex, subIndex) + } + // Check slot presence in the access list if _, slotPresent := evm.StateDB.SlotInAccessList(contract.Address(), slot); !slotPresent { // If the caller cannot afford the cost, this change will be rolled back // If he does afford it, we can skip checking the same thing later on, during execution evm.StateDB.AddSlotToAccessList(contract.Address(), slot) - return params.ColdSloadCostEIP2929, nil + return gasUsed + params.ColdSloadCostEIP2929, nil } - return params.WarmStorageReadCostEIP2929, nil + return gasUsed + params.WarmStorageReadCostEIP2929, nil } // gasExtCodeCopyEIP2929 implements extcodecopy according to EIP-2929 diff --git a/eth/backend.go b/eth/backend.go index 667200bcedda..a6c80159077d 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -198,8 +198,8 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if config.OverrideCancun != nil { overrides.OverrideCancun = config.OverrideCancun } - if config.OverrideVerkle != nil { - overrides.OverrideVerkle = config.OverrideVerkle + if config.OverridePrague != nil { + overrides.OverridePrague = config.OverridePrague } eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, &overrides, eth.engine, vmConfig, eth.shouldPreserve, 
&config.TxLookupLimit) if err != nil { diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go index 1a221941427a..63079415fc14 100644 --- a/eth/catalyst/api.go +++ b/eth/catalyst/api.go @@ -528,6 +528,17 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe log.Warn("Invalid timestamp", "parent", block.Time(), "block", block.Time()) return api.invalid(errors.New("invalid timestamp"), parent.Header()), nil } + // Trigger the start of the verkle conversion if we're at the right block + if api.eth.BlockChain().Config().IsPrague(block.Number(), block.Time()) && !api.eth.BlockChain().Config().IsPrague(parent.Number(), parent.Time()) { + parent := api.eth.BlockChain().GetHeaderByNumber(block.NumberU64() - 1) + if !api.eth.BlockChain().Config().IsPrague(parent.Number, parent.Time) { + api.eth.BlockChain().StartVerkleTransition(parent.Root, common.Hash{}, api.eth.BlockChain().Config(), nil) + } + } + // Reset db merge state in case of a reorg + if !api.eth.BlockChain().Config().IsPrague(block.Number(), block.Time()) { + api.eth.BlockChain().ReorgThroughVerkleTransition() + } // Another cornercase: if the node is in snap sync mode, but the CL client // tries to make it import a block. That should be denied as pushing something // into the database directly will conflict with the assumptions of snap sync diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 4bc8b8dc6c6e..4606b60408dd 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -157,7 +157,7 @@ type Config struct { OverrideCancun *uint64 `toml:",omitempty"` // OverrideVerkle (TODO: remove after the fork) - OverrideVerkle *uint64 `toml:",omitempty"` + OverridePrague *uint64 `toml:",omitempty"` } // CreateConsensusEngine creates a consensus engine for the given chain config. 
diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index 324fbe380ea3..2ad499a53485 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -52,7 +52,7 @@ func (c Config) MarshalTOML() (interface{}, error) { RPCEVMTimeout time.Duration RPCTxFeeCap float64 OverrideCancun *uint64 `toml:",omitempty"` - OverrideVerkle *uint64 `toml:",omitempty"` + OverridePrague *uint64 `toml:",omitempty"` } var enc Config enc.Genesis = c.Genesis @@ -90,7 +90,7 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.RPCEVMTimeout = c.RPCEVMTimeout enc.RPCTxFeeCap = c.RPCTxFeeCap enc.OverrideCancun = c.OverrideCancun - enc.OverrideVerkle = c.OverrideVerkle + enc.OverridePrague = c.OverridePrague return &enc, nil } @@ -132,7 +132,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { RPCEVMTimeout *time.Duration RPCTxFeeCap *float64 OverrideCancun *uint64 `toml:",omitempty"` - OverrideVerkle *uint64 `toml:",omitempty"` + OverridePrague *uint64 `toml:",omitempty"` } var dec Config if err := unmarshal(&dec); err != nil { @@ -243,8 +243,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.OverrideCancun != nil { c.OverrideCancun = dec.OverrideCancun } - if dec.OverrideVerkle != nil { - c.OverrideVerkle = dec.OverrideVerkle + if dec.OverridePrague != nil { + c.OverridePrague = dec.OverridePrague } return nil } diff --git a/eth/tracers/api.go b/eth/tracers/api.go index 740a38ab9fbf..5e90180df8d5 100644 --- a/eth/tracers/api.go +++ b/eth/tracers/api.go @@ -1020,10 +1020,6 @@ func overrideConfig(original *params.ChainConfig, override *params.ChainConfig) copy.PragueTime = timestamp canon = false } - if timestamp := override.VerkleTime; timestamp != nil { - copy.VerkleTime = timestamp - canon = false - } return copy, canon } diff --git a/eth/tracers/js/tracer_test.go b/eth/tracers/js/tracer_test.go index bf6427faf673..c68c81e511e9 100644 --- a/eth/tracers/js/tracer_test.go +++ 
b/eth/tracers/js/tracer_test.go @@ -267,14 +267,17 @@ func TestEnterExit(t *testing.T) { if _, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}, exit: function() {}}", new(tracers.Context), nil); err != nil { t.Fatal(err) } + // test that the enter and exit method are correctly invoked and the values passed tracer, err := newJsTracer("{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, step: function() {}, fault: function() {}, result: function() { return {enters: this.enters, exits: this.exits, enterGas: this.enterGas, gasUsed: this.gasUsed} }, enter: function(frame) { this.enters++; this.enterGas = frame.getGas(); }, exit: function(res) { this.exits++; this.gasUsed = res.getGasUsed(); }}", new(tracers.Context), nil) if err != nil { t.Fatal(err) } + scope := &vm.ScopeContext{ Contract: vm.NewContract(&account{}, &account{}, big.NewInt(0), 0), } + tracer.CaptureEnter(vm.CALL, scope.Contract.Caller(), scope.Contract.Address(), []byte{}, 1000, new(big.Int)) tracer.CaptureExit([]byte{}, 400, nil) diff --git a/go.mod b/go.mod index 7694f6eeaf47..86b978b071bb 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,8 @@ require ( github.com/cespare/cp v0.1.0 github.com/cloudflare/cloudflare-go v0.14.0 github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 - github.com/consensys/gnark-crypto v0.10.0 + github.com/consensys/gnark-crypto v0.12.1 + github.com/crate-crypto/go-ipa v0.0.0-20231021143427-ff2c8f728e84 github.com/crate-crypto/go-kzg-4844 v0.3.0 github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set/v2 v2.1.0 @@ -25,7 +26,7 @@ require ( github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/fsnotify/fsnotify v1.6.0 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b + github.com/gballet/go-verkle v0.1.1-0.20231020124853-d124d1998b1a github.com/go-stack/stack v1.8.1 github.com/gofrs/flock v0.8.1 
github.com/golang-jwt/jwt/v4 v4.3.0 @@ -56,17 +57,17 @@ require ( github.com/rs/cors v1.7.0 github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible github.com/status-im/keycard-go v0.2.0 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.2 github.com/supranational/blst v0.3.11 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/tyler-smith/go-bip39 v1.1.0 github.com/urfave/cli/v2 v2.24.1 go.uber.org/automaxprocs v1.5.2 - golang.org/x/crypto v0.9.0 + golang.org/x/crypto v0.10.0 golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc - golang.org/x/sync v0.3.0 - golang.org/x/sys v0.9.0 - golang.org/x/text v0.9.0 + golang.org/x/sync v0.4.0 + golang.org/x/sys v0.13.0 + golang.org/x/text v0.10.0 golang.org/x/time v0.3.0 golang.org/x/tools v0.9.1 gopkg.in/natefinch/lumberjack.v2 v2.0.0 @@ -85,14 +86,13 @@ require ( github.com/aws/aws-sdk-go-v2/service/sts v1.1.1 // indirect github.com/aws/smithy-go v1.1.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.7.0 // indirect + github.com/bits-and-blooms/bitset v1.10.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/redact v1.1.3 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect diff --git a/go.sum b/go.sum index a54b22e3dc0d..7b2cb2ecd2b5 100644 --- a/go.sum +++ b/go.sum @@ -43,8 +43,8 @@ github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod 
h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= -github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= +github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= @@ -75,8 +75,8 @@ github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZ github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= -github.com/consensys/gnark-crypto v0.10.0 h1:zRh22SR7o4K35SoNqouS9J/TKHTyU2QWaj5ldehyXtA= -github.com/consensys/gnark-crypto v0.10.0/go.mod h1:Iq/P3HHl0ElSjsg2E1gsMwhAyxnxoKK5nVyZKd+/KhU= +github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= +github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -84,8 +84,8 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod 
h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80 h1:DuBDHVjgGMPki7bAyh91+3cF1Vh34sAEdH8JQgbc2R0= -github.com/crate-crypto/go-ipa v0.0.0-20230601170251-1830d0757c80/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI= +github.com/crate-crypto/go-ipa v0.0.0-20231021143427-ff2c8f728e84 h1:hq7cWcDOjZ54zL6Ua6Cv27Jnm6vSs4G5W9W3/b2oz1c= +github.com/crate-crypto/go-ipa v0.0.0-20231021143427-ff2c8f728e84/go.mod h1:J+gsi6D4peY0kyhaklyXFRVHOQWI2I5uU0c2+/90HYc= github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A= github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -144,8 +144,8 @@ github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILD github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b h1:vMT47RYsrftsHSTQhqXwC3BYflo38OLC3Y4LtXtLyU0= -github.com/gballet/go-verkle v0.0.0-20230607174250-df487255f46b/go.mod h1:CDncRYVRSDqwakm282WEkjfaAj1hxU/v5RXxk5nXOiI= +github.com/gballet/go-verkle v0.1.1-0.20231020124853-d124d1998b1a h1:kqhR2nTIep0lw7zJBp2ju+fWbqP3PojUw3L+jv1qBK4= +github.com/gballet/go-verkle v0.1.1-0.20231020124853-d124d1998b1a/go.mod 
h1:7JamHhSTnnHDhcI3G8r4sWaD9XlleriqVlC3FeAQJKM= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= @@ -302,7 +302,6 @@ github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4F github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= -github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -424,8 +423,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= github.com/supranational/blst v0.3.11/go.mod 
h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= @@ -476,8 +475,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= -golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.10.0 h1:LKqV2xt9+kDzSTfOhx4FrkEBcMrAgHSYgzywV9zcGmM= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU= golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= @@ -530,9 +529,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod 
h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -565,15 +563,14 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -585,8 +582,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58= +golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 52dbaf164cd7..8937dc37aead 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -1051,6 +1051,11 @@ func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.S } evm, vmError := b.GetEVM(ctx, msg, state, header, &vm.Config{NoBaseFee: true}, &blockCtx) + // Set witness if trie is verkle + if state.Database().TrieDB().IsVerkle() { + state.SetWitness(state.NewAccessWitness()) + } + // Wait for the context to be done and cancel the evm. 
Even if the // EVM has finished, cancelling may be done (repeatedly) go func() { @@ -1214,7 +1219,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr result, err := doCall(ctx, b, args, state, header, nil, nil, 0, gasCap) if err != nil { - if errors.Is(err, core.ErrIntrinsicGas) { + if errors.Is(err, core.ErrIntrinsicGas) || errors.Is(err, core.ErrInsufficientBalanceWitness) { return true, nil, nil // Special case, raise gas limit } return true, nil, err // Bail out diff --git a/les/client.go b/les/client.go index 132c857aa529..691635be0c59 100644 --- a/les/client.go +++ b/les/client.go @@ -95,8 +95,8 @@ func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { if config.OverrideCancun != nil { overrides.OverrideCancun = config.OverrideCancun } - if config.OverrideVerkle != nil { - overrides.OverrideVerkle = config.OverrideVerkle + if config.OverridePrague != nil { + overrides.OverridePrague = config.OverridePrague } chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, trie.NewDatabase(chainDb), config.Genesis, &overrides) if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat { diff --git a/les/server_requests.go b/les/server_requests.go index 30ff2cd05fb4..6f3e5e0f0335 100644 --- a/les/server_requests.go +++ b/les/server_requests.go @@ -430,7 +430,7 @@ func handleGetProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) { p.bumpInvalid() continue } - trie, err = statedb.OpenStorageTrie(root, address, account.Root) + trie, err = statedb.OpenStorageTrie(root, address, account.Root, nil) if trie == nil || err != nil { p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", address, "root", account.Root, "err", err) continue diff --git a/light/odr_test.go b/light/odr_test.go index 79f901bbdb68..ffe4031ce944 100644 --- a/light/odr_test.go +++ b/light/odr_test.go @@ -87,7 +87,7 @@ func (odr 
*testOdr) Retrieve(ctx context.Context, req OdrRequest) error { t state.Trie ) if len(req.Id.AccountAddress) > 0 { - t, err = odr.serverState.OpenStorageTrie(req.Id.StateRoot, common.BytesToAddress(req.Id.AccountAddress), req.Id.Root) + t, err = odr.serverState.OpenStorageTrie(req.Id.StateRoot, common.BytesToAddress(req.Id.AccountAddress), req.Id.Root, nil) } else { t, err = odr.serverState.OpenTrie(req.Id.Root) } diff --git a/light/trie.go b/light/trie.go index 4967cc74e5ba..53d54615d909 100644 --- a/light/trie.go +++ b/light/trie.go @@ -27,6 +27,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/trienode" @@ -55,7 +56,7 @@ func (db *odrDatabase) OpenTrie(root common.Hash) (state.Trie, error) { return &odrTrie{db: db, id: db.id}, nil } -func (db *odrDatabase) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash) (state.Trie, error) { +func (db *odrDatabase) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, _ state.Trie) (state.Trie, error) { return &odrTrie{db: db, id: StorageTrieID(db.id, address, root)}, nil } @@ -100,6 +101,70 @@ func (db *odrDatabase) DiskDB() ethdb.KeyValueStore { panic("not implemented") } +func (db *odrDatabase) StartVerkleTransition(originalRoot common.Hash, translatedRoot common.Hash, chainConfig *params.ChainConfig, _ *uint64) { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) ReorgThroughVerkleTransition() { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) EndVerkleTransition() { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) InTransition() bool { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) Transitioned() bool { + panic("not 
implemented") // TODO: Implement +} + +func (db *odrDatabase) SetCurrentSlotHash(hash common.Hash) { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) GetCurrentAccountAddress() *common.Address { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) SetCurrentAccountAddress(_ common.Address) { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) GetCurrentAccountHash() common.Hash { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) GetCurrentSlotHash() common.Hash { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) SetStorageProcessed(_ bool) { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) GetStorageProcessed() bool { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) GetCurrentPreimageOffset() int64 { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) SetCurrentPreimageOffset(_ int64) { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) AddRootTranslation(originalRoot common.Hash, translatedRoot common.Hash) { + panic("not implemented") // TODO: Implement +} + +func (db *odrDatabase) SetLastMerkleRoot(root common.Hash) { + panic("not implemented") // TODO: Implement +} + type odrTrie struct { db *odrDatabase id *TrieID @@ -230,6 +295,10 @@ func (t *odrTrie) do(key []byte, fn func() error) error { } } +func (t *odrTrie) IsVerkle() bool { + return false +} + type nodeIterator struct { trie.NodeIterator t *odrTrie diff --git a/miner/worker.go b/miner/worker.go index 97967ea2f18b..124c93212262 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -697,13 +697,7 @@ func (w *worker) resultLoop() { } // makeEnv creates a new environment for the sealing block. 
-func (w *worker) makeEnv(parent *types.Header, header *types.Header, coinbase common.Address) (*environment, error) { - // Retrieve the parent state to execute on top and start a prefetcher for - // the miner to speed block sealing up a bit. - state, err := w.chain.StateAt(parent.Root) - if err != nil { - return nil, err - } +func (w *worker) makeEnv(parent *types.Header, header *types.Header, coinbase common.Address, state *state.StateDB) (*environment, error) { state.StartPrefetcher("miner") // Note the passed coinbase may be different with header.Coinbase. @@ -895,6 +889,24 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) { header.GasLimit = core.CalcGasLimit(parentGasLimit, w.config.GasCeil) } } + + // Trigger the start of the verkle conversion if we're at the right block + if w.chain.Config().IsPrague(header.Number, header.Time) { + parent := w.chain.GetHeaderByNumber(header.Number.Uint64() - 1) + if !w.chain.Config().IsPrague(parent.Number, parent.Time) { + w.chain.StartVerkleTransition(parent.Root, common.Hash{}, w.chain.Config(), nil) + } + } + + // Retrieve the parent state to execute on top and start a prefetcher for + // the miner to speed block sealing up a bit. + state, err := w.chain.StateAt(parent.Root) + if err != nil { + return nil, err + } + if w.chain.Config().IsPrague(header.Number, header.Time) { + core.OverlayVerkleTransition(state) + } // Run the consensus preparation with the default or customized consensus engine. if err := w.engine.Prepare(w.chain, header); err != nil { log.Error("Failed to prepare header for sealing", "err", err) @@ -903,7 +915,7 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) { // Could potentially happen if starting to mine in an odd state. // Note genParams.coinbase can be different with header.Coinbase // since clique algorithm can modify the coinbase field in header. 
- env, err := w.makeEnv(parent, header, genParams.coinbase) + env, err := w.makeEnv(parent, header, genParams.coinbase, state) if err != nil { log.Error("Failed to create sealing context", "err", err) return nil, err @@ -1050,6 +1062,7 @@ func (w *worker) commit(env *environment, interval func(), update bool, start ti if err != nil { return err } + // If we're post merge, just ignore if !w.isTTDReached(block.Header()) { select { diff --git a/miner/worker_test.go b/miner/worker_test.go index 80557d99bfcf..d8a5f8437228 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -18,6 +18,7 @@ package miner import ( "math/big" + "math/rand" "sync/atomic" "testing" "time" @@ -129,6 +130,7 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine default: t.Fatalf("unexpected consensus engine type: %T", engine) } + // I no longer see a call to GenerateChain so this probably broke state_processor_test.go chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec, nil, engine, vm.Config{}, nil, nil) if err != nil { t.Fatalf("core.NewBlockChain failed: %v", err) @@ -147,6 +149,23 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine func (b *testWorkerBackend) BlockChain() *core.BlockChain { return b.chain } func (b *testWorkerBackend) TxPool() *txpool.TxPool { return b.txPool } +//nolint:unused +func (b *testWorkerBackend) newRandomVerkleUncle() *types.Block { + var parent *types.Block + cur := b.chain.CurrentBlock() + if cur.Number.Uint64() == 0 { + parent = b.chain.Genesis() + } else { + parent = b.chain.GetBlockByHash(b.chain.CurrentBlock().ParentHash) + } + blocks, _, _, _ := core.GenerateVerkleChain(b.chain.Config(), parent, b.chain.Engine(), b.db, 1, func(i int, gen *core.BlockGen) { + var addr = make([]byte, common.AddressLength) + rand.Read(addr) + gen.SetCoinbase(common.BytesToAddress(addr)) + }) + return blocks[0] +} + func (b *testWorkerBackend) newRandomTx(creation bool) 
*types.Transaction { var tx *types.Transaction gasPrice := big.NewInt(10 * params.InitialBaseFee) diff --git a/params/config.go b/params/config.go index 75c8fd89d09f..19e633a71def 100644 --- a/params/config.go +++ b/params/config.go @@ -128,7 +128,6 @@ var ( ShanghaiTime: nil, CancunTime: nil, PragueTime: nil, - VerkleTime: nil, TerminalTotalDifficulty: nil, TerminalTotalDifficultyPassed: true, Ethash: new(EthashConfig), @@ -179,7 +178,6 @@ var ( ShanghaiTime: nil, CancunTime: nil, PragueTime: nil, - VerkleTime: nil, TerminalTotalDifficulty: nil, TerminalTotalDifficultyPassed: false, Ethash: nil, @@ -209,7 +207,6 @@ var ( ShanghaiTime: nil, CancunTime: nil, PragueTime: nil, - VerkleTime: nil, TerminalTotalDifficulty: nil, TerminalTotalDifficultyPassed: false, Ethash: new(EthashConfig), @@ -239,7 +236,6 @@ var ( ShanghaiTime: nil, CancunTime: nil, PragueTime: nil, - VerkleTime: nil, TerminalTotalDifficulty: nil, TerminalTotalDifficultyPassed: false, Ethash: new(EthashConfig), @@ -289,7 +285,6 @@ type ChainConfig struct { ShanghaiTime *uint64 `json:"shanghaiTime,omitempty"` // Shanghai switch time (nil = no fork, 0 = already on shanghai) CancunTime *uint64 `json:"cancunTime,omitempty"` // Cancun switch time (nil = no fork, 0 = already on cancun) PragueTime *uint64 `json:"pragueTime,omitempty"` // Prague switch time (nil = no fork, 0 = already on prague) - VerkleTime *uint64 `json:"verkleTime,omitempty"` // Verkle switch time (nil = no fork, 0 = already on verkle) // TerminalTotalDifficulty is the amount of total difficulty reached by // the network that triggers the consensus upgrade. @@ -304,6 +299,9 @@ type ChainConfig struct { Ethash *EthashConfig `json:"ethash,omitempty"` Clique *CliqueConfig `json:"clique,omitempty"` IsDevMode bool `json:"isDev,omitempty"` + + // Proof in block + ProofInBlocks bool `json:"proofInBlocks,omitempty"` } // EthashConfig is the consensus engine configs for proof-of-work based sealing. 
@@ -411,9 +409,6 @@ func (c *ChainConfig) Description() string { if c.PragueTime != nil { banner += fmt.Sprintf(" - Prague: @%-10v\n", *c.PragueTime) } - if c.VerkleTime != nil { - banner += fmt.Sprintf(" - Verkle: @%-10v\n", *c.VerkleTime) - } return banner } @@ -512,11 +507,6 @@ func (c *ChainConfig) IsPrague(num *big.Int, time uint64) bool { return c.IsLondon(num) && isTimestampForked(c.PragueTime, time) } -// IsVerkle returns whether num is either equal to the Verkle fork time or greater. -func (c *ChainConfig) IsVerkle(num *big.Int, time uint64) bool { - return c.IsLondon(num) && isTimestampForked(c.VerkleTime, time) -} - // CheckCompatible checks whether scheduled fork transitions have been imported // with a mismatching chain configuration. func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64, time uint64) *ConfigCompatError { @@ -571,7 +561,6 @@ func (c *ChainConfig) CheckConfigForkOrder() error { {name: "shanghaiTime", timestamp: c.ShanghaiTime}, {name: "cancunTime", timestamp: c.CancunTime, optional: true}, {name: "pragueTime", timestamp: c.PragueTime, optional: true}, - {name: "verkleTime", timestamp: c.VerkleTime, optional: true}, } { if lastFork.name != "" { switch { @@ -675,9 +664,6 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, headNumber *big.Int, if isForkTimestampIncompatible(c.PragueTime, newcfg.PragueTime, headTimestamp) { return newTimestampCompatError("Prague fork timestamp", c.PragueTime, newcfg.PragueTime) } - if isForkTimestampIncompatible(c.VerkleTime, newcfg.VerkleTime, headTimestamp) { - return newTimestampCompatError("Verkle fork timestamp", c.VerkleTime, newcfg.VerkleTime) - } return nil } @@ -823,7 +809,6 @@ type Rules struct { IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool IsBerlin, IsLondon bool IsMerge, IsShanghai, IsCancun, IsPrague bool - IsVerkle bool } // Rules ensures c's ChainID is not nil. 
@@ -848,6 +833,5 @@ func (c *ChainConfig) Rules(num *big.Int, isMerge bool, timestamp uint64) Rules IsShanghai: c.IsShanghai(num, timestamp), IsCancun: c.IsCancun(num, timestamp), IsPrague: c.IsPrague(num, timestamp), - IsVerkle: c.IsVerkle(num, timestamp), } } diff --git a/params/verkle_params.go b/params/verkle_params.go new file mode 100644 index 000000000000..93d4f7cd6476 --- /dev/null +++ b/params/verkle_params.go @@ -0,0 +1,36 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package params + +// Verkle tree EIP: costs associated to witness accesses +var ( + WitnessBranchReadCost uint64 = 1900 + WitnessChunkReadCost uint64 = 200 + WitnessBranchWriteCost uint64 = 3000 + WitnessChunkWriteCost uint64 = 500 + WitnessChunkFillCost uint64 = 6200 +) + +// ClearVerkleWitnessCosts sets all witness costs to 0, which is necessary +// for historical block replay simulations. 
+func ClearVerkleWitnessCosts() { + WitnessBranchReadCost = 0 + WitnessChunkReadCost = 0 + WitnessBranchWriteCost = 0 + WitnessChunkWriteCost = 0 + WitnessChunkFillCost = 0 +} diff --git a/trie/database.go b/trie/database.go index 49a884fd7f39..a6f7d98913a6 100644 --- a/trie/database.go +++ b/trie/database.go @@ -18,6 +18,7 @@ package trie import ( "errors" + "sync" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" @@ -32,6 +33,7 @@ type Config struct { Cache int // Memory allowance (MB) to use for caching trie nodes in memory Preimages bool // Flag whether the preimage of trie key is recorded PathDB *pathdb.Config // Configs for experimental path-based scheme, not used yet. + Verkle bool // Testing hooks OnCommit func(states *triestate.Set) // Hook invoked when commit is performed @@ -75,6 +77,10 @@ type Database struct { diskdb ethdb.Database // Persistent database to store the snapshot preimages *preimageStore // The store for caching preimages backend backend // The backend for managing trie nodes + + // Items used for root conversion during the verkle transition + addrToRoot map[common.Address]common.Hash + addrToRootLock sync.RWMutex } // prepare initializes the database with provided configs, but the @@ -240,3 +246,41 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) { } return hdb.Node(hash) } + +func (db *Database) HasStorageRootConversion(addr common.Address) bool { + db.addrToRootLock.RLock() + defer db.addrToRootLock.RUnlock() + if db.addrToRoot == nil { + return false + } + _, ok := db.addrToRoot[addr] + return ok +} + +func (db *Database) SetStorageRootConversion(addr common.Address, root common.Hash) { + db.addrToRootLock.Lock() + defer db.addrToRootLock.Unlock() + if db.addrToRoot == nil { + db.addrToRoot = make(map[common.Address]common.Hash) + } + db.addrToRoot[addr] = root +} + +func (db *Database) StorageRootConversion(addr common.Address) common.Hash { + db.addrToRootLock.RLock() + defer 
db.addrToRootLock.RUnlock() + if db.addrToRoot == nil { + return common.Hash{} + } + return db.addrToRoot[addr] +} + +func (db *Database) ClearStorageRootConversion(addr common.Address) { + db.addrToRootLock.Lock() + defer db.addrToRootLock.Unlock() + delete(db.addrToRoot, addr) +} + +func (db *Database) IsVerkle() bool { + return db.config != nil && db.config.Verkle +} diff --git a/trie/secure_trie.go b/trie/secure_trie.go index 7f0685e30666..f4a999c2f68f 100644 --- a/trie/secure_trie.go +++ b/trie/secure_trie.go @@ -288,3 +288,7 @@ func (t *StateTrie) getSecKeyCache() map[string][]byte { } return t.secKeyCache } + +func (t *StateTrie) IsVerkle() bool { + return false +} diff --git a/trie/transition.go b/trie/transition.go new file mode 100644 index 000000000000..083aa57db1ac --- /dev/null +++ b/trie/transition.go @@ -0,0 +1,193 @@ +// Copyright 2021 go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package trie + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/gballet/go-verkle" +) + +type TransitionTrie struct { + overlay *VerkleTrie + base *SecureTrie + storage bool +} + +func NewTransitionTree(base *SecureTrie, overlay *VerkleTrie, st bool) *TransitionTrie { + return &TransitionTrie{ + overlay: overlay, + base: base, + storage: st, + } +} + +func (t *TransitionTrie) Base() *SecureTrie { + return t.base +} + +// TODO(gballet/jsign): consider removing this API. +func (t *TransitionTrie) Overlay() *VerkleTrie { + return t.overlay +} + +// GetKey returns the sha3 preimage of a hashed key that was previously used +// to store a value. +// +// TODO(fjl): remove this when StateTrie is removed +func (t *TransitionTrie) GetKey(key []byte) []byte { + if key := t.overlay.GetKey(key); key != nil { + return key + } + return t.base.GetKey(key) +} + +// Get returns the value for key stored in the trie. The value bytes must +// not be modified by the caller. If a node was not found in the database, a +// trie.MissingNodeError is returned. +func (t *TransitionTrie) GetStorage(addr common.Address, key []byte) ([]byte, error) { + if val, err := t.overlay.GetStorage(addr, key); len(val) != 0 || err != nil { + return val, nil + } + // TODO also insert value into overlay + return t.base.GetStorage(addr, key) +} + +// GetAccount abstract an account read from the trie. +func (t *TransitionTrie) GetAccount(address common.Address) (*types.StateAccount, error) { + data, err := t.overlay.GetAccount(address) + if err != nil { + // WORKAROUND, see the definition of errDeletedAccount + // for an explainer of why this if is needed. 
+ if err == errDeletedAccount { + return nil, nil + } + return nil, err + } + if data != nil { + if t.overlay.db.HasStorageRootConversion(address) { + data.Root = t.overlay.db.StorageRootConversion(address) + } + return data, nil + } + // TODO also insert value into overlay + return t.base.GetAccount(address) +} + +// Update associates key with value in the trie. If value has length zero, any +// existing value is deleted from the trie. The value bytes must not be modified +// by the caller while they are stored in the trie. If a node was not found in the +// database, a trie.MissingNodeError is returned. +func (t *TransitionTrie) UpdateStorage(address common.Address, key []byte, value []byte) error { + var v []byte + if len(value) >= 32 { + v = value[:32] + } else { + var val [32]byte + copy(val[32-len(value):], value[:]) + v = val[:] + } + return t.overlay.UpdateStorage(address, key, v) +} + +// UpdateAccount abstract an account write to the trie. +func (t *TransitionTrie) UpdateAccount(addr common.Address, account *types.StateAccount) error { + if account.Root != (common.Hash{}) && account.Root != types.EmptyRootHash { + t.overlay.db.SetStorageRootConversion(addr, account.Root) + } + return t.overlay.UpdateAccount(addr, account) +} + +// Delete removes any existing value for key from the trie. If a node was not +// found in the database, a trie.MissingNodeError is returned. +func (t *TransitionTrie) DeleteStorage(addr common.Address, key []byte) error { + return t.overlay.DeleteStorage(addr, key) +} + +// DeleteAccount abstracts an account deletion from the trie. +func (t *TransitionTrie) DeleteAccount(key common.Address) error { + return t.overlay.DeleteAccount(key) +} + +// Hash returns the root hash of the trie. It does not write to the database and +// can be used even if the trie doesn't have one. 
+func (t *TransitionTrie) Hash() common.Hash { + return t.overlay.Hash() +} + +// Commit collects all dirty nodes in the trie and replace them with the +// corresponding node hash. All collected nodes(including dirty leaves if +// collectLeaf is true) will be encapsulated into a nodeset for return. +// The returned nodeset can be nil if the trie is clean(nothing to commit). +// Once the trie is committed, it's not usable anymore. A new trie must +// be created with new root and updated trie database for following usage +func (t *TransitionTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) { + // Just return if the trie is a storage trie: otherwise, + // the overlay trie will be committed as many times as + // there are storage tries. This would kill performance. + if t.storage { + return common.Hash{}, nil, nil + } + return t.overlay.Commit(collectLeaf) +} + +// NodeIterator returns an iterator that returns nodes of the trie. Iteration +// starts at the key after the given start key. +func (t *TransitionTrie) NodeIterator(startKey []byte) (NodeIterator, error) { + panic("not implemented") // TODO: Implement +} + +// Prove constructs a Merkle proof for key. The result contains all encoded nodes +// on the path to the value at key. The value itself is also included in the last +// node and can be retrieved by verifying the proof. +// +// If the trie does not contain a value for key, the returned proof contains all +// nodes of the longest existing prefix of the key (at least the root), ending +// with the node that proves the absence of the key. 
+func (t *TransitionTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { + panic("not implemented") // TODO: Implement +} + +// IsVerkle returns true if the trie is verkle-tree based +func (t *TransitionTrie) IsVerkle() bool { + // For all intents and purposes, the calling code should treat this as a verkle trie + return true +} + +func (t *TransitionTrie) UpdateStem(key []byte, values [][]byte) error { + trie := t.overlay + switch root := trie.root.(type) { + case *verkle.InternalNode: + return root.InsertStem(key, values, t.overlay.FlatdbNodeResolver) + default: + panic("invalid root type") + } +} + +func (t *TransitionTrie) Copy() *TransitionTrie { + return &TransitionTrie{ + overlay: t.overlay.Copy(), + base: t.base.Copy(), + storage: t.storage, + } +} + +func (t *TransitionTrie) UpdateContractCode(addr common.Address, codeHash common.Hash, code []byte) error { + return t.overlay.UpdateContractCode(addr, codeHash, code) +} diff --git a/trie/utils/verkle.go b/trie/utils/verkle.go new file mode 100644 index 000000000000..17fdf1ade343 --- /dev/null +++ b/trie/utils/verkle.go @@ -0,0 +1,300 @@ +// Copyright 2021 go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package utils + +import ( + "encoding/binary" + "sync" + + "github.com/crate-crypto/go-ipa/bandersnatch/fr" + "github.com/gballet/go-verkle" + "github.com/holiman/uint256" +) + +const ( + VersionLeafKey = 0 + BalanceLeafKey = 1 + NonceLeafKey = 2 + CodeKeccakLeafKey = 3 + CodeSizeLeafKey = 4 +) + +var ( + zero = uint256.NewInt(0) + VerkleNodeWidthLog2 = 8 + HeaderStorageOffset = uint256.NewInt(64) + mainStorageOffsetLshVerkleNodeWidth = new(uint256.Int).Lsh(uint256.NewInt(256), 31-uint(VerkleNodeWidthLog2)) + CodeOffset = uint256.NewInt(128) + MainStorageOffset = new(uint256.Int).Lsh(uint256.NewInt(256), 31) + VerkleNodeWidth = uint256.NewInt(256) + codeStorageDelta = uint256.NewInt(0).Sub(CodeOffset, HeaderStorageOffset) + + getTreePolyIndex0Point *verkle.Point +) + +type PointCache struct { + cache map[string]*verkle.Point + lock sync.RWMutex +} + +func NewPointCache() *PointCache { + return &PointCache{ + cache: make(map[string]*verkle.Point), + } +} + +func (pc *PointCache) GetTreeKeyHeader(addr []byte) *verkle.Point { + pc.lock.RLock() + point, ok := pc.cache[string(addr)] + pc.lock.RUnlock() + if ok { + return point + } + + point = EvaluateAddressPoint(addr) + pc.lock.Lock() + pc.cache[string(addr)] = point + pc.lock.Unlock() + return point +} + +func (pc *PointCache) GetTreeKeyVersionCached(addr []byte) []byte { + p := pc.GetTreeKeyHeader(addr) + v := PointToHash(p, VersionLeafKey) + return v[:] +} + +func init() { + // The byte array is the Marshalled output of the point computed as such: + //cfg, _ := verkle.GetConfig() + //verkle.FromLEBytes(&getTreePolyIndex0Fr[0], []byte{2, 64}) + //= cfg.CommitToPoly(getTreePolyIndex0Fr[:], 1) + getTreePolyIndex0Point = new(verkle.Point) + err := getTreePolyIndex0Point.SetBytes([]byte{34, 25, 109, 242, 193, 5, 144, 224, 76, 52, 189, 92, 197, 126, 9, 145, 27, 152, 199, 130, 165, 3, 210, 27, 193, 131, 142, 28, 110, 26, 16, 191}) + if err != nil { + panic(err) + } +} + +// GetTreeKey performs both the work of the 
spec's get_tree_key function, and that +// of pedersen_hash: it builds the polynomial in pedersen_hash without having to +// create a mostly zero-filled buffer and "type cast" it to a 128-long 16-byte +// array. Since at most the first 5 coefficients of the polynomial will be non-zero, +// these 5 coefficients are created directly. +func GetTreeKey(address []byte, treeIndex *uint256.Int, subIndex byte) []byte { + if len(address) < 32 { + var aligned [32]byte + address = append(aligned[:32-len(address)], address...) + } + + // poly = [2+256*64, address_le_low, address_le_high, tree_index_le_low, tree_index_le_high] + var poly [5]fr.Element + + // 32-byte address, interpreted as two little endian + // 16-byte numbers. + verkle.FromLEBytes(&poly[1], address[:16]) + verkle.FromLEBytes(&poly[2], address[16:]) + + // treeIndex must be interpreted as a 32-byte aligned little-endian integer. + // e.g: if treeIndex is 0xAABBCC, we need the byte representation to be 0xCCBBAA00...00. + // poly[3] = LE({CC,BB,AA,00...0}) (16 bytes), poly[4]=LE({00,00,...}) (16 bytes). + // + // To avoid unnecessary endianness conversions for go-ipa, we do some trick: + // - poly[3]'s byte representation is the same as the *top* 16 bytes (trieIndexBytes[16:]) of + // 32-byte aligned big-endian representation (BE({00,...,AA,BB,CC})). + // - poly[4]'s byte representation is the same as the *low* 16 bytes (trieIndexBytes[:16]) of + // the 32-byte aligned big-endian representation (BE({00,00,...}). + trieIndexBytes := treeIndex.Bytes32() + verkle.FromBytes(&poly[3], trieIndexBytes[16:]) + verkle.FromBytes(&poly[4], trieIndexBytes[:16]) + + cfg := verkle.GetConfig() + ret := cfg.CommitToPoly(poly[:], 0) + + // add a constant point corresponding to poly[0]=[2+256*64]. 
+ ret.Add(ret, getTreePolyIndex0Point) + + return PointToHash(ret, subIndex) +} + +func GetTreeKeyAccountLeaf(address []byte, leaf byte) []byte { + return GetTreeKey(address, zero, leaf) +} + +func GetTreeKeyVersion(address []byte) []byte { + return GetTreeKey(address, zero, VersionLeafKey) +} + +func GetTreeKeyVersionWithEvaluatedAddress(addrp *verkle.Point) []byte { + return GetTreeKeyWithEvaluatedAddess(addrp, zero, VersionLeafKey) +} + +func GetTreeKeyBalance(address []byte) []byte { + return GetTreeKey(address, zero, BalanceLeafKey) +} + +func GetTreeKeyNonce(address []byte) []byte { + return GetTreeKey(address, zero, NonceLeafKey) +} + +func GetTreeKeyCodeKeccak(address []byte) []byte { + return GetTreeKey(address, zero, CodeKeccakLeafKey) +} + +func GetTreeKeyCodeSize(address []byte) []byte { + return GetTreeKey(address, zero, CodeSizeLeafKey) +} + +func GetTreeKeyCodeChunk(address []byte, chunk *uint256.Int) []byte { + treeIndex, subIndex := GetTreeKeyCodeChunkIndices(chunk) + return GetTreeKey(address, treeIndex, subIndex) +} + +func GetTreeKeyCodeChunkIndices(chunk *uint256.Int) (*uint256.Int, byte) { + chunkOffset := new(uint256.Int).Add(CodeOffset, chunk) + treeIndex := new(uint256.Int).Div(chunkOffset, VerkleNodeWidth) + subIndexMod := new(uint256.Int).Mod(chunkOffset, VerkleNodeWidth) + var subIndex byte + if len(subIndexMod) != 0 { + subIndex = byte(subIndexMod[0]) + } + return treeIndex, subIndex +} + +func GetTreeKeyCodeChunkWithEvaluatedAddress(addressPoint *verkle.Point, chunk *uint256.Int) []byte { + chunkOffset := new(uint256.Int).Add(CodeOffset, chunk) + treeIndex := new(uint256.Int).Div(chunkOffset, VerkleNodeWidth) + subIndexMod := new(uint256.Int).Mod(chunkOffset, VerkleNodeWidth) + var subIndex byte + if len(subIndexMod) != 0 { + subIndex = byte(subIndexMod[0]) + } + return GetTreeKeyWithEvaluatedAddess(addressPoint, treeIndex, subIndex) +} + +func GetTreeKeyStorageSlot(address []byte, storageKey *uint256.Int) []byte { + pos := 
storageKey.Clone() + if storageKey.Cmp(codeStorageDelta) < 0 { + pos.Add(HeaderStorageOffset, storageKey) + } else { + pos.Add(MainStorageOffset, storageKey) + } + treeIndex := new(uint256.Int).Div(pos, VerkleNodeWidth) + + // calculate the sub_index, i.e. the index in the stem tree. + // Because the modulus is 256, it's the last byte of treeIndex + subIndexMod := new(uint256.Int).Mod(pos, VerkleNodeWidth) + var subIndex byte + if len(subIndexMod) != 0 { + // uint256 is broken into 4 little-endian quads, + // each with native endianness. Extract the least + // significant byte. + subIndex = byte(subIndexMod[0]) + } + return GetTreeKey(address, treeIndex, subIndex) +} + +func PointToHash(evaluated *verkle.Point, suffix byte) []byte { + // The output of Byte() is big engian for banderwagon. This + // introduces an imbalance in the tree, because hashes are + // elements of a 253-bit field. This means more than half the + // tree would be empty. To avoid this problem, use a little + // endian commitment and chop the MSB. + retb := evaluated.Bytes() + for i := 0; i < 16; i++ { + retb[31-i], retb[i] = retb[i], retb[31-i] + } + retb[31] = suffix + return retb[:] +} + +func GetTreeKeyWithEvaluatedAddess(evaluated *verkle.Point, treeIndex *uint256.Int, subIndex byte) []byte { + var poly [5]fr.Element + + poly[0].SetZero() + poly[1].SetZero() + poly[2].SetZero() + + // little-endian, 32-byte aligned treeIndex + var index [32]byte + for i := 0; i < len(treeIndex); i++ { + binary.LittleEndian.PutUint64(index[i*8:(i+1)*8], treeIndex[i]) + } + verkle.FromLEBytes(&poly[3], index[:16]) + verkle.FromLEBytes(&poly[4], index[16:]) + + cfg := verkle.GetConfig() + ret := cfg.CommitToPoly(poly[:], 0) + + // add the pre-evaluated address + ret.Add(ret, evaluated) + + return PointToHash(ret, subIndex) +} + +func EvaluateAddressPoint(address []byte) *verkle.Point { + if len(address) < 32 { + var aligned [32]byte + address = append(aligned[:32-len(address)], address...) 
+ } + var poly [3]fr.Element + + poly[0].SetZero() + + // 32-byte address, interpreted as two little endian + // 16-byte numbers. + verkle.FromLEBytes(&poly[1], address[:16]) + verkle.FromLEBytes(&poly[2], address[16:]) + + cfg := verkle.GetConfig() + ret := cfg.CommitToPoly(poly[:], 0) + + // add a constant point + ret.Add(ret, getTreePolyIndex0Point) + + return ret +} + +func GetTreeKeyStorageSlotWithEvaluatedAddress(evaluated *verkle.Point, storageKey []byte) []byte { + treeIndex, subIndex := GetTreeKeyStorageSlotTreeIndexes(storageKey) + return GetTreeKeyWithEvaluatedAddess(evaluated, treeIndex, subIndex) +} + +func GetTreeKeyStorageSlotTreeIndexes(storageKey []byte) (*uint256.Int, byte) { + var pos uint256.Int + pos.SetBytes(storageKey) + + // If the storage slot is in the header, we need to add the header offset. + if pos.Cmp(codeStorageDelta) < 0 { + // This addition is always safe; it can't ever overflow since pos. + +package utils + +import ( + "crypto/sha256" + "encoding/hex" + "math/big" + "math/rand" + "testing" + + "github.com/gballet/go-verkle" + "github.com/holiman/uint256" +) + +func TestGetTreeKey(t *testing.T) { + var addr [32]byte + for i := 0; i < 16; i++ { + addr[1+2*i] = 0xff + } + n := uint256.NewInt(1) + n = n.Lsh(n, 129) + n.Add(n, uint256.NewInt(3)) + tk := GetTreeKey(addr[:], n, 1) + + got := hex.EncodeToString(tk) + exp := "f42f932f43faf5d14b292b9009c45c28da61dbf66e20dbedc2e02dfd64ff5a01" + if got != exp { + t.Fatalf("Generated trie key is incorrect: %s != %s", got, exp) + } +} + +func TestConstantPoint(t *testing.T) { + var expectedPoly [1]verkle.Fr + + cfg := verkle.GetConfig() + verkle.FromLEBytes(&expectedPoly[0], []byte{2, 64}) + expected := cfg.CommitToPoly(expectedPoly[:], 1) + + if !expected.Equal(getTreePolyIndex0Point) { + t.Fatalf("Marshalled constant value is incorrect: %x != %x", expected.Bytes(), getTreePolyIndex0Point.Bytes()) + } +} + +func BenchmarkPedersenHash(b *testing.B) { + var addr, v [32]byte + + b.ResetTimer() + 
b.ReportAllocs() + + for i := 0; i < b.N; i++ { + rand.Read(v[:]) + rand.Read(addr[:]) + GetTreeKeyCodeSize(addr[:]) + } +} + +func sha256GetTreeKeyCodeSize(addr []byte) []byte { + digest := sha256.New() + digest.Write(addr) + treeIndexBytes := new(big.Int).Bytes() + var payload [32]byte + copy(payload[:len(treeIndexBytes)], treeIndexBytes) + digest.Write(payload[:]) + h := digest.Sum(nil) + h[31] = CodeKeccakLeafKey + return h +} + +func BenchmarkSha256Hash(b *testing.B) { + var addr, v [32]byte + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + rand.Read(v[:]) + rand.Read(addr[:]) + sha256GetTreeKeyCodeSize(addr[:]) + } +} diff --git a/trie/verkle.go b/trie/verkle.go new file mode 100644 index 000000000000..d0a914fde87b --- /dev/null +++ b/trie/verkle.go @@ -0,0 +1,501 @@ +// Copyright 2021 go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package trie + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/trie/trienode" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" + "github.com/holiman/uint256" +) + +// VerkleTrie is a wrapper around VerkleNode that implements the trie.Trie +// interface so that Verkle trees can be reused verbatim. +type VerkleTrie struct { + root verkle.VerkleNode + db *Database + pointCache *utils.PointCache + ended bool +} + +func (vt *VerkleTrie) ToDot() string { + return verkle.ToDot(vt.root) +} + +func NewVerkleTrie(root verkle.VerkleNode, db *Database, pointCache *utils.PointCache, ended bool) *VerkleTrie { + return &VerkleTrie{ + root: root, + db: db, + pointCache: pointCache, + ended: ended, + } +} + +func (trie *VerkleTrie) FlatdbNodeResolver(path []byte) ([]byte, error) { + return trie.db.diskdb.Get(append(FlatDBVerkleNodeKeyPrefix, path...)) +} + +func (trie *VerkleTrie) InsertMigratedLeaves(leaves []verkle.LeafNode) error { + return trie.root.(*verkle.InternalNode).InsertMigratedLeaves(leaves, trie.FlatdbNodeResolver) +} + +var ( + errInvalidRootType = errors.New("invalid node type for root") + + // WORKAROUND: this special error is returned if it has been + // detected that the account was deleted in the verkle tree. + // This is needed in case an account was translated while it + // was in the MPT, and was selfdestructed in verkle mode. + // + // This is only a problem for replays, and this code is not + // needed after SELFDESTRUCT has been removed. + errDeletedAccount = errors.New("account deleted in VKT") + + FlatDBVerkleNodeKeyPrefix = []byte("flat-") // prefix for flatdb keys +) + +// GetKey returns the sha3 preimage of a hashed key that was previously used +// to store a value. 
+func (trie *VerkleTrie) GetKey(key []byte) []byte { + return key +} + +// Get returns the value for key stored in the trie. The value bytes must +// not be modified by the caller. If a node was not found in the database, a +// trie.MissingNodeError is returned. +func (trie *VerkleTrie) GetStorage(addr common.Address, key []byte) ([]byte, error) { + pointEval := trie.pointCache.GetTreeKeyHeader(addr[:]) + k := utils.GetTreeKeyStorageSlotWithEvaluatedAddress(pointEval, key) + return trie.root.Get(k, trie.FlatdbNodeResolver) +} + +// GetWithHashedKey returns the value, assuming that the key has already +// been hashed. +func (trie *VerkleTrie) GetWithHashedKey(key []byte) ([]byte, error) { + return trie.root.Get(key, trie.FlatdbNodeResolver) +} + +func (t *VerkleTrie) GetAccount(addr common.Address) (*types.StateAccount, error) { + acc := &types.StateAccount{} + versionkey := t.pointCache.GetTreeKeyVersionCached(addr[:]) + var ( + values [][]byte + err error + ) + switch t.root.(type) { + case *verkle.InternalNode: + values, err = t.root.(*verkle.InternalNode).GetStem(versionkey[:31], t.FlatdbNodeResolver) + default: + return nil, errInvalidRootType + } + if err != nil { + return nil, fmt.Errorf("GetAccount (%x) error: %v", addr, err) + } + + if values == nil { + return nil, nil + } + if len(values[utils.NonceLeafKey]) > 0 { + acc.Nonce = binary.LittleEndian.Uint64(values[utils.NonceLeafKey]) + } + // if the account has been deleted, then values[10] will be 0 and not nil. If it has + // been recreated after that, then its code keccak will NOT be 0. So return `nil` if + // the nonce, and values[10], and code keccak is 0. 
+ + if acc.Nonce == 0 && len(values) > 10 && len(values[10]) > 0 && bytes.Equal(values[utils.CodeKeccakLeafKey], zero[:]) { + if !t.ended { + return nil, errDeletedAccount + } else { + return nil, nil + } + } + var balance [32]byte + copy(balance[:], values[utils.BalanceLeafKey]) + for i := 0; i < len(balance)/2; i++ { + balance[len(balance)-i-1], balance[i] = balance[i], balance[len(balance)-i-1] + } + // var balance [32]byte + // if len(values[utils.BalanceLeafKey]) > 0 { + // for i := 0; i < len(balance); i++ { + // balance[len(balance)-i-1] = values[utils.BalanceLeafKey][i] + // } + // } + acc.Balance = new(big.Int).SetBytes(balance[:]) + acc.CodeHash = values[utils.CodeKeccakLeafKey] + // TODO fix the code size as well + + return acc, nil +} + +var zero [32]byte + +func (t *VerkleTrie) UpdateAccount(addr common.Address, acc *types.StateAccount) error { + var ( + err error + nonce, balance [32]byte + values = make([][]byte, verkle.NodeWidth) + stem = t.pointCache.GetTreeKeyVersionCached(addr[:]) + ) + + // Only evaluate the polynomial once + values[utils.VersionLeafKey] = zero[:] + values[utils.NonceLeafKey] = nonce[:] + values[utils.BalanceLeafKey] = balance[:] + values[utils.CodeKeccakLeafKey] = acc.CodeHash[:] + + binary.LittleEndian.PutUint64(nonce[:], acc.Nonce) + bbytes := acc.Balance.Bytes() + if len(bbytes) > 0 { + for i, b := range bbytes { + balance[len(bbytes)-i-1] = b + } + } + + switch root := t.root.(type) { + case *verkle.InternalNode: + err = root.InsertStem(stem, values, t.FlatdbNodeResolver) + default: + return errInvalidRootType + } + if err != nil { + return fmt.Errorf("UpdateAccount (%x) error: %v", addr, err) + } + // TODO figure out if the code size needs to be updated, too + + return nil +} + +func (trie *VerkleTrie) UpdateStem(key []byte, values [][]byte) error { + switch root := trie.root.(type) { + case *verkle.InternalNode: + return root.InsertStem(key, values, trie.FlatdbNodeResolver) + default: + panic("invalid root type") + } +} + 
+// Update associates key with value in the trie. If value has length zero, any +// existing value is deleted from the trie. The value bytes must not be modified +// by the caller while they are stored in the trie. If a node was not found in the +// database, a trie.MissingNodeError is returned. +func (trie *VerkleTrie) UpdateStorage(address common.Address, key, value []byte) error { + k := utils.GetTreeKeyStorageSlotWithEvaluatedAddress(trie.pointCache.GetTreeKeyHeader(address[:]), key) + var v [32]byte + if len(value) >= 32 { + copy(v[:], value[:32]) + } else { + copy(v[32-len(value):], value[:]) + } + return trie.root.Insert(k, v[:], trie.FlatdbNodeResolver) +} + +func (t *VerkleTrie) DeleteAccount(addr common.Address) error { + var ( + err error + values = make([][]byte, verkle.NodeWidth) + stem = t.pointCache.GetTreeKeyVersionCached(addr[:]) + ) + + for i := 0; i < verkle.NodeWidth; i++ { + values[i] = zero[:] + } + + switch root := t.root.(type) { + case *verkle.InternalNode: + err = root.InsertStem(stem, values, t.FlatdbNodeResolver) + default: + return errInvalidRootType + } + if err != nil { + return fmt.Errorf("DeleteAccount (%x) error: %v", addr, err) + } + // TODO figure out if the code size needs to be updated, too + + return nil +} + +// Delete removes any existing value for key from the trie. If a node was not +// found in the database, a trie.MissingNodeError is returned. +func (trie *VerkleTrie) DeleteStorage(addr common.Address, key []byte) error { + pointEval := trie.pointCache.GetTreeKeyHeader(addr[:]) + k := utils.GetTreeKeyStorageSlotWithEvaluatedAddress(pointEval, key) + var zero [32]byte + return trie.root.Insert(k, zero[:], trie.FlatdbNodeResolver) +} + +// Hash returns the root hash of the trie. It does not write to the database and +// can be used even if the trie doesn't have one. 
+func (trie *VerkleTrie) Hash() common.Hash { + return trie.root.Commit().Bytes() +} + +func nodeToDBKey(n verkle.VerkleNode) []byte { + ret := n.Commitment().Bytes() + return ret[:] +} + +// Commit writes all nodes to the trie's memory database, tracking the internal +// and external (for account tries) references. +func (trie *VerkleTrie) Commit(_ bool) (common.Hash, *trienode.NodeSet, error) { + root, ok := trie.root.(*verkle.InternalNode) + if !ok { + return common.Hash{}, nil, errors.New("unexpected root node type") + } + nodes, err := root.BatchSerialize() + if err != nil { + return common.Hash{}, nil, fmt.Errorf("serializing tree nodes: %s", err) + } + + batch := trie.db.diskdb.NewBatch() + path := make([]byte, 0, len(FlatDBVerkleNodeKeyPrefix)+32) + path = append(path, FlatDBVerkleNodeKeyPrefix...) + for _, node := range nodes { + path := append(path[:len(FlatDBVerkleNodeKeyPrefix)], node.Path...) + + if err := batch.Put(path, node.SerializedBytes); err != nil { + return common.Hash{}, nil, fmt.Errorf("put node to disk: %s", err) + } + + if batch.ValueSize() >= ethdb.IdealBatchSize { + batch.Write() + batch.Reset() + } + } + batch.Write() + + return trie.Hash(), nil, nil +} + +// NodeIterator returns an iterator that returns nodes of the trie. Iteration +// starts at the key after the given start key. +func (trie *VerkleTrie) NodeIterator(startKey []byte) (NodeIterator, error) { + return newVerkleNodeIterator(trie, nil) +} + +// Prove constructs a Merkle proof for key. The result contains all encoded nodes +// on the path to the value at key. The value itself is also included in the last +// node and can be retrieved by verifying the proof. +// +// If the trie does not contain a value for key, the returned proof contains all +// nodes of the longest existing prefix of the key (at least the root), ending +// with the node that proves the absence of the key. 
+func (trie *VerkleTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { + panic("not implemented") +} + +func (trie *VerkleTrie) Copy() *VerkleTrie { + return &VerkleTrie{ + root: trie.root.Copy(), + db: trie.db, + pointCache: trie.pointCache, + } +} + +func (trie *VerkleTrie) IsVerkle() bool { + return true +} + +func ProveAndSerialize(pretrie, posttrie *VerkleTrie, keys [][]byte, resolver verkle.NodeResolverFn) (*verkle.VerkleProof, verkle.StateDiff, error) { + var postroot verkle.VerkleNode + if posttrie != nil { + postroot = posttrie.root + } + proof, _, _, _, err := verkle.MakeVerkleMultiProof(pretrie.root, postroot, keys, resolver) + if err != nil { + return nil, nil, err + } + + p, kvps, err := verkle.SerializeProof(proof) + if err != nil { + return nil, nil, err + } + + return p, kvps, nil +} + +type set = map[string]struct{} + +func addKey(s set, key []byte) { + s[string(key)] = struct{}{} +} + +func DeserializeAndVerifyVerkleProof(vp *verkle.VerkleProof, root []byte, statediff verkle.StateDiff) error { + rootC := new(verkle.Point) + rootC.SetBytes(root) + + var others set = set{} // Mark when an "other" stem has been seen + + proof, err := verkle.DeserializeProof(vp, statediff) + if err != nil { + return fmt.Errorf("verkle proof deserialization error: %w", err) + } + + for _, stem := range proof.PoaStems { + addKey(others, stem) + } + + pretree, err := verkle.PreStateTreeFromProof(proof, rootC) + if err != nil { + return fmt.Errorf("error rebuilding the pre-tree from proof: %w", err) + } + // TODO this should not be necessary, remove it + // after the new proof generation code has stabilized. 
+ for _, stemdiff := range statediff { + for _, suffixdiff := range stemdiff.SuffixDiffs { + var key [32]byte + copy(key[:31], stemdiff.Stem[:]) + key[31] = suffixdiff.Suffix + + val, err := pretree.Get(key[:], nil) + if err != nil { + return fmt.Errorf("could not find key %x in tree rebuilt from proof: %w", key, err) + } + if len(val) > 0 { + if !bytes.Equal(val, suffixdiff.CurrentValue[:]) { + return fmt.Errorf("could not find correct value at %x in tree rebuilt from proof: %x != %x", key, val, *suffixdiff.CurrentValue) + } + } else { + if suffixdiff.CurrentValue != nil && len(suffixdiff.CurrentValue) != 0 { + return fmt.Errorf("could not find correct value at %x in tree rebuilt from proof: %x != %x", key, val, *suffixdiff.CurrentValue) + } + } + } + } + + posttree, err := verkle.PostStateTreeFromStateDiff(pretree, statediff) + if err != nil { + return fmt.Errorf("error rebuilding the post-tree from proof: %w", err) + } + + return verkle.VerifyVerkleProofWithPreAndPostTrie(proof, pretree, posttree) +} + +// ChunkedCode represents a sequence of 32-bytes chunks of code (31 bytes of which +// are actual code, and 1 byte is the pushdata offset). +type ChunkedCode []byte + +// Copy the values here so as to avoid an import cycle +const ( + PUSH1 = byte(0x60) + PUSH3 = byte(0x62) + PUSH4 = byte(0x63) + PUSH7 = byte(0x66) + PUSH21 = byte(0x74) + PUSH30 = byte(0x7d) + PUSH32 = byte(0x7f) +) + +// ChunkifyCode generates the chunked version of an array representing EVM bytecode +func ChunkifyCode(code []byte) ChunkedCode { + var ( + chunkOffset = 0 // offset in the chunk + chunkCount = len(code) / 31 + codeOffset = 0 // offset in the code + ) + if len(code)%31 != 0 { + chunkCount++ + } + chunks := make([]byte, chunkCount*32) + for i := 0; i < chunkCount; i++ { + // number of bytes to copy, 31 unless + // the end of the code has been reached. 
+ end := 31 * (i + 1) + if len(code) < end { + end = len(code) + } + + // Copy the code itself + copy(chunks[i*32+1:], code[31*i:end]) + + // chunk offset = taken from the + // last chunk. + if chunkOffset > 31 { + // skip offset calculation if push + // data covers the whole chunk + chunks[i*32] = 31 + chunkOffset = 1 + continue + } + chunks[32*i] = byte(chunkOffset) + chunkOffset = 0 + + // Check each instruction and update the offset + // it should be 0 unless a PUSHn overflows. + for ; codeOffset < end; codeOffset++ { + if code[codeOffset] >= PUSH1 && code[codeOffset] <= PUSH32 { + codeOffset += int(code[codeOffset] - PUSH1 + 1) + if codeOffset+1 >= 31*(i+1) { + codeOffset++ + chunkOffset = codeOffset - 31*(i+1) + break + } + } + } + } + + return chunks +} + +func (t *VerkleTrie) SetStorageRootConversion(addr common.Address, root common.Hash) { + t.db.SetStorageRootConversion(addr, root) +} + +func (t *VerkleTrie) ClearStrorageRootConversion(addr common.Address) { + t.db.ClearStorageRootConversion(addr) +} + +func (t *VerkleTrie) UpdateContractCode(addr common.Address, codeHash common.Hash, code []byte) error { + var ( + chunks = ChunkifyCode(code) + values [][]byte + key []byte + err error + ) + for i, chunknr := 0, uint64(0); i < len(chunks); i, chunknr = i+32, chunknr+1 { + groupOffset := (chunknr + 128) % 256 + if groupOffset == 0 /* start of new group */ || chunknr == 0 /* first chunk in header group */ { + values = make([][]byte, verkle.NodeWidth) + key = utils.GetTreeKeyCodeChunkWithEvaluatedAddress(t.pointCache.GetTreeKeyHeader(addr[:]), uint256.NewInt(chunknr)) + } + values[groupOffset] = chunks[i : i+32] + + // Reuse the calculated key to also update the code size. 
+ if i == 0 { + cs := make([]byte, 32) + binary.LittleEndian.PutUint64(cs, uint64(len(code))) + values[utils.CodeSizeLeafKey] = cs + } + + if groupOffset == 255 || len(chunks)-i <= 32 { + err = t.UpdateStem(key[:31], values) + + if err != nil { + return fmt.Errorf("UpdateContractCode (addr=%x) error: %w", addr[:], err) + } + } + } + return nil +} diff --git a/trie/verkle_iterator.go b/trie/verkle_iterator.go new file mode 100644 index 000000000000..c5f59a0f5937 --- /dev/null +++ b/trie/verkle_iterator.go @@ -0,0 +1,218 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "github.com/ethereum/go-ethereum/common" + + "github.com/gballet/go-verkle" +) + +type verkleNodeIteratorState struct { + Node verkle.VerkleNode + Index int +} + +type verkleNodeIterator struct { + trie *VerkleTrie + current verkle.VerkleNode + lastErr error + + stack []verkleNodeIteratorState +} + +func newVerkleNodeIterator(trie *VerkleTrie, start []byte) (NodeIterator, error) { + if trie.Hash() == zero { + return new(nodeIterator), nil + } + it := &verkleNodeIterator{trie: trie, current: trie.root} + // it.err = it.seek(start) + return it, nil +} + +// Next moves the iterator to the next node. 
If the parameter is false, any child +// nodes will be skipped. +func (it *verkleNodeIterator) Next(descend bool) bool { + if it.lastErr == errIteratorEnd { + it.lastErr = errIteratorEnd + return false + } + + if len(it.stack) == 0 { + it.stack = append(it.stack, verkleNodeIteratorState{Node: it.trie.root, Index: 0}) + it.current = it.trie.root + + return true + } + + switch node := it.current.(type) { + case *verkle.InternalNode: + context := &it.stack[len(it.stack)-1] + + // Look for the next non-empty child + children := node.Children() + for ; context.Index < len(children); context.Index++ { + if _, ok := children[context.Index].(verkle.Empty); !ok { + it.stack = append(it.stack, verkleNodeIteratorState{Node: children[context.Index], Index: 0}) + it.current = children[context.Index] + return it.Next(descend) + } + } + + // Reached the end of this node, go back to the parent, if + // this isn't root. + if len(it.stack) == 1 { + it.lastErr = errIteratorEnd + return false + } + it.stack = it.stack[:len(it.stack)-1] + it.current = it.stack[len(it.stack)-1].Node + it.stack[len(it.stack)-1].Index++ + return it.Next(descend) + case *verkle.LeafNode: + // Look for the next non-empty value + for i := it.stack[len(it.stack)-1].Index; i < 256; i++ { + if node.Value(i) != nil { + it.stack[len(it.stack)-1].Index = i + 1 + return true + } + } + + // go back to parent to get the next leaf + it.stack = it.stack[:len(it.stack)-1] + it.current = it.stack[len(it.stack)-1].Node + it.stack[len(it.stack)-1].Index++ + return it.Next(descend) + case *verkle.HashedNode: + // resolve the node + data, err := it.trie.db.diskdb.Get(nodeToDBKey(node)) + if err != nil { + panic(err) + } + it.current, err = verkle.ParseNode(data, byte(len(it.stack)-1)) + if err != nil { + panic(err) + } + + // update the stack and parent with the resolved node + it.stack[len(it.stack)-1].Node = it.current + parent := &it.stack[len(it.stack)-2] + parent.Node.(*verkle.InternalNode).SetChild(parent.Index, 
 it.current) + return true + default: + panic("invalid node type") + } +} + +// Error returns the error status of the iterator. +func (it *verkleNodeIterator) Error() error { + if it.lastErr == errIteratorEnd { + return nil + } + return it.lastErr +} + +// Hash returns the hash of the current node. +func (it *verkleNodeIterator) Hash() common.Hash { + return it.current.Commit().Bytes() +} + +// Parent returns the hash of the parent of the current node. The hash may be the one +// grandparent if the immediate parent is an internal node with no hash. +func (it *verkleNodeIterator) Parent() common.Hash { + return it.stack[len(it.stack)-1].Node.Commit().Bytes() +} + +// Path returns the hex-encoded path to the current node. +// Callers must not retain references to the return value after calling Next. +// For leaf nodes, the last element of the path is the 'terminator symbol' 0x10. +func (it *verkleNodeIterator) Path() []byte { + if it.Leaf() { + return it.LeafKey() + } + var path []byte + for i, state := range it.stack { + // skip the last byte + if i == len(it.stack)-1 { + break + } + path = append(path, byte(state.Index)) + } + return path +} + +func (it *verkleNodeIterator) NodeBlob() []byte { + panic("not completely implemented") +} + +// Leaf returns true iff the current node is a leaf node. +func (it *verkleNodeIterator) Leaf() bool { + _, ok := it.current.(*verkle.LeafNode) + return ok +} + +// LeafKey returns the key of the leaf. The method panics if the iterator is not +// positioned at a leaf. Callers must not retain references to the value after +// calling Next. +func (it *verkleNodeIterator) LeafKey() []byte { + leaf, ok := it.current.(*verkle.LeafNode) + if !ok { + panic("Leaf() called on an verkle node iterator not at a leaf location") + } + + return leaf.Key(it.stack[len(it.stack)-1].Index - 1) +} + +// LeafBlob returns the content of the leaf. The method panics if the iterator +// is not positioned at a leaf. 
Callers must not retain references to the value +// after calling Next. +func (it *verkleNodeIterator) LeafBlob() []byte { + leaf, ok := it.current.(*verkle.LeafNode) + if !ok { + panic("LeafBlob() called on an verkle node iterator not at a leaf location") + } + + return leaf.Value(it.stack[len(it.stack)-1].Index - 1) +} + +// LeafProof returns the Merkle proof of the leaf. The method panics if the +// iterator is not positioned at a leaf. Callers must not retain references +// to the value after calling Next. +func (it *verkleNodeIterator) LeafProof() [][]byte { + _, ok := it.current.(*verkle.LeafNode) + if !ok { + panic("LeafProof() called on an verkle node iterator not at a leaf location") + } + + // return it.trie.Prove(leaf.Key()) + panic("not completely implemented") +} + +// AddResolver sets an intermediate database to use for looking up trie nodes +// before reaching into the real persistent layer. +// +// This is not required for normal operation, rather is an optimization for +// cases where trie nodes can be recovered from some external mechanism without +// reading from disk. In those cases, this resolver allows short circuiting +// accesses and returning them from memory. +// +// Before adding a similar mechanism to any other place in Geth, consider +// making trie.Database an interface and wrapping at that level. It's a huge +// refactor, but it could be worth it if another occurrence arises. +func (it *verkleNodeIterator) AddResolver(NodeResolver) { + // Not implemented, but should not panic +} diff --git a/trie/verkle_iterator_test.go b/trie/verkle_iterator_test.go new file mode 100644 index 000000000000..1fd3fd76a6d9 --- /dev/null +++ b/trie/verkle_iterator_test.go @@ -0,0 +1,68 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" +) + +func TestVerkleIterator(t *testing.T) { + trie := NewVerkleTrie(verkle.New(), NewDatabase(rawdb.NewMemoryDatabase()), utils.NewPointCache(), true) + account0 := &types.StateAccount{ + Nonce: 1, + Balance: big.NewInt(2), + Root: types.EmptyRootHash, + CodeHash: nil, + } + // NOTE: the code size isn't written to the trie via TryUpdateAccount + // so it will be missing from the test nodes. + trie.UpdateAccount(common.Address{}, account0) + account1 := &types.StateAccount{ + Nonce: 1337, + Balance: big.NewInt(2000), + Root: types.EmptyRootHash, + CodeHash: nil, + } + // This address is meant to hash to a value that has the same first byte as 0xbf + var clash = common.HexToAddress("69fd8034cdb20934dedffa7dccb4fb3b8062a8be") + trie.UpdateAccount(clash, account1) + + // Manually go over every node to check that we get all + // the correct nodes. 
+ it, err := trie.NodeIterator(nil) + if err != nil { + t.Fatal(err) + } + var leafcount int + for it.Next(true) { + t.Logf("Node: %x", it.Path()) + if it.Leaf() { + leafcount++ + t.Logf("\tLeaf: %x", it.LeafKey()) + } + } + if leafcount != 6 { + t.Fatalf("invalid leaf count: %d != 6", leafcount) + } +} diff --git a/trie/verkle_test.go b/trie/verkle_test.go new file mode 100644 index 000000000000..df7a68ccee80 --- /dev/null +++ b/trie/verkle_test.go @@ -0,0 +1,381 @@ +// Copyright 2021 go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package trie + +import ( + "bytes" + "encoding/hex" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" +) + +func TestReproduceTree(t *testing.T) { + presentKeys := [][]byte{ + common.Hex2Bytes("318dea512b6f3237a2d4763cf49bf26de3b617fb0cabe38a97807a5549df4d01"), + common.Hex2Bytes("e6ed6c222e3985050b4fc574b136b0a42c63538e9ab970995cd418ba8e526400"), + common.Hex2Bytes("18fb432d3b859ec3a1803854e8cceea75d092e52d0d4a4398d13022496745a02"), + common.Hex2Bytes("318dea512b6f3237a2d4763cf49bf26de3b617fb0cabe38a97807a5549df4d02"), + common.Hex2Bytes("18fb432d3b859ec3a1803854e8cceea75d092e52d0d4a4398d13022496745a04"), + common.Hex2Bytes("e6ed6c222e3985050b4fc574b136b0a42c63538e9ab970995cd418ba8e526402"), + common.Hex2Bytes("e6ed6c222e3985050b4fc574b136b0a42c63538e9ab970995cd418ba8e526403"), + common.Hex2Bytes("18fb432d3b859ec3a1803854e8cceea75d092e52d0d4a4398d13022496745a00"), + common.Hex2Bytes("18fb432d3b859ec3a1803854e8cceea75d092e52d0d4a4398d13022496745a03"), + common.Hex2Bytes("e6ed6c222e3985050b4fc574b136b0a42c63538e9ab970995cd418ba8e526401"), + common.Hex2Bytes("e6ed6c222e3985050b4fc574b136b0a42c63538e9ab970995cd418ba8e526404"), + common.Hex2Bytes("318dea512b6f3237a2d4763cf49bf26de3b617fb0cabe38a97807a5549df4d00"), + common.Hex2Bytes("18fb432d3b859ec3a1803854e8cceea75d092e52d0d4a4398d13022496745a01"), + } + + absentKeys := [][]byte{ + common.Hex2Bytes("318dea512b6f3237a2d4763cf49bf26de3b617fb0cabe38a97807a5549df4d03"), + common.Hex2Bytes("318dea512b6f3237a2d4763cf49bf26de3b617fb0cabe38a97807a5549df4d04"), + } + + values := [][]byte{ + common.Hex2Bytes("320122e8584be00d000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0300000000000000000000000000000000000000000000000000000000000000"), + 
common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"), + common.Hex2Bytes("1bc176f2790c91e6000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("e703000000000000000000000000000000000000000000000000000000000000"), + } + + root := verkle.New() + + for i, key := range presentKeys { + root.Insert(key, values[i], nil) + } + root.Commit() + + proof, Cs, zis, yis, err := verkle.MakeVerkleMultiProof(root, nil, append(presentKeys, absentKeys...), nil) + if err != nil { + t.Fatalf("could not create proof: %v", err) + } + cfg := verkle.GetConfig() + if ok, err := verkle.VerifyVerkleProof(proof, Cs, zis, yis, cfg); !ok || err != nil { + t.Fatalf("could not verify proof: %v", err) + } + + t.Log("commitments returned by proof:") + for i, c := range Cs { + t.Logf("%d %x", i, c.Bytes()) + } + + p, _, err := verkle.SerializeProof(proof) + if err != nil { + t.Fatal(err) + } + t.Logf("serialized: %v", p) + t.Logf("tree: %s\n%x\n", verkle.ToDot(root), root.Commitment().Bytes()) +} + +func TestChunkifyCodeTestnet(t *testing.T) { + code, _ := hex.DecodeString("6080604052348015600f57600080fd5b506004361060285760003560e01c806381ca91d314602d575b600080fd5b60336047565b604051603e9190605a565b60405180910390f35b60005481565b6054816073565b82525050565b6000602082019050606d6000830184604d565b92915050565b600081905091905056fea264697066735822122000382db0489577c1646ea2147a05f92f13f32336a32f1f82c6fb10b63e19f04064736f6c63430008070033") + 
chunks := ChunkifyCode(code) + if len(chunks) != 32*(len(code)/31+1) { + t.Fatalf("invalid length %d != %d", len(chunks), 32*(len(code)/31+1)) + } + if chunks[0] != 0 { + t.Fatalf("invalid offset in first chunk %d != 0", chunks[0]) + } + t.Logf("%x\n", chunks[0]) + for i := 32; i < len(chunks); i += 32 { + chunk := chunks[i : 32+i] + if chunk[0] != 0 && i != 5*32 { + t.Fatalf("invalid offset in chunk #%d %d != 0", i+1, chunk[0]) + } + if i == 4 && chunk[0] != 12 { + t.Fatalf("invalid offset in chunk #%d %d != 0", i+1, chunk[0]) + } + } + t.Logf("code=%x, chunks=%x\n", code, chunks) + + code, _ = hex.DecodeString("608060405234801561001057600080fd5b506004361061002b5760003560e01c8063f566852414610030575b600080fd5b61003861004e565b6040516100459190610146565b60405180910390f35b6000600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166381ca91d36040518163ffffffff1660e01b815260040160206040518083038186803b1580156100b857600080fd5b505afa1580156100cc573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906100f0919061010a565b905090565b60008151905061010481610170565b92915050565b6000602082840312156101205761011f61016b565b5b600061012e848285016100f5565b91505092915050565b61014081610161565b82525050565b600060208201905061015b6000830184610137565b92915050565b6000819050919050565b600080fd5b61017981610161565b811461018457600080fd5b5056fea2646970667358221220d8add45a339f741a94b4fe7f22e101b560dc8a5874cbd957a884d8c9239df86264736f6c63430008070033") + chunks = ChunkifyCode(code) + if len(chunks) != 32*((len(code)+30)/31) { + t.Fatalf("invalid length %d", len(chunks)) + } + if chunks[0] != 0 { + t.Fatalf("invalid offset in first chunk %d != 0", chunks[0]) + } + t.Logf("%x\n", chunks[0]) + expected := []byte{0, 1, 0, 13, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3} + for i := 32; i < len(chunks); i += 32 { + chunk := chunks[i : 32+i] + t.Log(i, i/32, chunk[0]) + if chunk[0] != expected[i/32-1] { + t.Fatalf("invalid offset in chunk #%d %d != 
%d", i/32-1, chunk[0], expected[i/32-1]) + } + } + t.Logf("code=%x, chunks=%x\n", code, chunks) + + code, _ = hex.DecodeString("6080604052348015600f57600080fd5b506004361060285760003560e01c8063ab5ed15014602d575b600080fd5b60336047565b604051603e9190605d565b60405180910390f35b60006001905090565b6057816076565b82525050565b6000602082019050607060008301846050565b92915050565b600081905091905056fea2646970667358221220163c79eab5630c3dbe22f7cc7692da08575198dda76698ae8ee2e3bfe62af3de64736f6c63430008070033") + chunks = ChunkifyCode(code) + if len(chunks) != 32*((len(code)+30)/31) { + t.Fatalf("invalid length %d", len(chunks)) + } + if chunks[0] != 0 { + t.Fatalf("invalid offset in first chunk %d != 0", chunks[0]) + } + expected = []byte{0, 0, 0, 0, 13} + for i := 32; i < len(chunks); i += 32 { + chunk := chunks[i : 32+i] + if chunk[0] != expected[i/32-1] { + t.Fatalf("invalid offset in chunk #%d %d != %d", i/32-1, chunk[0], expected[i/32-1]) + } + } + t.Logf("code=%x, chunks=%x\n", code, chunks) +} + +func TestChunkifyCodeSimple(t *testing.T) { + code := []byte{ + 0, PUSH4, 1, 2, 3, 4, PUSH3, 58, 68, 12, PUSH21, 1, 2, 3, 4, 5, 6, + 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + // Second 31 bytes + 0, PUSH21, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + PUSH7, 1, 2, 3, 4, 5, 6, 7, + // Third 31 bytes + PUSH30, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, + } + t.Logf("code=%x", code) + chunks := ChunkifyCode(code) + if len(chunks) != 96 { + t.Fatalf("invalid length %d", len(chunks)) + } + if chunks[0] != 0 { + t.Fatalf("invalid offset in first chunk %d != 0", chunks[0]) + } + if chunks[32] != 1 { + t.Fatalf("invalid offset in second chunk %d != 1, chunk=%x", chunks[32], chunks[32:64]) + } + if chunks[64] != 0 { + t.Fatalf("invalid offset in third chunk %d != 0", chunks[64]) + } + t.Logf("code=%x, chunks=%x\n", code, chunks) +} + +func TestChunkifyCodeFuzz(t *testing.T) { + code 
:= []byte{ + 3, PUSH32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + } + chunks := ChunkifyCode(code) + if len(chunks) != 32 { + t.Fatalf("invalid length %d", len(chunks)) + } + if chunks[0] != 0 { + t.Fatalf("invalid offset in first chunk %d != 0", chunks[0]) + } + t.Logf("code=%x, chunks=%x\n", code, chunks) + + code = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, PUSH32, + } + chunks = ChunkifyCode(code) + if len(chunks) != 32 { + t.Fatalf("invalid length %d", len(chunks)) + } + if chunks[0] != 0 { + t.Fatalf("invalid offset in first chunk %d != 0", chunks[0]) + } + t.Logf("code=%x, chunks=%x\n", code, chunks) + + code = []byte{ + PUSH4, PUSH32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + } + chunks = ChunkifyCode(code) + if len(chunks) != 64 { + t.Fatalf("invalid length %d", len(chunks)) + } + if chunks[0] != 0 { + t.Fatalf("invalid offset in first chunk %d != 0", chunks[0]) + } + if chunks[32] != 0 { + t.Fatalf("invalid offset in second chunk %d != 0, chunk=%x", chunks[32], chunks[32:64]) + } + t.Logf("code=%x, chunks=%x\n", code, chunks) + + code = []byte{ + PUSH4, PUSH32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + } + chunks = ChunkifyCode(code) + if len(chunks) != 64 { + t.Fatalf("invalid length %d", len(chunks)) + } + if chunks[0] != 0 { + t.Fatalf("invalid offset in first chunk %d != 0", chunks[0]) + } + if chunks[32] != 0 { + t.Fatalf("invalid offset in second chunk %d != 0, chunk=%x", chunks[32], chunks[32:64]) + } + t.Logf("code=%x, chunks=%x\n", code, chunks) +} + +// This test case checks what happens when two keys whose absence is being proven start with the +// same byte (0x0b in this case). Only one 'extension status' should be declared. 
+func TestReproduceCondrieuStemAggregationInProofOfAbsence(t *testing.T) { + presentKeys := [][]byte{ + common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73f58ffa580800"), + common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73f58ffa580801"), + common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73f58ffa580802"), + common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73f58ffa580803"), + common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73f58ffa580804"), + common.Hex2Bytes("9f2a59ea98d7cb610eff49447571e1610188937ce9266c6b4ded1b6ee37ecd00"), + common.Hex2Bytes("9f2a59ea98d7cb610eff49447571e1610188937ce9266c6b4ded1b6ee37ecd01"), + common.Hex2Bytes("9f2a59ea98d7cb610eff49447571e1610188937ce9266c6b4ded1b6ee37ecd02"), + common.Hex2Bytes("9f2a59ea98d7cb610eff49447571e1610188937ce9266c6b4ded1b6ee37ecd03"), + } + + absentKeys := [][]byte{ + common.Hex2Bytes("089783b59ef47adbdf85546c92d9b93ffd2f4803093ee93727bb42a1537dfb00"), + common.Hex2Bytes("089783b59ef47adbdf85546c92d9b93ffd2f4803093ee93727bb42a1537dfb01"), + common.Hex2Bytes("089783b59ef47adbdf85546c92d9b93ffd2f4803093ee93727bb42a1537dfb02"), + common.Hex2Bytes("089783b59ef47adbdf85546c92d9b93ffd2f4803093ee93727bb42a1537dfb03"), + common.Hex2Bytes("089783b59ef47adbdf85546c92d9b93ffd2f4803093ee93727bb42a1537dfb04"), + common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f00"), + common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f01"), + common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f02"), + common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f03"), + common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f04"), + common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f80"), + common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f81"), + 
common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f82"), + common.Hex2Bytes("0b373ba3992dde5cfee854e1a786559ba0b6a13d376550c1ed58c00dc9706f83"), + common.Hex2Bytes("0bb7fda24b2ea0de0f791b27f8a040fcc79f8e1e2dfe50443bc632543ba5e700"), + common.Hex2Bytes("0bb7fda24b2ea0de0f791b27f8a040fcc79f8e1e2dfe50443bc632543ba5e702"), + common.Hex2Bytes("0bb7fda24b2ea0de0f791b27f8a040fcc79f8e1e2dfe50443bc632543ba5e703"), + common.Hex2Bytes("3aeba70b6afb762af4a507c8ec10747479d797c6ec11c14f92b5699634bd18d4"), + } + + values := [][]byte{ + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("53bfa56cfcaddf191e0200000000000000000000000000000000000000000000"), + common.Hex2Bytes("0700000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("389a890a6ce3e618843300000000000000000000000000000000000000000000"), + common.Hex2Bytes("0200000000000000000000000000000000000000000000000000000000000000"), + common.Hex2Bytes("c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"), + } + + root := verkle.New() + + for i, key := range presentKeys { + root.Insert(key, values[i], nil) + } + root.Commit() + + proof, Cs, zis, yis, _ := verkle.MakeVerkleMultiProof(root, nil, append(presentKeys, absentKeys...), nil) + cfg := verkle.GetConfig() + if ok, err := verkle.VerifyVerkleProof(proof, Cs, zis, yis, cfg); !ok || err != nil { + t.Fatal("could not verify proof") + } + + t.Log("commitments returned by proof:") + for i, c := range Cs { + t.Logf("%d %x", i, c.Bytes()) + } + + p, _, err := verkle.SerializeProof(proof) + if err != nil { + t.Fatal(err) + } + t.Logf("serialized: %p", p) + t.Logf("tree: %s\n%x\n", verkle.ToDot(root), 
root.Commitment().Bytes()) + + t.Logf("%d", len(proof.ExtStatus)) + if len(proof.ExtStatus) != 5 { + t.Fatalf("invalid number of declared stems: %d != 5", len(proof.ExtStatus)) + } +} + +// Cover the case in which a stem is both used for a proof of absence, and for a proof of presence. +func TestReproduceCondrieuPoAStemConflictWithAnotherStem(t *testing.T) { + presentKeys := [][]byte{ + common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73f58ffa580800"), + } + + absentKeys := [][]byte{ + common.Hex2Bytes("6766d007d8fd90ea45b2ac9027ff04fa57e49527f11010a12a73008ffa580800"), + // the key differs from the key present... ^^ here + } + + values := [][]byte{ + common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000"), + } + + root := verkle.New() + + for i, key := range presentKeys { + root.Insert(key, values[i], nil) + } + root.Commit() + + proof, Cs, zis, yis, _ := verkle.MakeVerkleMultiProof(root, nil, append(presentKeys, absentKeys...), nil) + cfg := verkle.GetConfig() + if ok, err := verkle.VerifyVerkleProof(proof, Cs, zis, yis, cfg); !ok || err != nil { + t.Fatal("could not verify proof") + } + + t.Log("commitments returned by proof:") + for i, c := range Cs { + t.Logf("%d %x", i, c.Bytes()) + } + + p, _, err := verkle.SerializeProof(proof) + if err != nil { + t.Fatal(err) + } + t.Logf("serialized: %p", p) + t.Logf("tree: %s\n%x\n", verkle.ToDot(root), root.Commitment().Bytes()) + + t.Logf("%d", len(proof.ExtStatus)) + if len(proof.PoaStems) != 0 { + t.Fatal("a proof-of-absence stem was declared, when there was no need") + } +} + +func TestEmptyKeySetInProveAndSerialize(t *testing.T) { + tree := verkle.New() + verkle.MakeVerkleMultiProof(tree, nil, [][]byte{}, nil) +} + +func TestGetTreeKeys(t *testing.T) { + addr := common.Hex2Bytes("71562b71999873DB5b286dF957af199Ec94617f7") + target := common.Hex2Bytes("274cde18dd9dbb04caf16ad5ee969c19fe6ca764d5688b5e1d419f4ac6cd1600") + key := utils.GetTreeKeyVersion(addr) + 
t.Logf("key=%x", key) + t.Logf("actualKey=%x", target) + if !bytes.Equal(key, target) { + t.Fatalf("differing output %x != %x", key, target) + } +}