diff --git a/.github/workflows/qa-clean-exit-block-downloading.yml b/.github/workflows/qa-clean-exit-block-downloading.yml index 345bdfa4c1b..1f3c9f406a0 100644 --- a/.github/workflows/qa-clean-exit-block-downloading.yml +++ b/.github/workflows/qa-clean-exit-block-downloading.yml @@ -49,7 +49,7 @@ jobs: set +e # Disable exit on error # Run Erigon, send ctrl-c and check logs - python3 $ERIGON_QA_PATH/test_system/qa-tests/clean-exit/run_and_check_clean_exit.py ${{ github.workspace }}/build/bin $ERIGON_TESTBED_DATA_DIR $WORKING_TIME_SECONDS Erigon3 + python3 $ERIGON_QA_PATH/test_system/qa-tests/clean-exit/run_and_check_clean_exit.py ${{ github.workspace }}/build/bin $ERIGON_TESTBED_DATA_DIR $WORKING_TIME_SECONDS # Capture monitoring script exit status test_exit_status=$? diff --git a/.github/workflows/qa-clean-exit-snapshot-downloading.yml b/.github/workflows/qa-clean-exit-snapshot-downloading.yml index 0ba3307b397..79819132cd1 100644 --- a/.github/workflows/qa-clean-exit-snapshot-downloading.yml +++ b/.github/workflows/qa-clean-exit-snapshot-downloading.yml @@ -45,7 +45,7 @@ jobs: set +e # Disable exit on error # Run Erigon, send ctrl-c and check logs - python3 $ERIGON_QA_PATH/test_system/qa-tests/clean-exit/run_and_check_clean_exit.py ${{ github.workspace }}/build/bin $ERIGON_DATA_DIR $WORKING_TIME_SECONDS Erigon3 + python3 $ERIGON_QA_PATH/test_system/qa-tests/clean-exit/run_and_check_clean_exit.py ${{ github.workspace }}/build/bin $ERIGON_DATA_DIR $WORKING_TIME_SECONDS # Capture monitoring script exit status test_exit_status=$? diff --git a/.github/workflows/qa-snap-download.yml b/.github/workflows/qa-snap-download.yml index 2b5039fa015..146cfa66fbd 100644 --- a/.github/workflows/qa-snap-download.yml +++ b/.github/workflows/qa-snap-download.yml @@ -39,7 +39,7 @@ jobs: set +e # Disable exit on error # Run Erigon, monitor snapshot downloading and check logs - python3 $ERIGON_QA_PATH/test_system/qa-tests/snap-download/run_and_check_snap_download.py ${{ github.workspace }}/build/bin $ERIGON_DATA_DIR $TOTAL_TIME_SECONDS Erigon3 + python3 $ERIGON_QA_PATH/test_system/qa-tests/snap-download/run_and_check_snap_download.py ${{ github.workspace }}/build/bin $ERIGON_DATA_DIR $TOTAL_TIME_SECONDS # Capture monitoring script exit status test_exit_status=$? diff --git a/.github/workflows/qa-tip-tracking.yml b/.github/workflows/qa-tip-tracking.yml index 0ac40b50cef..c9a45413276 100644 --- a/.github/workflows/qa-tip-tracking.yml +++ b/.github/workflows/qa-tip-tracking.yml @@ -45,7 +45,7 @@ jobs: # 1. Launch the testbed Erigon instance # 2. Allow time for the Erigon to achieve synchronization # 3. Begin timing the duration that Erigon maintains synchronization - python3 $ERIGON_QA_PATH/test_system/qa-tests/tip-tracking/run_and_check_tip_tracking.py ${{ github.workspace }}/build/bin $ERIGON_TESTBED_DATA_DIR $TRACKING_TIME_SECONDS $TOTAL_TIME_SECONDS Erigon3 + python3 $ERIGON_QA_PATH/test_system/qa-tests/tip-tracking/run_and_check_tip_tracking.py ${{ github.workspace }}/build/bin $ERIGON_TESTBED_DATA_DIR $TRACKING_TIME_SECONDS $TOTAL_TIME_SECONDS # Capture monitoring script exit status test_exit_status=$? diff --git a/Makefile b/Makefile index 0dcbf5f9df5..44afb3bac80 100644 --- a/Makefile +++ b/Makefile @@ -211,7 +211,7 @@ mocks: mocks-clean ## mocks-clean: cleans all generated test mocks mocks-clean: - grep -r -l --exclude-dir="erigon-lib" --exclude-dir="tests" --exclude-dir="*$(GOBINREL)*" "^// Code generated by MockGen. DO NOT EDIT.$$" . 
| xargs rm -r + grep -r -l --exclude-dir="erigon-lib" --exclude-dir="*$(GOBINREL)*" "^// Code generated by MockGen. DO NOT EDIT.$$" . | xargs rm -r ## solc: generate all solidity contracts solc: diff --git a/README.md index 54fca1c2cd4..d6a6efd23fd 100644 --- a/README.md +++ b/README.md @@ -329,8 +329,7 @@ Engine API. #### Caplin's Usage. -Caplin is be enabled by default. to disable it and enable the Engine API, use the `--externalcl` flag. from that point -on, an external Consensus Layer will not be need +Caplin is enabled by default. To disable it and enable the Engine API, use the `--externalcl` flag. From that point on, an external Consensus Layer will not be needed anymore. Caplin also has an archival mode for historical states and blocks. It can be enabled through the `--caplin.archive` @@ -603,6 +602,7 @@ In order to configure the ports, use: |-----------|------|----------|---------|---------------| | REST | 5555 | TCP | REST | Public | + #### `shared` ports | Component | Port | Protocol | Purpose | Should Expose | @@ -786,14 +786,12 @@ Supported networks: all (except Mumbai). stage_trace_index - E3 can execute 1 historical transaction - without executing its block - because history/indices have transaction-granularity, instead of block-granularity. -- E3 doesn't store Logs (aka Receipts) - it always re-executing historical txn (but it's cheaper then in E2 - see point - above). Also Logs LRU added in E2 (release/2.60) and E3: https://github.com/ledgerwatch/erigon/pull/10112 +- Doesn't store Receipts/Logs - it always re-executes historical transactions - but re-execution is cheaper (see point + above). We would like to see how this impacts users - feedback welcome. Likely we will add a small LRU cache here, and later an optional flag "to persist receipts". -- `--sync.loop.block.limit` is enabled by default. (Default: `2_000`. - Set `--sync.loop.block.limit=10_000_000 --batchSize=1g` to increase sync speed on good hardware). -- datadir/chaindata is small now - to prevent it's grow: we recommend set `--batchSize <= 1G`. And it's fine - to `rm -rf chaindata` -- can symlink/mount latest state to fast drive and history to cheap drive +- More cold-start-friendly and OS-pre-fetch-friendly. +- datadir/chaindata is small now - to prevent its growth, we recommend setting `--batchSize <= 1G`. Probably 512MB is + enough. ### E3 datadir structure @@ -810,7 +808,7 @@ datadir # There are 4 domains: account, storage, code, commitment ``` -### E3 can store state on fast disk and history on cheap disk +### E3 can store state on fast disk and history on slow disk If you can afford to store datadir on 1 nvme-raid - great. If not - it's possible to store history on a cheap drive. @@ -867,24 +865,3 @@ du -hsc /erigon/snapshots/* 1.3T /erigon/snapshots/idx 3.7T total ``` - -### E3 other perf trics - -- `--sync.loop.block.limit=10_000_000 --batchSize=1g` - likely will help for sync speed. -- on cloud-drives (good throughput, bad latency) - can enable OS's brain to pre-fetch some data (`madv_normal` instead - of `madv_random`). For `snapshots/domain` folder (latest - state) `KV_MADV_NORMAL_NO_LAST_LVL=accounts,storage,commitment` (or if have enough - RAM: `KV_MADV_NORMAL=accounts,storage,commitment`). For `chaindata` folder (latest updates) `MDBX_READAHEAD=true`.
- For all files - `SNAPSHOT_MADV_RND=false` - -- can lock latest state in RAM - to prevent from eviction (node may face high historical RPC traffic without impacting - Chain-Tip perf): - -``` -vmtouch -vdlw /mnt/erigon/snapshots/domain/*bt -ls /mnt/erigon/snapshots/domain/*.kv | parallel vmtouch -vdlw - -# if it failing with "can't allocate memory", try: -sync && sudo sysctl vm.drop_caches=3 -echo 1 > /proc/sys/vm/compact_memory -``` diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index 1f822ca40d3..2e5ec3d4b50 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -46,6 +46,7 @@ import ( "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/event" @@ -714,7 +715,7 @@ func (b *SimulatedBackend) callContract(_ context.Context, call ethereum.CallMsg } // Set infinite balance to the fake caller account. from := statedb.GetOrNewStateObject(call.From) - from.SetBalance(uint256.NewInt(0).SetAllOne()) + from.SetBalance(uint256.NewInt(0).SetAllOne(), tracing.BalanceChangeUnspecified) // Execute the call. msg := callMsg{call} diff --git a/cl/abstract/beacon_state.go b/cl/abstract/beacon_state.go index c10eacac9f7..cc77a206181 100644 --- a/cl/abstract/beacon_state.go +++ b/cl/abstract/beacon_state.go @@ -61,7 +61,6 @@ type BeaconStateSSZ interface { HashSSZ() (out [32]byte, err error) } -//go:generate mockgen -typed=true -destination=./mock_services/beacon_state_mutator_mock.go -package=mock_services . BeaconStateMutator type BeaconStateMutator interface { SetVersion(version clparams.StateVersion) SetSlot(slot uint64) @@ -105,7 +104,7 @@ type BeaconStateMutator interface { SetValidatorInactivityScore(index int, score uint64) error SetCurrentEpochParticipationFlags(flags []cltypes.ParticipationFlags) SetPreviousEpochParticipationFlags(flags []cltypes.ParticipationFlags) - SetPreviousEpochAttestations(attestations *solid.ListSSZ[*solid.PendingAttestation]) // temporarily skip this mock + SetPreviousEpochAttestations(attestations *solid.ListSSZ[*solid.PendingAttestation]) AddEth1DataVote(vote *cltypes.Eth1Data) AddValidator(validator solid.Validator, balance uint64) @@ -193,14 +192,8 @@ type BeaconStateMinimal interface { PreviousEpochAttestationsLength() int } -// BeaconStateReader is an interface for reading the beacon state. -// -//go:generate mockgen -typed=true -destination=./mock_services/beacon_state_reader_mock.go -package=mock_services . BeaconStateReader -type BeaconStateReader interface { - ValidatorPublicKey(index int) (common.Bytes48, error) - GetDomain(domainType [4]byte, epoch uint64) ([]byte, error) - CommitteeCount(epoch uint64) uint64 - ValidatorForValidatorIndex(index int) (solid.Validator, error) - Version() clparams.StateVersion - GenesisValidatorsRoot() common.Hash +// TODO figure this out +type BeaconStateCopying interface { + //CopyInto(dst *raw.BeaconState) error + //Copy() (*raw.BeaconState, error) } diff --git a/cl/abstract/mock_services/beacon_state_mutator_mock.go b/cl/abstract/mock_services/beacon_state_mutator_mock.go deleted file mode 100644 index ce2eedf4276..00000000000 --- a/cl/abstract/mock_services/beacon_state_mutator_mock.go +++ /dev/null @@ -1,2123 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. 
-// Source: github.com/ledgerwatch/erigon/cl/abstract (interfaces: BeaconStateMutator) -// -// Generated by this command: -// -// mockgen -typed=true -destination=./mock_services/beacon_state_mutator_mock.go -package=mock_services . BeaconStateMutator -// - -// Package mock_services is a generated GoMock package. -package mock_services - -import ( - reflect "reflect" - - common "github.com/ledgerwatch/erigon-lib/common" - clparams "github.com/ledgerwatch/erigon/cl/clparams" - cltypes "github.com/ledgerwatch/erigon/cl/cltypes" - solid "github.com/ledgerwatch/erigon/cl/cltypes/solid" - gomock "go.uber.org/mock/gomock" -) - -// MockBeaconStateMutator is a mock of BeaconStateMutator interface. -type MockBeaconStateMutator struct { - ctrl *gomock.Controller - recorder *MockBeaconStateMutatorMockRecorder -} - -// MockBeaconStateMutatorMockRecorder is the mock recorder for MockBeaconStateMutator. -type MockBeaconStateMutatorMockRecorder struct { - mock *MockBeaconStateMutator -} - -// NewMockBeaconStateMutator creates a new mock instance. -func NewMockBeaconStateMutator(ctrl *gomock.Controller) *MockBeaconStateMutator { - mock := &MockBeaconStateMutator{ctrl: ctrl} - mock.recorder = &MockBeaconStateMutatorMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockBeaconStateMutator) EXPECT() *MockBeaconStateMutatorMockRecorder { - return m.recorder -} - -// AddCurrentEpochAtteastation mocks base method. -func (m *MockBeaconStateMutator) AddCurrentEpochAtteastation(arg0 *solid.PendingAttestation) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddCurrentEpochAtteastation", arg0) -} - -// AddCurrentEpochAtteastation indicates an expected call of AddCurrentEpochAtteastation. -func (mr *MockBeaconStateMutatorMockRecorder) AddCurrentEpochAtteastation(arg0 any) *MockBeaconStateMutatorAddCurrentEpochAtteastationCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddCurrentEpochAtteastation", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddCurrentEpochAtteastation), arg0) - return &MockBeaconStateMutatorAddCurrentEpochAtteastationCall{Call: call} -} - -// MockBeaconStateMutatorAddCurrentEpochAtteastationCall wrap *gomock.Call -type MockBeaconStateMutatorAddCurrentEpochAtteastationCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorAddCurrentEpochAtteastationCall) Return() *MockBeaconStateMutatorAddCurrentEpochAtteastationCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorAddCurrentEpochAtteastationCall) Do(f func(*solid.PendingAttestation)) *MockBeaconStateMutatorAddCurrentEpochAtteastationCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorAddCurrentEpochAtteastationCall) DoAndReturn(f func(*solid.PendingAttestation)) *MockBeaconStateMutatorAddCurrentEpochAtteastationCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// AddCurrentEpochParticipationFlags mocks base method. -func (m *MockBeaconStateMutator) AddCurrentEpochParticipationFlags(arg0 cltypes.ParticipationFlags) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddCurrentEpochParticipationFlags", arg0) -} - -// AddCurrentEpochParticipationFlags indicates an expected call of AddCurrentEpochParticipationFlags. 
-func (mr *MockBeaconStateMutatorMockRecorder) AddCurrentEpochParticipationFlags(arg0 any) *MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddCurrentEpochParticipationFlags", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddCurrentEpochParticipationFlags), arg0) - return &MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall{Call: call} -} - -// MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall wrap *gomock.Call -type MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall) Return() *MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall) Do(f func(cltypes.ParticipationFlags)) *MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall) DoAndReturn(f func(cltypes.ParticipationFlags)) *MockBeaconStateMutatorAddCurrentEpochParticipationFlagsCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// AddEth1DataVote mocks base method. -func (m *MockBeaconStateMutator) AddEth1DataVote(arg0 *cltypes.Eth1Data) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddEth1DataVote", arg0) -} - -// AddEth1DataVote indicates an expected call of AddEth1DataVote. -func (mr *MockBeaconStateMutatorMockRecorder) AddEth1DataVote(arg0 any) *MockBeaconStateMutatorAddEth1DataVoteCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddEth1DataVote", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddEth1DataVote), arg0) - return &MockBeaconStateMutatorAddEth1DataVoteCall{Call: call} -} - -// MockBeaconStateMutatorAddEth1DataVoteCall wrap *gomock.Call -type MockBeaconStateMutatorAddEth1DataVoteCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorAddEth1DataVoteCall) Return() *MockBeaconStateMutatorAddEth1DataVoteCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorAddEth1DataVoteCall) Do(f func(*cltypes.Eth1Data)) *MockBeaconStateMutatorAddEth1DataVoteCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorAddEth1DataVoteCall) DoAndReturn(f func(*cltypes.Eth1Data)) *MockBeaconStateMutatorAddEth1DataVoteCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// AddHistoricalRoot mocks base method. -func (m *MockBeaconStateMutator) AddHistoricalRoot(arg0 common.Hash) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddHistoricalRoot", arg0) -} - -// AddHistoricalRoot indicates an expected call of AddHistoricalRoot. 
-func (mr *MockBeaconStateMutatorMockRecorder) AddHistoricalRoot(arg0 any) *MockBeaconStateMutatorAddHistoricalRootCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddHistoricalRoot", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddHistoricalRoot), arg0) - return &MockBeaconStateMutatorAddHistoricalRootCall{Call: call} -} - -// MockBeaconStateMutatorAddHistoricalRootCall wrap *gomock.Call -type MockBeaconStateMutatorAddHistoricalRootCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorAddHistoricalRootCall) Return() *MockBeaconStateMutatorAddHistoricalRootCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorAddHistoricalRootCall) Do(f func(common.Hash)) *MockBeaconStateMutatorAddHistoricalRootCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorAddHistoricalRootCall) DoAndReturn(f func(common.Hash)) *MockBeaconStateMutatorAddHistoricalRootCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// AddHistoricalSummary mocks base method. -func (m *MockBeaconStateMutator) AddHistoricalSummary(arg0 *cltypes.HistoricalSummary) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddHistoricalSummary", arg0) -} - -// AddHistoricalSummary indicates an expected call of AddHistoricalSummary. -func (mr *MockBeaconStateMutatorMockRecorder) AddHistoricalSummary(arg0 any) *MockBeaconStateMutatorAddHistoricalSummaryCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddHistoricalSummary", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddHistoricalSummary), arg0) - return &MockBeaconStateMutatorAddHistoricalSummaryCall{Call: call} -} - -// MockBeaconStateMutatorAddHistoricalSummaryCall wrap *gomock.Call -type MockBeaconStateMutatorAddHistoricalSummaryCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorAddHistoricalSummaryCall) Return() *MockBeaconStateMutatorAddHistoricalSummaryCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorAddHistoricalSummaryCall) Do(f func(*cltypes.HistoricalSummary)) *MockBeaconStateMutatorAddHistoricalSummaryCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorAddHistoricalSummaryCall) DoAndReturn(f func(*cltypes.HistoricalSummary)) *MockBeaconStateMutatorAddHistoricalSummaryCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// AddInactivityScore mocks base method. -func (m *MockBeaconStateMutator) AddInactivityScore(arg0 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddInactivityScore", arg0) -} - -// AddInactivityScore indicates an expected call of AddInactivityScore. 
-func (mr *MockBeaconStateMutatorMockRecorder) AddInactivityScore(arg0 any) *MockBeaconStateMutatorAddInactivityScoreCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddInactivityScore", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddInactivityScore), arg0) - return &MockBeaconStateMutatorAddInactivityScoreCall{Call: call} -} - -// MockBeaconStateMutatorAddInactivityScoreCall wrap *gomock.Call -type MockBeaconStateMutatorAddInactivityScoreCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorAddInactivityScoreCall) Return() *MockBeaconStateMutatorAddInactivityScoreCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorAddInactivityScoreCall) Do(f func(uint64)) *MockBeaconStateMutatorAddInactivityScoreCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorAddInactivityScoreCall) DoAndReturn(f func(uint64)) *MockBeaconStateMutatorAddInactivityScoreCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// AddPreviousEpochAttestation mocks base method. -func (m *MockBeaconStateMutator) AddPreviousEpochAttestation(arg0 *solid.PendingAttestation) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddPreviousEpochAttestation", arg0) -} - -// AddPreviousEpochAttestation indicates an expected call of AddPreviousEpochAttestation. -func (mr *MockBeaconStateMutatorMockRecorder) AddPreviousEpochAttestation(arg0 any) *MockBeaconStateMutatorAddPreviousEpochAttestationCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPreviousEpochAttestation", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddPreviousEpochAttestation), arg0) - return &MockBeaconStateMutatorAddPreviousEpochAttestationCall{Call: call} -} - -// MockBeaconStateMutatorAddPreviousEpochAttestationCall wrap *gomock.Call -type MockBeaconStateMutatorAddPreviousEpochAttestationCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorAddPreviousEpochAttestationCall) Return() *MockBeaconStateMutatorAddPreviousEpochAttestationCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorAddPreviousEpochAttestationCall) Do(f func(*solid.PendingAttestation)) *MockBeaconStateMutatorAddPreviousEpochAttestationCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorAddPreviousEpochAttestationCall) DoAndReturn(f func(*solid.PendingAttestation)) *MockBeaconStateMutatorAddPreviousEpochAttestationCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// AddPreviousEpochParticipationAt mocks base method. -func (m *MockBeaconStateMutator) AddPreviousEpochParticipationAt(arg0 int, arg1 byte) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddPreviousEpochParticipationAt", arg0, arg1) -} - -// AddPreviousEpochParticipationAt indicates an expected call of AddPreviousEpochParticipationAt. 
-func (mr *MockBeaconStateMutatorMockRecorder) AddPreviousEpochParticipationAt(arg0, arg1 any) *MockBeaconStateMutatorAddPreviousEpochParticipationAtCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPreviousEpochParticipationAt", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddPreviousEpochParticipationAt), arg0, arg1) - return &MockBeaconStateMutatorAddPreviousEpochParticipationAtCall{Call: call} -} - -// MockBeaconStateMutatorAddPreviousEpochParticipationAtCall wrap *gomock.Call -type MockBeaconStateMutatorAddPreviousEpochParticipationAtCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorAddPreviousEpochParticipationAtCall) Return() *MockBeaconStateMutatorAddPreviousEpochParticipationAtCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorAddPreviousEpochParticipationAtCall) Do(f func(int, byte)) *MockBeaconStateMutatorAddPreviousEpochParticipationAtCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorAddPreviousEpochParticipationAtCall) DoAndReturn(f func(int, byte)) *MockBeaconStateMutatorAddPreviousEpochParticipationAtCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// AddPreviousEpochParticipationFlags mocks base method. -func (m *MockBeaconStateMutator) AddPreviousEpochParticipationFlags(arg0 cltypes.ParticipationFlags) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddPreviousEpochParticipationFlags", arg0) -} - -// AddPreviousEpochParticipationFlags indicates an expected call of AddPreviousEpochParticipationFlags. -func (mr *MockBeaconStateMutatorMockRecorder) AddPreviousEpochParticipationFlags(arg0 any) *MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddPreviousEpochParticipationFlags", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddPreviousEpochParticipationFlags), arg0) - return &MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall{Call: call} -} - -// MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall wrap *gomock.Call -type MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall) Return() *MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall) Do(f func(cltypes.ParticipationFlags)) *MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall) DoAndReturn(f func(cltypes.ParticipationFlags)) *MockBeaconStateMutatorAddPreviousEpochParticipationFlagsCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// AddValidator mocks base method. -func (m *MockBeaconStateMutator) AddValidator(arg0 solid.Validator, arg1 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AddValidator", arg0, arg1) -} - -// AddValidator indicates an expected call of AddValidator. 
-func (mr *MockBeaconStateMutatorMockRecorder) AddValidator(arg0, arg1 any) *MockBeaconStateMutatorAddValidatorCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddValidator", reflect.TypeOf((*MockBeaconStateMutator)(nil).AddValidator), arg0, arg1) - return &MockBeaconStateMutatorAddValidatorCall{Call: call} -} - -// MockBeaconStateMutatorAddValidatorCall wrap *gomock.Call -type MockBeaconStateMutatorAddValidatorCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorAddValidatorCall) Return() *MockBeaconStateMutatorAddValidatorCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorAddValidatorCall) Do(f func(solid.Validator, uint64)) *MockBeaconStateMutatorAddValidatorCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorAddValidatorCall) DoAndReturn(f func(solid.Validator, uint64)) *MockBeaconStateMutatorAddValidatorCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// AppendValidator mocks base method. -func (m *MockBeaconStateMutator) AppendValidator(arg0 solid.Validator) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "AppendValidator", arg0) -} - -// AppendValidator indicates an expected call of AppendValidator. -func (mr *MockBeaconStateMutatorMockRecorder) AppendValidator(arg0 any) *MockBeaconStateMutatorAppendValidatorCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppendValidator", reflect.TypeOf((*MockBeaconStateMutator)(nil).AppendValidator), arg0) - return &MockBeaconStateMutatorAppendValidatorCall{Call: call} -} - -// MockBeaconStateMutatorAppendValidatorCall wrap *gomock.Call -type MockBeaconStateMutatorAppendValidatorCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorAppendValidatorCall) Return() *MockBeaconStateMutatorAppendValidatorCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorAppendValidatorCall) Do(f func(solid.Validator)) *MockBeaconStateMutatorAppendValidatorCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorAppendValidatorCall) DoAndReturn(f func(solid.Validator)) *MockBeaconStateMutatorAppendValidatorCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// ResetCurrentEpochAttestations mocks base method. -func (m *MockBeaconStateMutator) ResetCurrentEpochAttestations() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "ResetCurrentEpochAttestations") -} - -// ResetCurrentEpochAttestations indicates an expected call of ResetCurrentEpochAttestations. 
-func (mr *MockBeaconStateMutatorMockRecorder) ResetCurrentEpochAttestations() *MockBeaconStateMutatorResetCurrentEpochAttestationsCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetCurrentEpochAttestations", reflect.TypeOf((*MockBeaconStateMutator)(nil).ResetCurrentEpochAttestations)) - return &MockBeaconStateMutatorResetCurrentEpochAttestationsCall{Call: call} -} - -// MockBeaconStateMutatorResetCurrentEpochAttestationsCall wrap *gomock.Call -type MockBeaconStateMutatorResetCurrentEpochAttestationsCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorResetCurrentEpochAttestationsCall) Return() *MockBeaconStateMutatorResetCurrentEpochAttestationsCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorResetCurrentEpochAttestationsCall) Do(f func()) *MockBeaconStateMutatorResetCurrentEpochAttestationsCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorResetCurrentEpochAttestationsCall) DoAndReturn(f func()) *MockBeaconStateMutatorResetCurrentEpochAttestationsCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// ResetEpochParticipation mocks base method. -func (m *MockBeaconStateMutator) ResetEpochParticipation() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "ResetEpochParticipation") -} - -// ResetEpochParticipation indicates an expected call of ResetEpochParticipation. -func (mr *MockBeaconStateMutatorMockRecorder) ResetEpochParticipation() *MockBeaconStateMutatorResetEpochParticipationCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetEpochParticipation", reflect.TypeOf((*MockBeaconStateMutator)(nil).ResetEpochParticipation)) - return &MockBeaconStateMutatorResetEpochParticipationCall{Call: call} -} - -// MockBeaconStateMutatorResetEpochParticipationCall wrap *gomock.Call -type MockBeaconStateMutatorResetEpochParticipationCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorResetEpochParticipationCall) Return() *MockBeaconStateMutatorResetEpochParticipationCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorResetEpochParticipationCall) Do(f func()) *MockBeaconStateMutatorResetEpochParticipationCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorResetEpochParticipationCall) DoAndReturn(f func()) *MockBeaconStateMutatorResetEpochParticipationCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// ResetEth1DataVotes mocks base method. -func (m *MockBeaconStateMutator) ResetEth1DataVotes() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "ResetEth1DataVotes") -} - -// ResetEth1DataVotes indicates an expected call of ResetEth1DataVotes. 
-func (mr *MockBeaconStateMutatorMockRecorder) ResetEth1DataVotes() *MockBeaconStateMutatorResetEth1DataVotesCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetEth1DataVotes", reflect.TypeOf((*MockBeaconStateMutator)(nil).ResetEth1DataVotes)) - return &MockBeaconStateMutatorResetEth1DataVotesCall{Call: call} -} - -// MockBeaconStateMutatorResetEth1DataVotesCall wrap *gomock.Call -type MockBeaconStateMutatorResetEth1DataVotesCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorResetEth1DataVotesCall) Return() *MockBeaconStateMutatorResetEth1DataVotesCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorResetEth1DataVotesCall) Do(f func()) *MockBeaconStateMutatorResetEth1DataVotesCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorResetEth1DataVotesCall) DoAndReturn(f func()) *MockBeaconStateMutatorResetEth1DataVotesCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// ResetHistoricalSummaries mocks base method. -func (m *MockBeaconStateMutator) ResetHistoricalSummaries() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "ResetHistoricalSummaries") -} - -// ResetHistoricalSummaries indicates an expected call of ResetHistoricalSummaries. -func (mr *MockBeaconStateMutatorMockRecorder) ResetHistoricalSummaries() *MockBeaconStateMutatorResetHistoricalSummariesCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetHistoricalSummaries", reflect.TypeOf((*MockBeaconStateMutator)(nil).ResetHistoricalSummaries)) - return &MockBeaconStateMutatorResetHistoricalSummariesCall{Call: call} -} - -// MockBeaconStateMutatorResetHistoricalSummariesCall wrap *gomock.Call -type MockBeaconStateMutatorResetHistoricalSummariesCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorResetHistoricalSummariesCall) Return() *MockBeaconStateMutatorResetHistoricalSummariesCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorResetHistoricalSummariesCall) Do(f func()) *MockBeaconStateMutatorResetHistoricalSummariesCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorResetHistoricalSummariesCall) DoAndReturn(f func()) *MockBeaconStateMutatorResetHistoricalSummariesCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// ResetPreviousEpochAttestations mocks base method. -func (m *MockBeaconStateMutator) ResetPreviousEpochAttestations() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "ResetPreviousEpochAttestations") -} - -// ResetPreviousEpochAttestations indicates an expected call of ResetPreviousEpochAttestations. 
-func (mr *MockBeaconStateMutatorMockRecorder) ResetPreviousEpochAttestations() *MockBeaconStateMutatorResetPreviousEpochAttestationsCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetPreviousEpochAttestations", reflect.TypeOf((*MockBeaconStateMutator)(nil).ResetPreviousEpochAttestations)) - return &MockBeaconStateMutatorResetPreviousEpochAttestationsCall{Call: call} -} - -// MockBeaconStateMutatorResetPreviousEpochAttestationsCall wrap *gomock.Call -type MockBeaconStateMutatorResetPreviousEpochAttestationsCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorResetPreviousEpochAttestationsCall) Return() *MockBeaconStateMutatorResetPreviousEpochAttestationsCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorResetPreviousEpochAttestationsCall) Do(f func()) *MockBeaconStateMutatorResetPreviousEpochAttestationsCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorResetPreviousEpochAttestationsCall) DoAndReturn(f func()) *MockBeaconStateMutatorResetPreviousEpochAttestationsCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetActivationEligibilityEpochForValidatorAtIndex mocks base method. -func (m *MockBeaconStateMutator) SetActivationEligibilityEpochForValidatorAtIndex(arg0 int, arg1 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetActivationEligibilityEpochForValidatorAtIndex", arg0, arg1) -} - -// SetActivationEligibilityEpochForValidatorAtIndex indicates an expected call of SetActivationEligibilityEpochForValidatorAtIndex. -func (mr *MockBeaconStateMutatorMockRecorder) SetActivationEligibilityEpochForValidatorAtIndex(arg0, arg1 any) *MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetActivationEligibilityEpochForValidatorAtIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetActivationEligibilityEpochForValidatorAtIndex), arg0, arg1) - return &MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall{Call: call} -} - -// MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall wrap *gomock.Call -type MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall) Return() *MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall) Do(f func(int, uint64)) *MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall) DoAndReturn(f func(int, uint64)) *MockBeaconStateMutatorSetActivationEligibilityEpochForValidatorAtIndexCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetActivationEpochForValidatorAtIndex mocks base method. 
-func (m *MockBeaconStateMutator) SetActivationEpochForValidatorAtIndex(arg0 int, arg1 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetActivationEpochForValidatorAtIndex", arg0, arg1) -} - -// SetActivationEpochForValidatorAtIndex indicates an expected call of SetActivationEpochForValidatorAtIndex. -func (mr *MockBeaconStateMutatorMockRecorder) SetActivationEpochForValidatorAtIndex(arg0, arg1 any) *MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetActivationEpochForValidatorAtIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetActivationEpochForValidatorAtIndex), arg0, arg1) - return &MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall{Call: call} -} - -// MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall wrap *gomock.Call -type MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall) Return() *MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall) Do(f func(int, uint64)) *MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall) DoAndReturn(f func(int, uint64)) *MockBeaconStateMutatorSetActivationEpochForValidatorAtIndexCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetBlockRootAt mocks base method. -func (m *MockBeaconStateMutator) SetBlockRootAt(arg0 int, arg1 common.Hash) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetBlockRootAt", arg0, arg1) -} - -// SetBlockRootAt indicates an expected call of SetBlockRootAt. -func (mr *MockBeaconStateMutatorMockRecorder) SetBlockRootAt(arg0, arg1 any) *MockBeaconStateMutatorSetBlockRootAtCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBlockRootAt", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetBlockRootAt), arg0, arg1) - return &MockBeaconStateMutatorSetBlockRootAtCall{Call: call} -} - -// MockBeaconStateMutatorSetBlockRootAtCall wrap *gomock.Call -type MockBeaconStateMutatorSetBlockRootAtCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetBlockRootAtCall) Return() *MockBeaconStateMutatorSetBlockRootAtCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetBlockRootAtCall) Do(f func(int, common.Hash)) *MockBeaconStateMutatorSetBlockRootAtCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetBlockRootAtCall) DoAndReturn(f func(int, common.Hash)) *MockBeaconStateMutatorSetBlockRootAtCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetCurrentEpochParticipationFlags mocks base method. -func (m *MockBeaconStateMutator) SetCurrentEpochParticipationFlags(arg0 []cltypes.ParticipationFlags) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetCurrentEpochParticipationFlags", arg0) -} - -// SetCurrentEpochParticipationFlags indicates an expected call of SetCurrentEpochParticipationFlags. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetCurrentEpochParticipationFlags(arg0 any) *MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentEpochParticipationFlags", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetCurrentEpochParticipationFlags), arg0) - return &MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall{Call: call} -} - -// MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall wrap *gomock.Call -type MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall) Return() *MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall) Do(f func([]cltypes.ParticipationFlags)) *MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall) DoAndReturn(f func([]cltypes.ParticipationFlags)) *MockBeaconStateMutatorSetCurrentEpochParticipationFlagsCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetCurrentJustifiedCheckpoint mocks base method. -func (m *MockBeaconStateMutator) SetCurrentJustifiedCheckpoint(arg0 solid.Checkpoint) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetCurrentJustifiedCheckpoint", arg0) -} - -// SetCurrentJustifiedCheckpoint indicates an expected call of SetCurrentJustifiedCheckpoint. -func (mr *MockBeaconStateMutatorMockRecorder) SetCurrentJustifiedCheckpoint(arg0 any) *MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentJustifiedCheckpoint", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetCurrentJustifiedCheckpoint), arg0) - return &MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall{Call: call} -} - -// MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall wrap *gomock.Call -type MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall) Return() *MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall) Do(f func(solid.Checkpoint)) *MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall) DoAndReturn(f func(solid.Checkpoint)) *MockBeaconStateMutatorSetCurrentJustifiedCheckpointCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetCurrentSyncCommittee mocks base method. -func (m *MockBeaconStateMutator) SetCurrentSyncCommittee(arg0 *solid.SyncCommittee) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetCurrentSyncCommittee", arg0) -} - -// SetCurrentSyncCommittee indicates an expected call of SetCurrentSyncCommittee. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetCurrentSyncCommittee(arg0 any) *MockBeaconStateMutatorSetCurrentSyncCommitteeCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentSyncCommittee", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetCurrentSyncCommittee), arg0) - return &MockBeaconStateMutatorSetCurrentSyncCommitteeCall{Call: call} -} - -// MockBeaconStateMutatorSetCurrentSyncCommitteeCall wrap *gomock.Call -type MockBeaconStateMutatorSetCurrentSyncCommitteeCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetCurrentSyncCommitteeCall) Return() *MockBeaconStateMutatorSetCurrentSyncCommitteeCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetCurrentSyncCommitteeCall) Do(f func(*solid.SyncCommittee)) *MockBeaconStateMutatorSetCurrentSyncCommitteeCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetCurrentSyncCommitteeCall) DoAndReturn(f func(*solid.SyncCommittee)) *MockBeaconStateMutatorSetCurrentSyncCommitteeCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetEffectiveBalanceForValidatorAtIndex mocks base method. -func (m *MockBeaconStateMutator) SetEffectiveBalanceForValidatorAtIndex(arg0 int, arg1 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetEffectiveBalanceForValidatorAtIndex", arg0, arg1) -} - -// SetEffectiveBalanceForValidatorAtIndex indicates an expected call of SetEffectiveBalanceForValidatorAtIndex. -func (mr *MockBeaconStateMutatorMockRecorder) SetEffectiveBalanceForValidatorAtIndex(arg0, arg1 any) *MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEffectiveBalanceForValidatorAtIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetEffectiveBalanceForValidatorAtIndex), arg0, arg1) - return &MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall{Call: call} -} - -// MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall wrap *gomock.Call -type MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall) Return() *MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall) Do(f func(int, uint64)) *MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall) DoAndReturn(f func(int, uint64)) *MockBeaconStateMutatorSetEffectiveBalanceForValidatorAtIndexCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetEpochParticipationForValidatorIndex mocks base method. -func (m *MockBeaconStateMutator) SetEpochParticipationForValidatorIndex(arg0 bool, arg1 int, arg2 cltypes.ParticipationFlags) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetEpochParticipationForValidatorIndex", arg0, arg1, arg2) -} - -// SetEpochParticipationForValidatorIndex indicates an expected call of SetEpochParticipationForValidatorIndex. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetEpochParticipationForValidatorIndex(arg0, arg1, arg2 any) *MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEpochParticipationForValidatorIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetEpochParticipationForValidatorIndex), arg0, arg1, arg2) - return &MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall{Call: call} -} - -// MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall wrap *gomock.Call -type MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall) Return() *MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall) Do(f func(bool, int, cltypes.ParticipationFlags)) *MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall) DoAndReturn(f func(bool, int, cltypes.ParticipationFlags)) *MockBeaconStateMutatorSetEpochParticipationForValidatorIndexCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetEth1Data mocks base method. -func (m *MockBeaconStateMutator) SetEth1Data(arg0 *cltypes.Eth1Data) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetEth1Data", arg0) -} - -// SetEth1Data indicates an expected call of SetEth1Data. -func (mr *MockBeaconStateMutatorMockRecorder) SetEth1Data(arg0 any) *MockBeaconStateMutatorSetEth1DataCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEth1Data", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetEth1Data), arg0) - return &MockBeaconStateMutatorSetEth1DataCall{Call: call} -} - -// MockBeaconStateMutatorSetEth1DataCall wrap *gomock.Call -type MockBeaconStateMutatorSetEth1DataCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetEth1DataCall) Return() *MockBeaconStateMutatorSetEth1DataCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetEth1DataCall) Do(f func(*cltypes.Eth1Data)) *MockBeaconStateMutatorSetEth1DataCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetEth1DataCall) DoAndReturn(f func(*cltypes.Eth1Data)) *MockBeaconStateMutatorSetEth1DataCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetEth1DepositIndex mocks base method. -func (m *MockBeaconStateMutator) SetEth1DepositIndex(arg0 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetEth1DepositIndex", arg0) -} - -// SetEth1DepositIndex indicates an expected call of SetEth1DepositIndex. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetEth1DepositIndex(arg0 any) *MockBeaconStateMutatorSetEth1DepositIndexCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEth1DepositIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetEth1DepositIndex), arg0) - return &MockBeaconStateMutatorSetEth1DepositIndexCall{Call: call} -} - -// MockBeaconStateMutatorSetEth1DepositIndexCall wrap *gomock.Call -type MockBeaconStateMutatorSetEth1DepositIndexCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetEth1DepositIndexCall) Return() *MockBeaconStateMutatorSetEth1DepositIndexCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetEth1DepositIndexCall) Do(f func(uint64)) *MockBeaconStateMutatorSetEth1DepositIndexCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetEth1DepositIndexCall) DoAndReturn(f func(uint64)) *MockBeaconStateMutatorSetEth1DepositIndexCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetExitEpochForValidatorAtIndex mocks base method. -func (m *MockBeaconStateMutator) SetExitEpochForValidatorAtIndex(arg0 int, arg1 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetExitEpochForValidatorAtIndex", arg0, arg1) -} - -// SetExitEpochForValidatorAtIndex indicates an expected call of SetExitEpochForValidatorAtIndex. -func (mr *MockBeaconStateMutatorMockRecorder) SetExitEpochForValidatorAtIndex(arg0, arg1 any) *MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetExitEpochForValidatorAtIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetExitEpochForValidatorAtIndex), arg0, arg1) - return &MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall{Call: call} -} - -// MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall wrap *gomock.Call -type MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall) Return() *MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall) Do(f func(int, uint64)) *MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall) DoAndReturn(f func(int, uint64)) *MockBeaconStateMutatorSetExitEpochForValidatorAtIndexCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetFinalizedCheckpoint mocks base method. -func (m *MockBeaconStateMutator) SetFinalizedCheckpoint(arg0 solid.Checkpoint) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetFinalizedCheckpoint", arg0) -} - -// SetFinalizedCheckpoint indicates an expected call of SetFinalizedCheckpoint. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetFinalizedCheckpoint(arg0 any) *MockBeaconStateMutatorSetFinalizedCheckpointCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFinalizedCheckpoint", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetFinalizedCheckpoint), arg0) - return &MockBeaconStateMutatorSetFinalizedCheckpointCall{Call: call} -} - -// MockBeaconStateMutatorSetFinalizedCheckpointCall wrap *gomock.Call -type MockBeaconStateMutatorSetFinalizedCheckpointCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetFinalizedCheckpointCall) Return() *MockBeaconStateMutatorSetFinalizedCheckpointCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetFinalizedCheckpointCall) Do(f func(solid.Checkpoint)) *MockBeaconStateMutatorSetFinalizedCheckpointCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetFinalizedCheckpointCall) DoAndReturn(f func(solid.Checkpoint)) *MockBeaconStateMutatorSetFinalizedCheckpointCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetFork mocks base method. -func (m *MockBeaconStateMutator) SetFork(arg0 *cltypes.Fork) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetFork", arg0) -} - -// SetFork indicates an expected call of SetFork. -func (mr *MockBeaconStateMutatorMockRecorder) SetFork(arg0 any) *MockBeaconStateMutatorSetForkCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFork", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetFork), arg0) - return &MockBeaconStateMutatorSetForkCall{Call: call} -} - -// MockBeaconStateMutatorSetForkCall wrap *gomock.Call -type MockBeaconStateMutatorSetForkCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetForkCall) Return() *MockBeaconStateMutatorSetForkCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetForkCall) Do(f func(*cltypes.Fork)) *MockBeaconStateMutatorSetForkCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetForkCall) DoAndReturn(f func(*cltypes.Fork)) *MockBeaconStateMutatorSetForkCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetInactivityScores mocks base method. -func (m *MockBeaconStateMutator) SetInactivityScores(arg0 []uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetInactivityScores", arg0) -} - -// SetInactivityScores indicates an expected call of SetInactivityScores. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetInactivityScores(arg0 any) *MockBeaconStateMutatorSetInactivityScoresCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetInactivityScores", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetInactivityScores), arg0) - return &MockBeaconStateMutatorSetInactivityScoresCall{Call: call} -} - -// MockBeaconStateMutatorSetInactivityScoresCall wrap *gomock.Call -type MockBeaconStateMutatorSetInactivityScoresCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetInactivityScoresCall) Return() *MockBeaconStateMutatorSetInactivityScoresCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetInactivityScoresCall) Do(f func([]uint64)) *MockBeaconStateMutatorSetInactivityScoresCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetInactivityScoresCall) DoAndReturn(f func([]uint64)) *MockBeaconStateMutatorSetInactivityScoresCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetJustificationBits mocks base method. -func (m *MockBeaconStateMutator) SetJustificationBits(arg0 cltypes.JustificationBits) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetJustificationBits", arg0) -} - -// SetJustificationBits indicates an expected call of SetJustificationBits. -func (mr *MockBeaconStateMutatorMockRecorder) SetJustificationBits(arg0 any) *MockBeaconStateMutatorSetJustificationBitsCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetJustificationBits", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetJustificationBits), arg0) - return &MockBeaconStateMutatorSetJustificationBitsCall{Call: call} -} - -// MockBeaconStateMutatorSetJustificationBitsCall wrap *gomock.Call -type MockBeaconStateMutatorSetJustificationBitsCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetJustificationBitsCall) Return() *MockBeaconStateMutatorSetJustificationBitsCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetJustificationBitsCall) Do(f func(cltypes.JustificationBits)) *MockBeaconStateMutatorSetJustificationBitsCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetJustificationBitsCall) DoAndReturn(f func(cltypes.JustificationBits)) *MockBeaconStateMutatorSetJustificationBitsCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetLatestBlockHeader mocks base method. -func (m *MockBeaconStateMutator) SetLatestBlockHeader(arg0 *cltypes.BeaconBlockHeader) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetLatestBlockHeader", arg0) -} - -// SetLatestBlockHeader indicates an expected call of SetLatestBlockHeader. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetLatestBlockHeader(arg0 any) *MockBeaconStateMutatorSetLatestBlockHeaderCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLatestBlockHeader", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetLatestBlockHeader), arg0) - return &MockBeaconStateMutatorSetLatestBlockHeaderCall{Call: call} -} - -// MockBeaconStateMutatorSetLatestBlockHeaderCall wrap *gomock.Call -type MockBeaconStateMutatorSetLatestBlockHeaderCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetLatestBlockHeaderCall) Return() *MockBeaconStateMutatorSetLatestBlockHeaderCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetLatestBlockHeaderCall) Do(f func(*cltypes.BeaconBlockHeader)) *MockBeaconStateMutatorSetLatestBlockHeaderCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetLatestBlockHeaderCall) DoAndReturn(f func(*cltypes.BeaconBlockHeader)) *MockBeaconStateMutatorSetLatestBlockHeaderCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetLatestExecutionPayloadHeader mocks base method. -func (m *MockBeaconStateMutator) SetLatestExecutionPayloadHeader(arg0 *cltypes.Eth1Header) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetLatestExecutionPayloadHeader", arg0) -} - -// SetLatestExecutionPayloadHeader indicates an expected call of SetLatestExecutionPayloadHeader. -func (mr *MockBeaconStateMutatorMockRecorder) SetLatestExecutionPayloadHeader(arg0 any) *MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLatestExecutionPayloadHeader", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetLatestExecutionPayloadHeader), arg0) - return &MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall{Call: call} -} - -// MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall wrap *gomock.Call -type MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall) Return() *MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall) Do(f func(*cltypes.Eth1Header)) *MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall) DoAndReturn(f func(*cltypes.Eth1Header)) *MockBeaconStateMutatorSetLatestExecutionPayloadHeaderCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetNextSyncCommittee mocks base method. -func (m *MockBeaconStateMutator) SetNextSyncCommittee(arg0 *solid.SyncCommittee) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetNextSyncCommittee", arg0) -} - -// SetNextSyncCommittee indicates an expected call of SetNextSyncCommittee. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetNextSyncCommittee(arg0 any) *MockBeaconStateMutatorSetNextSyncCommitteeCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNextSyncCommittee", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetNextSyncCommittee), arg0) - return &MockBeaconStateMutatorSetNextSyncCommitteeCall{Call: call} -} - -// MockBeaconStateMutatorSetNextSyncCommitteeCall wrap *gomock.Call -type MockBeaconStateMutatorSetNextSyncCommitteeCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetNextSyncCommitteeCall) Return() *MockBeaconStateMutatorSetNextSyncCommitteeCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetNextSyncCommitteeCall) Do(f func(*solid.SyncCommittee)) *MockBeaconStateMutatorSetNextSyncCommitteeCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetNextSyncCommitteeCall) DoAndReturn(f func(*solid.SyncCommittee)) *MockBeaconStateMutatorSetNextSyncCommitteeCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetNextWithdrawalIndex mocks base method. -func (m *MockBeaconStateMutator) SetNextWithdrawalIndex(arg0 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetNextWithdrawalIndex", arg0) -} - -// SetNextWithdrawalIndex indicates an expected call of SetNextWithdrawalIndex. -func (mr *MockBeaconStateMutatorMockRecorder) SetNextWithdrawalIndex(arg0 any) *MockBeaconStateMutatorSetNextWithdrawalIndexCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNextWithdrawalIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetNextWithdrawalIndex), arg0) - return &MockBeaconStateMutatorSetNextWithdrawalIndexCall{Call: call} -} - -// MockBeaconStateMutatorSetNextWithdrawalIndexCall wrap *gomock.Call -type MockBeaconStateMutatorSetNextWithdrawalIndexCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetNextWithdrawalIndexCall) Return() *MockBeaconStateMutatorSetNextWithdrawalIndexCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetNextWithdrawalIndexCall) Do(f func(uint64)) *MockBeaconStateMutatorSetNextWithdrawalIndexCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetNextWithdrawalIndexCall) DoAndReturn(f func(uint64)) *MockBeaconStateMutatorSetNextWithdrawalIndexCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetNextWithdrawalValidatorIndex mocks base method. -func (m *MockBeaconStateMutator) SetNextWithdrawalValidatorIndex(arg0 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetNextWithdrawalValidatorIndex", arg0) -} - -// SetNextWithdrawalValidatorIndex indicates an expected call of SetNextWithdrawalValidatorIndex. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetNextWithdrawalValidatorIndex(arg0 any) *MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNextWithdrawalValidatorIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetNextWithdrawalValidatorIndex), arg0) - return &MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall{Call: call} -} - -// MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall wrap *gomock.Call -type MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall) Return() *MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall) Do(f func(uint64)) *MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall) DoAndReturn(f func(uint64)) *MockBeaconStateMutatorSetNextWithdrawalValidatorIndexCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetPreviousEpochParticipationFlags mocks base method. -func (m *MockBeaconStateMutator) SetPreviousEpochParticipationFlags(arg0 []cltypes.ParticipationFlags) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetPreviousEpochParticipationFlags", arg0) -} - -// SetPreviousEpochParticipationFlags indicates an expected call of SetPreviousEpochParticipationFlags. -func (mr *MockBeaconStateMutatorMockRecorder) SetPreviousEpochParticipationFlags(arg0 any) *MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreviousEpochParticipationFlags", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetPreviousEpochParticipationFlags), arg0) - return &MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall{Call: call} -} - -// MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall wrap *gomock.Call -type MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall) Return() *MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall) Do(f func([]cltypes.ParticipationFlags)) *MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall) DoAndReturn(f func([]cltypes.ParticipationFlags)) *MockBeaconStateMutatorSetPreviousEpochParticipationFlagsCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetPreviousJustifiedCheckpoint mocks base method. -func (m *MockBeaconStateMutator) SetPreviousJustifiedCheckpoint(arg0 solid.Checkpoint) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetPreviousJustifiedCheckpoint", arg0) -} - -// SetPreviousJustifiedCheckpoint indicates an expected call of SetPreviousJustifiedCheckpoint. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetPreviousJustifiedCheckpoint(arg0 any) *MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreviousJustifiedCheckpoint", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetPreviousJustifiedCheckpoint), arg0) - return &MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall{Call: call} -} - -// MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall wrap *gomock.Call -type MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall) Return() *MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall) Do(f func(solid.Checkpoint)) *MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall) DoAndReturn(f func(solid.Checkpoint)) *MockBeaconStateMutatorSetPreviousJustifiedCheckpointCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetRandaoMixAt mocks base method. -func (m *MockBeaconStateMutator) SetRandaoMixAt(arg0 int, arg1 common.Hash) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetRandaoMixAt", arg0, arg1) -} - -// SetRandaoMixAt indicates an expected call of SetRandaoMixAt. -func (mr *MockBeaconStateMutatorMockRecorder) SetRandaoMixAt(arg0, arg1 any) *MockBeaconStateMutatorSetRandaoMixAtCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetRandaoMixAt", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetRandaoMixAt), arg0, arg1) - return &MockBeaconStateMutatorSetRandaoMixAtCall{Call: call} -} - -// MockBeaconStateMutatorSetRandaoMixAtCall wrap *gomock.Call -type MockBeaconStateMutatorSetRandaoMixAtCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetRandaoMixAtCall) Return() *MockBeaconStateMutatorSetRandaoMixAtCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetRandaoMixAtCall) Do(f func(int, common.Hash)) *MockBeaconStateMutatorSetRandaoMixAtCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetRandaoMixAtCall) DoAndReturn(f func(int, common.Hash)) *MockBeaconStateMutatorSetRandaoMixAtCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetSlashingSegmentAt mocks base method. -func (m *MockBeaconStateMutator) SetSlashingSegmentAt(arg0 int, arg1 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetSlashingSegmentAt", arg0, arg1) -} - -// SetSlashingSegmentAt indicates an expected call of SetSlashingSegmentAt. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetSlashingSegmentAt(arg0, arg1 any) *MockBeaconStateMutatorSetSlashingSegmentAtCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSlashingSegmentAt", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetSlashingSegmentAt), arg0, arg1) - return &MockBeaconStateMutatorSetSlashingSegmentAtCall{Call: call} -} - -// MockBeaconStateMutatorSetSlashingSegmentAtCall wrap *gomock.Call -type MockBeaconStateMutatorSetSlashingSegmentAtCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetSlashingSegmentAtCall) Return() *MockBeaconStateMutatorSetSlashingSegmentAtCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetSlashingSegmentAtCall) Do(f func(int, uint64)) *MockBeaconStateMutatorSetSlashingSegmentAtCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetSlashingSegmentAtCall) DoAndReturn(f func(int, uint64)) *MockBeaconStateMutatorSetSlashingSegmentAtCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetSlot mocks base method. -func (m *MockBeaconStateMutator) SetSlot(arg0 uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetSlot", arg0) -} - -// SetSlot indicates an expected call of SetSlot. -func (mr *MockBeaconStateMutatorMockRecorder) SetSlot(arg0 any) *MockBeaconStateMutatorSetSlotCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSlot", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetSlot), arg0) - return &MockBeaconStateMutatorSetSlotCall{Call: call} -} - -// MockBeaconStateMutatorSetSlotCall wrap *gomock.Call -type MockBeaconStateMutatorSetSlotCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetSlotCall) Return() *MockBeaconStateMutatorSetSlotCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetSlotCall) Do(f func(uint64)) *MockBeaconStateMutatorSetSlotCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetSlotCall) DoAndReturn(f func(uint64)) *MockBeaconStateMutatorSetSlotCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetStateRootAt mocks base method. -func (m *MockBeaconStateMutator) SetStateRootAt(arg0 int, arg1 common.Hash) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetStateRootAt", arg0, arg1) -} - -// SetStateRootAt indicates an expected call of SetStateRootAt. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetStateRootAt(arg0, arg1 any) *MockBeaconStateMutatorSetStateRootAtCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetStateRootAt", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetStateRootAt), arg0, arg1) - return &MockBeaconStateMutatorSetStateRootAtCall{Call: call} -} - -// MockBeaconStateMutatorSetStateRootAtCall wrap *gomock.Call -type MockBeaconStateMutatorSetStateRootAtCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetStateRootAtCall) Return() *MockBeaconStateMutatorSetStateRootAtCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetStateRootAtCall) Do(f func(int, common.Hash)) *MockBeaconStateMutatorSetStateRootAtCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetStateRootAtCall) DoAndReturn(f func(int, common.Hash)) *MockBeaconStateMutatorSetStateRootAtCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetValidatorAtIndex mocks base method. -func (m *MockBeaconStateMutator) SetValidatorAtIndex(arg0 int, arg1 solid.Validator) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetValidatorAtIndex", arg0, arg1) -} - -// SetValidatorAtIndex indicates an expected call of SetValidatorAtIndex. -func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorAtIndex(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorAtIndexCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorAtIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorAtIndex), arg0, arg1) - return &MockBeaconStateMutatorSetValidatorAtIndexCall{Call: call} -} - -// MockBeaconStateMutatorSetValidatorAtIndexCall wrap *gomock.Call -type MockBeaconStateMutatorSetValidatorAtIndexCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetValidatorAtIndexCall) Return() *MockBeaconStateMutatorSetValidatorAtIndexCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetValidatorAtIndexCall) Do(f func(int, solid.Validator)) *MockBeaconStateMutatorSetValidatorAtIndexCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetValidatorAtIndexCall) DoAndReturn(f func(int, solid.Validator)) *MockBeaconStateMutatorSetValidatorAtIndexCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetValidatorBalance mocks base method. -func (m *MockBeaconStateMutator) SetValidatorBalance(arg0 int, arg1 uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetValidatorBalance", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetValidatorBalance indicates an expected call of SetValidatorBalance. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorBalance(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorBalanceCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorBalance", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorBalance), arg0, arg1) - return &MockBeaconStateMutatorSetValidatorBalanceCall{Call: call} -} - -// MockBeaconStateMutatorSetValidatorBalanceCall wrap *gomock.Call -type MockBeaconStateMutatorSetValidatorBalanceCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetValidatorBalanceCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorBalanceCall { - c.Call = c.Call.Return(arg0) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetValidatorBalanceCall) Do(f func(int, uint64) error) *MockBeaconStateMutatorSetValidatorBalanceCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetValidatorBalanceCall) DoAndReturn(f func(int, uint64) error) *MockBeaconStateMutatorSetValidatorBalanceCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetValidatorInactivityScore mocks base method. -func (m *MockBeaconStateMutator) SetValidatorInactivityScore(arg0 int, arg1 uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetValidatorInactivityScore", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetValidatorInactivityScore indicates an expected call of SetValidatorInactivityScore. -func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorInactivityScore(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorInactivityScoreCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorInactivityScore", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorInactivityScore), arg0, arg1) - return &MockBeaconStateMutatorSetValidatorInactivityScoreCall{Call: call} -} - -// MockBeaconStateMutatorSetValidatorInactivityScoreCall wrap *gomock.Call -type MockBeaconStateMutatorSetValidatorInactivityScoreCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetValidatorInactivityScoreCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorInactivityScoreCall { - c.Call = c.Call.Return(arg0) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetValidatorInactivityScoreCall) Do(f func(int, uint64) error) *MockBeaconStateMutatorSetValidatorInactivityScoreCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetValidatorInactivityScoreCall) DoAndReturn(f func(int, uint64) error) *MockBeaconStateMutatorSetValidatorInactivityScoreCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetValidatorIsCurrentMatchingHeadAttester mocks base method. -func (m *MockBeaconStateMutator) SetValidatorIsCurrentMatchingHeadAttester(arg0 int, arg1 bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetValidatorIsCurrentMatchingHeadAttester", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetValidatorIsCurrentMatchingHeadAttester indicates an expected call of SetValidatorIsCurrentMatchingHeadAttester. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorIsCurrentMatchingHeadAttester(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorIsCurrentMatchingHeadAttester", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorIsCurrentMatchingHeadAttester), arg0, arg1) - return &MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall{Call: call} -} - -// MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall wrap *gomock.Call -type MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall { - c.Call = c.Call.Return(arg0) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall) Do(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall) DoAndReturn(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingHeadAttesterCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetValidatorIsCurrentMatchingSourceAttester mocks base method. -func (m *MockBeaconStateMutator) SetValidatorIsCurrentMatchingSourceAttester(arg0 int, arg1 bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetValidatorIsCurrentMatchingSourceAttester", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetValidatorIsCurrentMatchingSourceAttester indicates an expected call of SetValidatorIsCurrentMatchingSourceAttester. -func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorIsCurrentMatchingSourceAttester(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorIsCurrentMatchingSourceAttester", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorIsCurrentMatchingSourceAttester), arg0, arg1) - return &MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall{Call: call} -} - -// MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall wrap *gomock.Call -type MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall { - c.Call = c.Call.Return(arg0) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall) Do(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall) DoAndReturn(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingSourceAttesterCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetValidatorIsCurrentMatchingTargetAttester mocks base method. 
-func (m *MockBeaconStateMutator) SetValidatorIsCurrentMatchingTargetAttester(arg0 int, arg1 bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetValidatorIsCurrentMatchingTargetAttester", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetValidatorIsCurrentMatchingTargetAttester indicates an expected call of SetValidatorIsCurrentMatchingTargetAttester. -func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorIsCurrentMatchingTargetAttester(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorIsCurrentMatchingTargetAttester", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorIsCurrentMatchingTargetAttester), arg0, arg1) - return &MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall{Call: call} -} - -// MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall wrap *gomock.Call -type MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall { - c.Call = c.Call.Return(arg0) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall) Do(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall) DoAndReturn(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsCurrentMatchingTargetAttesterCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetValidatorIsPreviousMatchingHeadAttester mocks base method. -func (m *MockBeaconStateMutator) SetValidatorIsPreviousMatchingHeadAttester(arg0 int, arg1 bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetValidatorIsPreviousMatchingHeadAttester", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetValidatorIsPreviousMatchingHeadAttester indicates an expected call of SetValidatorIsPreviousMatchingHeadAttester. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorIsPreviousMatchingHeadAttester(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorIsPreviousMatchingHeadAttester", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorIsPreviousMatchingHeadAttester), arg0, arg1) - return &MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall{Call: call} -} - -// MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall wrap *gomock.Call -type MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall { - c.Call = c.Call.Return(arg0) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall) Do(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall) DoAndReturn(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingHeadAttesterCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetValidatorIsPreviousMatchingSourceAttester mocks base method. -func (m *MockBeaconStateMutator) SetValidatorIsPreviousMatchingSourceAttester(arg0 int, arg1 bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetValidatorIsPreviousMatchingSourceAttester", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetValidatorIsPreviousMatchingSourceAttester indicates an expected call of SetValidatorIsPreviousMatchingSourceAttester. -func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorIsPreviousMatchingSourceAttester(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorIsPreviousMatchingSourceAttester", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorIsPreviousMatchingSourceAttester), arg0, arg1) - return &MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall{Call: call} -} - -// MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall wrap *gomock.Call -type MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall { - c.Call = c.Call.Return(arg0) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall) Do(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall) DoAndReturn(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingSourceAttesterCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetValidatorIsPreviousMatchingTargetAttester mocks base method. 
-func (m *MockBeaconStateMutator) SetValidatorIsPreviousMatchingTargetAttester(arg0 int, arg1 bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetValidatorIsPreviousMatchingTargetAttester", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetValidatorIsPreviousMatchingTargetAttester indicates an expected call of SetValidatorIsPreviousMatchingTargetAttester. -func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorIsPreviousMatchingTargetAttester(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorIsPreviousMatchingTargetAttester", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorIsPreviousMatchingTargetAttester), arg0, arg1) - return &MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall{Call: call} -} - -// MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall wrap *gomock.Call -type MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall { - c.Call = c.Call.Return(arg0) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall) Do(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall) DoAndReturn(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorIsPreviousMatchingTargetAttesterCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetValidatorMinCurrentInclusionDelayAttestation mocks base method. -func (m *MockBeaconStateMutator) SetValidatorMinCurrentInclusionDelayAttestation(arg0 int, arg1 *solid.PendingAttestation) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetValidatorMinCurrentInclusionDelayAttestation", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetValidatorMinCurrentInclusionDelayAttestation indicates an expected call of SetValidatorMinCurrentInclusionDelayAttestation. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorMinCurrentInclusionDelayAttestation(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorMinCurrentInclusionDelayAttestation", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorMinCurrentInclusionDelayAttestation), arg0, arg1) - return &MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall{Call: call} -} - -// MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall wrap *gomock.Call -type MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall { - c.Call = c.Call.Return(arg0) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall) Do(f func(int, *solid.PendingAttestation) error) *MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall) DoAndReturn(f func(int, *solid.PendingAttestation) error) *MockBeaconStateMutatorSetValidatorMinCurrentInclusionDelayAttestationCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetValidatorMinPreviousInclusionDelayAttestation mocks base method. -func (m *MockBeaconStateMutator) SetValidatorMinPreviousInclusionDelayAttestation(arg0 int, arg1 *solid.PendingAttestation) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetValidatorMinPreviousInclusionDelayAttestation", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetValidatorMinPreviousInclusionDelayAttestation indicates an expected call of SetValidatorMinPreviousInclusionDelayAttestation. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorMinPreviousInclusionDelayAttestation(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorMinPreviousInclusionDelayAttestation", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorMinPreviousInclusionDelayAttestation), arg0, arg1) - return &MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall{Call: call} -} - -// MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall wrap *gomock.Call -type MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall { - c.Call = c.Call.Return(arg0) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall) Do(f func(int, *solid.PendingAttestation) error) *MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall) DoAndReturn(f func(int, *solid.PendingAttestation) error) *MockBeaconStateMutatorSetValidatorMinPreviousInclusionDelayAttestationCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetValidatorSlashed mocks base method. -func (m *MockBeaconStateMutator) SetValidatorSlashed(arg0 int, arg1 bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetValidatorSlashed", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetValidatorSlashed indicates an expected call of SetValidatorSlashed. -func (mr *MockBeaconStateMutatorMockRecorder) SetValidatorSlashed(arg0, arg1 any) *MockBeaconStateMutatorSetValidatorSlashedCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetValidatorSlashed", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetValidatorSlashed), arg0, arg1) - return &MockBeaconStateMutatorSetValidatorSlashedCall{Call: call} -} - -// MockBeaconStateMutatorSetValidatorSlashedCall wrap *gomock.Call -type MockBeaconStateMutatorSetValidatorSlashedCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetValidatorSlashedCall) Return(arg0 error) *MockBeaconStateMutatorSetValidatorSlashedCall { - c.Call = c.Call.Return(arg0) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetValidatorSlashedCall) Do(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorSlashedCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetValidatorSlashedCall) DoAndReturn(f func(int, bool) error) *MockBeaconStateMutatorSetValidatorSlashedCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetVersion mocks base method. -func (m *MockBeaconStateMutator) SetVersion(arg0 clparams.StateVersion) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetVersion", arg0) -} - -// SetVersion indicates an expected call of SetVersion. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetVersion(arg0 any) *MockBeaconStateMutatorSetVersionCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetVersion", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetVersion), arg0) - return &MockBeaconStateMutatorSetVersionCall{Call: call} -} - -// MockBeaconStateMutatorSetVersionCall wrap *gomock.Call -type MockBeaconStateMutatorSetVersionCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetVersionCall) Return() *MockBeaconStateMutatorSetVersionCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetVersionCall) Do(f func(clparams.StateVersion)) *MockBeaconStateMutatorSetVersionCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetVersionCall) DoAndReturn(f func(clparams.StateVersion)) *MockBeaconStateMutatorSetVersionCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetWithdrawableEpochForValidatorAtIndex mocks base method. -func (m *MockBeaconStateMutator) SetWithdrawableEpochForValidatorAtIndex(arg0 int, arg1 uint64) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetWithdrawableEpochForValidatorAtIndex", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// SetWithdrawableEpochForValidatorAtIndex indicates an expected call of SetWithdrawableEpochForValidatorAtIndex. -func (mr *MockBeaconStateMutatorMockRecorder) SetWithdrawableEpochForValidatorAtIndex(arg0, arg1 any) *MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWithdrawableEpochForValidatorAtIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetWithdrawableEpochForValidatorAtIndex), arg0, arg1) - return &MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall{Call: call} -} - -// MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall wrap *gomock.Call -type MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall) Return(arg0 error) *MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall { - c.Call = c.Call.Return(arg0) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall) Do(f func(int, uint64) error) *MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall) DoAndReturn(f func(int, uint64) error) *MockBeaconStateMutatorSetWithdrawableEpochForValidatorAtIndexCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -// SetWithdrawalCredentialForValidatorAtIndex mocks base method. -func (m *MockBeaconStateMutator) SetWithdrawalCredentialForValidatorAtIndex(arg0 int, arg1 common.Hash) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetWithdrawalCredentialForValidatorAtIndex", arg0, arg1) -} - -// SetWithdrawalCredentialForValidatorAtIndex indicates an expected call of SetWithdrawalCredentialForValidatorAtIndex. 
-func (mr *MockBeaconStateMutatorMockRecorder) SetWithdrawalCredentialForValidatorAtIndex(arg0, arg1 any) *MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWithdrawalCredentialForValidatorAtIndex", reflect.TypeOf((*MockBeaconStateMutator)(nil).SetWithdrawalCredentialForValidatorAtIndex), arg0, arg1) - return &MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall{Call: call} -} - -// MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall wrap *gomock.Call -type MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall) Return() *MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall { - c.Call = c.Call.Return() - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall) Do(f func(int, common.Hash)) *MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall) DoAndReturn(f func(int, common.Hash)) *MockBeaconStateMutatorSetWithdrawalCredentialForValidatorAtIndexCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - -func (c *MockBeaconStateMutator) SetPreviousEpochAttestations(attestations *solid.ListSSZ[*solid.PendingAttestation]) { - -} diff --git a/cl/beacon/synced_data/interface.go b/cl/beacon/synced_data/interface.go index c32b4ec2c78..e7566b8eea8 100644 --- a/cl/beacon/synced_data/interface.go +++ b/cl/beacon/synced_data/interface.go @@ -1,16 +1,12 @@ package synced_data -import ( - "github.com/ledgerwatch/erigon/cl/abstract" - "github.com/ledgerwatch/erigon/cl/phase1/core/state" -) +import "github.com/ledgerwatch/erigon/cl/phase1/core/state" //go:generate mockgen -typed=true -destination=./mock_services/synced_data_mock.go -package=mock_services . SyncedData type SyncedData interface { - OnHeadState(newState *state.CachingBeaconState) error + OnHeadState(newState *state.CachingBeaconState) (err error) HeadState() *state.CachingBeaconState - HeadStateReader() abstract.BeaconStateReader - HeadStateMutator() abstract.BeaconStateMutator + HeadStateReader() state.BeaconStateReader Syncing() bool HeadSlot() uint64 } diff --git a/cl/beacon/synced_data/mock_services/synced_data_mock.go b/cl/beacon/synced_data/mock_services/synced_data_mock.go index 819fb61e280..a5111b2e485 100644 --- a/cl/beacon/synced_data/mock_services/synced_data_mock.go +++ b/cl/beacon/synced_data/mock_services/synced_data_mock.go @@ -12,7 +12,6 @@ package mock_services import ( reflect "reflect" - abstract "github.com/ledgerwatch/erigon/cl/abstract" state "github.com/ledgerwatch/erigon/cl/phase1/core/state" gomock "go.uber.org/mock/gomock" ) @@ -116,49 +115,11 @@ func (c *MockSyncedDataHeadStateCall) DoAndReturn(f func() *state.CachingBeaconS return c } -// HeadStateMutator mocks base method. -func (m *MockSyncedData) HeadStateMutator() abstract.BeaconStateMutator { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HeadStateMutator") - ret0, _ := ret[0].(abstract.BeaconStateMutator) - return ret0 -} - -// HeadStateMutator indicates an expected call of HeadStateMutator. 
-func (mr *MockSyncedDataMockRecorder) HeadStateMutator() *MockSyncedDataHeadStateMutatorCall {
-	mr.mock.ctrl.T.Helper()
-	call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadStateMutator", reflect.TypeOf((*MockSyncedData)(nil).HeadStateMutator))
-	return &MockSyncedDataHeadStateMutatorCall{Call: call}
-}
-
-// MockSyncedDataHeadStateMutatorCall wrap *gomock.Call
-type MockSyncedDataHeadStateMutatorCall struct {
-	*gomock.Call
-}
-
-// Return rewrite *gomock.Call.Return
-func (c *MockSyncedDataHeadStateMutatorCall) Return(arg0 abstract.BeaconStateMutator) *MockSyncedDataHeadStateMutatorCall {
-	c.Call = c.Call.Return(arg0)
-	return c
-}
-
-// Do rewrite *gomock.Call.Do
-func (c *MockSyncedDataHeadStateMutatorCall) Do(f func() abstract.BeaconStateMutator) *MockSyncedDataHeadStateMutatorCall {
-	c.Call = c.Call.Do(f)
-	return c
-}
-
-// DoAndReturn rewrite *gomock.Call.DoAndReturn
-func (c *MockSyncedDataHeadStateMutatorCall) DoAndReturn(f func() abstract.BeaconStateMutator) *MockSyncedDataHeadStateMutatorCall {
-	c.Call = c.Call.DoAndReturn(f)
-	return c
-}
-
 // HeadStateReader mocks base method.
-func (m *MockSyncedData) HeadStateReader() abstract.BeaconStateReader {
+func (m *MockSyncedData) HeadStateReader() state.BeaconStateReader {
 	m.ctrl.T.Helper()
 	ret := m.ctrl.Call(m, "HeadStateReader")
-	ret0, _ := ret[0].(abstract.BeaconStateReader)
+	ret0, _ := ret[0].(state.BeaconStateReader)
 	return ret0
 }
 
@@ -175,19 +136,19 @@ type MockSyncedDataHeadStateReaderCall struct {
 }
 
 // Return rewrite *gomock.Call.Return
-func (c *MockSyncedDataHeadStateReaderCall) Return(arg0 abstract.BeaconStateReader) *MockSyncedDataHeadStateReaderCall {
+func (c *MockSyncedDataHeadStateReaderCall) Return(arg0 state.BeaconStateReader) *MockSyncedDataHeadStateReaderCall {
 	c.Call = c.Call.Return(arg0)
 	return c
 }
 
 // Do rewrite *gomock.Call.Do
-func (c *MockSyncedDataHeadStateReaderCall) Do(f func() abstract.BeaconStateReader) *MockSyncedDataHeadStateReaderCall {
+func (c *MockSyncedDataHeadStateReaderCall) Do(f func() state.BeaconStateReader) *MockSyncedDataHeadStateReaderCall {
 	c.Call = c.Call.Do(f)
 	return c
 }
 
 // DoAndReturn rewrite *gomock.Call.DoAndReturn
-func (c *MockSyncedDataHeadStateReaderCall) DoAndReturn(f func() abstract.BeaconStateReader) *MockSyncedDataHeadStateReaderCall {
+func (c *MockSyncedDataHeadStateReaderCall) DoAndReturn(f func() state.BeaconStateReader) *MockSyncedDataHeadStateReaderCall {
 	c.Call = c.Call.DoAndReturn(f)
 	return c
 }
diff --git a/cl/beacon/synced_data/synced_data.go b/cl/beacon/synced_data/synced_data.go
index 58bfa673f5d..9248142d902 100644
--- a/cl/beacon/synced_data/synced_data.go
+++ b/cl/beacon/synced_data/synced_data.go
@@ -3,7 +3,6 @@ package synced_data
 import (
 	"sync/atomic"
 
-	"github.com/ledgerwatch/erigon/cl/abstract"
 	"github.com/ledgerwatch/erigon/cl/clparams"
 	"github.com/ledgerwatch/erigon/cl/phase1/core/state"
 )
@@ -44,15 +43,7 @@ func (s *SyncedDataManager) HeadState() *state.CachingBeaconState {
 	return nil
 }
 
-func (s *SyncedDataManager) HeadStateReader() abstract.BeaconStateReader {
-	headstate := s.HeadState()
-	if headstate == nil {
-		return nil
-	}
-	return headstate
-}
-
-func (s *SyncedDataManager) HeadStateMutator() abstract.BeaconStateMutator {
+func (s *SyncedDataManager) HeadStateReader() state.BeaconStateReader {
 	headstate := s.HeadState()
 	if headstate == nil {
 		return nil
diff --git a/cl/clparams/version.go b/cl/clparams/version.go
index 7ba9f962b9a..c181337e337 100644
--- a/cl/clparams/version.go
+++ b/cl/clparams/version.go
@@ -10,7 +10,6 @@ const (
 	BellatrixVersion StateVersion = 2
 	CapellaVersion StateVersion = 3
 	DenebVersion StateVersion = 4
-	ElectraVersion StateVersion = 5
 )
 
 // stringToClVersion converts the string to the current state version.
@@ -26,8 +25,6 @@ func StringToClVersion(s string) (StateVersion, error) {
 		return CapellaVersion, nil
 	case "deneb":
 		return DenebVersion, nil
-	case "electra":
-		return ElectraVersion, nil
 	default:
 		return 0, fmt.Errorf("unsupported fork version %s", s)
 	}
@@ -45,8 +42,6 @@ func ClVersionToString(s StateVersion) string {
 		return "capella"
 	case DenebVersion:
 		return "deneb"
-	case ElectraVersion:
-		return "electra"
 	default:
 		panic("unsupported fork version")
 	}
diff --git a/cl/cltypes/beacon_block_test.go b/cl/cltypes/beacon_block_test.go
index 73e4c111ec1..ae9c18347c1 100644
--- a/cl/cltypes/beacon_block_test.go
+++ b/cl/cltypes/beacon_block_test.go
@@ -39,7 +39,7 @@ func TestBeaconBody(t *testing.T) {
 		BaseFee: big.NewInt(1),
 	}, []types.Transaction{types.NewTransaction(1, [20]byte{}, uint256.NewInt(1), 5, uint256.NewInt(2), nil)}, nil, nil, types.Withdrawals{&types.Withdrawal{
 		Index: 69,
-	}}, nil /*requests*/)
+	}})
 
 	// Test BeaconBody
 	body := &BeaconBody{
diff --git a/cl/phase1/core/state/interface.go b/cl/phase1/core/state/interface.go
new file mode 100644
index 00000000000..eb1e14a3973
--- /dev/null
+++ b/cl/phase1/core/state/interface.go
@@ -0,0 +1,12 @@
+package state
+
+import libcommon "github.com/ledgerwatch/erigon-lib/common"
+
+// BeaconStateReader is an interface for reading the beacon state.
+//
+//go:generate mockgen -typed=true -destination=./mock_services/beacon_state_reader_mock.go -package=mock_services . BeaconStateReader
+type BeaconStateReader interface {
+	ValidatorPublicKey(index int) (libcommon.Bytes48, error)
+	GetDomain(domainType [4]byte, epoch uint64) ([]byte, error)
+	CommitteeCount(epoch uint64) uint64
+}
diff --git a/cl/abstract/mock_services/beacon_state_reader_mock.go b/cl/phase1/core/state/mock_services/beacon_state_reader_mock.go
similarity index 54%
rename from cl/abstract/mock_services/beacon_state_reader_mock.go
rename to cl/phase1/core/state/mock_services/beacon_state_reader_mock.go
index 3f92cdb0131..94875a81103 100644
--- a/cl/abstract/mock_services/beacon_state_reader_mock.go
+++ b/cl/phase1/core/state/mock_services/beacon_state_reader_mock.go
@@ -1,5 +1,5 @@
 // Code generated by MockGen. DO NOT EDIT.
-// Source: github.com/ledgerwatch/erigon/cl/abstract (interfaces: BeaconStateReader)
+// Source: github.com/ledgerwatch/erigon/cl/phase1/core/state (interfaces: BeaconStateReader)
 //
 // Generated by this command:
 //
@@ -13,8 +13,6 @@ import (
 	reflect "reflect"
 
 	common "github.com/ledgerwatch/erigon-lib/common"
-	clparams "github.com/ledgerwatch/erigon/cl/clparams"
-	solid "github.com/ledgerwatch/erigon/cl/cltypes/solid"
 	gomock "go.uber.org/mock/gomock"
 )
 
@@ -79,44 +77,6 @@ func (c *MockBeaconStateReaderCommitteeCountCall) DoAndReturn(f func(uint64) uin
 	return c
 }
 
-// GenesisValidatorsRoot mocks base method.
-func (m *MockBeaconStateReader) GenesisValidatorsRoot() common.Hash {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "GenesisValidatorsRoot")
-	ret0, _ := ret[0].(common.Hash)
-	return ret0
-}
-
-// GenesisValidatorsRoot indicates an expected call of GenesisValidatorsRoot.
-func (mr *MockBeaconStateReaderMockRecorder) GenesisValidatorsRoot() *MockBeaconStateReaderGenesisValidatorsRootCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenesisValidatorsRoot", reflect.TypeOf((*MockBeaconStateReader)(nil).GenesisValidatorsRoot)) - return &MockBeaconStateReaderGenesisValidatorsRootCall{Call: call} -} - -// MockBeaconStateReaderGenesisValidatorsRootCall wrap *gomock.Call -type MockBeaconStateReaderGenesisValidatorsRootCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateReaderGenesisValidatorsRootCall) Return(arg0 common.Hash) *MockBeaconStateReaderGenesisValidatorsRootCall { - c.Call = c.Call.Return(arg0) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateReaderGenesisValidatorsRootCall) Do(f func() common.Hash) *MockBeaconStateReaderGenesisValidatorsRootCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateReaderGenesisValidatorsRootCall) DoAndReturn(f func() common.Hash) *MockBeaconStateReaderGenesisValidatorsRootCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - // GetDomain mocks base method. func (m *MockBeaconStateReader) GetDomain(arg0 [4]byte, arg1 uint64) ([]byte, error) { m.ctrl.T.Helper() @@ -156,45 +116,6 @@ func (c *MockBeaconStateReaderGetDomainCall) DoAndReturn(f func([4]byte, uint64) return c } -// ValidatorForValidatorIndex mocks base method. -func (m *MockBeaconStateReader) ValidatorForValidatorIndex(arg0 int) (solid.Validator, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidatorForValidatorIndex", arg0) - ret0, _ := ret[0].(solid.Validator) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ValidatorForValidatorIndex indicates an expected call of ValidatorForValidatorIndex. -func (mr *MockBeaconStateReaderMockRecorder) ValidatorForValidatorIndex(arg0 any) *MockBeaconStateReaderValidatorForValidatorIndexCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidatorForValidatorIndex", reflect.TypeOf((*MockBeaconStateReader)(nil).ValidatorForValidatorIndex), arg0) - return &MockBeaconStateReaderValidatorForValidatorIndexCall{Call: call} -} - -// MockBeaconStateReaderValidatorForValidatorIndexCall wrap *gomock.Call -type MockBeaconStateReaderValidatorForValidatorIndexCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockBeaconStateReaderValidatorForValidatorIndexCall) Return(arg0 solid.Validator, arg1 error) *MockBeaconStateReaderValidatorForValidatorIndexCall { - c.Call = c.Call.Return(arg0, arg1) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockBeaconStateReaderValidatorForValidatorIndexCall) Do(f func(int) (solid.Validator, error)) *MockBeaconStateReaderValidatorForValidatorIndexCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockBeaconStateReaderValidatorForValidatorIndexCall) DoAndReturn(f func(int) (solid.Validator, error)) *MockBeaconStateReaderValidatorForValidatorIndexCall { - c.Call = c.Call.DoAndReturn(f) - return c -} - // ValidatorPublicKey mocks base method. func (m *MockBeaconStateReader) ValidatorPublicKey(arg0 int) (common.Bytes48, error) { m.ctrl.T.Helper() @@ -233,41 +154,3 @@ func (c *MockBeaconStateReaderValidatorPublicKeyCall) DoAndReturn(f func(int) (c c.Call = c.Call.DoAndReturn(f) return c } - -// Version mocks base method. 
-func (m *MockBeaconStateReader) Version() clparams.StateVersion {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "Version")
-	ret0, _ := ret[0].(clparams.StateVersion)
-	return ret0
-}
-
-// Version indicates an expected call of Version.
-func (mr *MockBeaconStateReaderMockRecorder) Version() *MockBeaconStateReaderVersionCall {
-	mr.mock.ctrl.T.Helper()
-	call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockBeaconStateReader)(nil).Version))
-	return &MockBeaconStateReaderVersionCall{Call: call}
-}
-
-// MockBeaconStateReaderVersionCall wrap *gomock.Call
-type MockBeaconStateReaderVersionCall struct {
-	*gomock.Call
-}
-
-// Return rewrite *gomock.Call.Return
-func (c *MockBeaconStateReaderVersionCall) Return(arg0 clparams.StateVersion) *MockBeaconStateReaderVersionCall {
-	c.Call = c.Call.Return(arg0)
-	return c
-}
-
-// Do rewrite *gomock.Call.Do
-func (c *MockBeaconStateReaderVersionCall) Do(f func() clparams.StateVersion) *MockBeaconStateReaderVersionCall {
-	c.Call = c.Call.Do(f)
-	return c
-}
-
-// DoAndReturn rewrite *gomock.Call.DoAndReturn
-func (c *MockBeaconStateReaderVersionCall) DoAndReturn(f func() clparams.StateVersion) *MockBeaconStateReaderVersionCall {
-	c.Call = c.Call.DoAndReturn(f)
-	return c
-}
diff --git a/cl/phase1/execution_client/block_collector/block_collector.go b/cl/phase1/execution_client/block_collector/block_collector.go
index 79eea139781..933dbba2118 100644
--- a/cl/phase1/execution_client/block_collector/block_collector.go
+++ b/cl/phase1/execution_client/block_collector/block_collector.go
@@ -109,9 +109,8 @@ func (b *blockCollector) Flush(ctx context.Context) error {
 				b.logger.Warn("bad blocks segment received", "err", err)
 				return err
 			}
-			blocksBatch = append(blocksBatch, types.NewBlockFromStorage(executionPayload.BlockHash, header, txs, nil, body.Withdrawals, body.Requests))
+			blocksBatch = append(blocksBatch, types.NewBlockFromStorage(executionPayload.BlockHash, header, txs, nil, body.Withdrawals))
 			if len(blocksBatch) >= batchSize {
-				b.logger.Info("[Caplin] Inserting blocks", "from", blocksBatch[0].NumberU64(), "to", blocksBatch[len(blocksBatch)-1].NumberU64())
 				if err := b.engine.InsertBlocks(ctx, blocksBatch, true); err != nil {
 					b.logger.Warn("failed to insert blocks", "err", err)
 				}
diff --git a/cl/phase1/execution_client/execution_client_direct.go b/cl/phase1/execution_client/execution_client_direct.go
index 5ff16bb0df7..6679e38dfb1 100644
--- a/cl/phase1/execution_client/execution_client_direct.go
+++ b/cl/phase1/execution_client/execution_client_direct.go
@@ -40,7 +40,7 @@ func (cc *ExecutionClientDirect) NewPayload(ctx context.Context, payload *cltype
 		return true, err
 	}
 
-	if err := cc.chainRW.InsertBlockAndWait(ctx, types.NewBlockFromStorage(payload.BlockHash, header, txs, nil, body.Withdrawals, body.Requests)); err != nil {
+	if err := cc.chainRW.InsertBlockAndWait(ctx, types.NewBlockFromStorage(payload.BlockHash, header, txs, nil, body.Withdrawals)); err != nil {
 		return false, err
 	}
 
diff --git a/cl/phase1/network/services/attestation_service_test.go b/cl/phase1/network/services/attestation_service_test.go
index 1e2cb816d15..18428bf79ec 100644
--- a/cl/phase1/network/services/attestation_service_test.go
+++ b/cl/phase1/network/services/attestation_service_test.go
@@ -10,12 +10,12 @@ import (
 
 	"github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/types/ssz"
-	"github.com/ledgerwatch/erigon/cl/abstract"
-	mockState "github.com/ledgerwatch/erigon/cl/abstract/mock_services"
 	mockSync "github.com/ledgerwatch/erigon/cl/beacon/synced_data/mock_services"
 	"github.com/ledgerwatch/erigon/cl/clparams"
 	"github.com/ledgerwatch/erigon/cl/cltypes"
 	"github.com/ledgerwatch/erigon/cl/cltypes/solid"
+	"github.com/ledgerwatch/erigon/cl/phase1/core/state"
+	mockState "github.com/ledgerwatch/erigon/cl/phase1/core/state/mock_services"
 	"github.com/ledgerwatch/erigon/cl/phase1/forkchoice/mock_services"
 	"github.com/ledgerwatch/erigon/cl/utils/eth_clock"
 	mockCommittee "github.com/ledgerwatch/erigon/cl/validator/committee_subscription/mock_services"
@@ -84,7 +84,7 @@ func (t *attestationTestSuite) TestAttestationProcessMessage() {
 			name: "Test attestation with committee index out of range",
 			mock: func() {
 				t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1)
-				computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 {
+				computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 {
 					return 1
 				}
 			},
@@ -99,7 +99,7 @@
 			name: "Test attestation with wrong subnet",
 			mock: func() {
 				t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1)
-				computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 {
+				computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 {
 					return 5
 				}
 				computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 {
@@ -117,7 +117,7 @@
 			name: "Test attestation with wrong slot (current_slot < slot)",
 			mock: func() {
 				t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1)
-				computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 {
+				computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 {
 					return 5
 				}
 				computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 {
@@ -136,7 +136,7 @@
 			name: "Attestation is aggregated",
 			mock: func() {
 				t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1)
-				computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 {
+				computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 {
 					return 5
 				}
 				computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 {
@@ -159,7 +159,7 @@
 			name: "Attestation is empty",
 			mock: func() {
 				t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1)
-				computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 {
+				computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 {
 					return 5
 				}
 				computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 {
@@ -182,7 +182,7 @@
 			name: "invalid signature",
 			mock: func() {
 				t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1)
-				computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 {
+				computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 {
 					return 5
 				}
 				computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 {
@@ -209,7 +209,7 @@
 			name: "block header not found",
 			mock: func() {
 				t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1)
-				computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 {
+				computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 {
 					return 8
 				}
 				computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 {
@@ -236,7 +236,7 @@
 			name: "invalid target block",
 			mock: func() {
 				t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1)
-				computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 {
+				computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 {
 					return 8
 				}
 				computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 {
@@ -266,7 +266,7 @@
 			name: "invalid finality checkpoint",
 			mock: func() {
 				t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1)
-				computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 {
+				computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 {
 					return 8
 				}
 				computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 {
@@ -304,7 +304,7 @@
 			name: "success",
 			mock: func() {
 				t.syncedData.EXPECT().HeadStateReader().Return(t.beaconStateReader).Times(1)
-				computeCommitteeCountPerSlot = func(_ abstract.BeaconStateReader, _, _ uint64) uint64 {
+				computeCommitteeCountPerSlot = func(_ state.BeaconStateReader, _, _ uint64) uint64 {
 					return 8
 				}
 				computeSubnetForAttestation = func(_, _, _, _, _ uint64) uint64 {
diff --git a/cl/phase1/network/services/blob_sidecar_service_test.go b/cl/phase1/network/services/blob_sidecar_service_test.go
index ce70b897101..e2768e1ee96 100644
--- a/cl/phase1/network/services/blob_sidecar_service_test.go
+++ b/cl/phase1/network/services/blob_sidecar_service_test.go
@@ -65,24 +65,20 @@ func TestBlobServiceUnsynced(t *testing.T) {
 	ctrl := gomock.NewController(t)
 	defer ctrl.Finish()
 
-	blobService, _, _, _ := setupBlobSidecarService(t, ctrl, true)
+	blobService, _, _, _ := setupBlobSidecarService(t, ctrl, false)
 
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-	require.Error(t, blobService.ProcessMessage(ctx, nil, &cltypes.BlobSidecar{}))
+	require.Error(t, blobService.ProcessMessage(context.Background(), nil, &cltypes.BlobSidecar{}))
 }
 
 func TestBlobServiceInvalidIndex(t *testing.T) {
 	ctrl := gomock.NewController(t)
 	defer ctrl.Finish()
 
-	blobService, syncedData, _, _ := setupBlobSidecarService(t, ctrl, true)
+	blobService, syncedData, _, _ := setupBlobSidecarService(t, ctrl, false)
 	stateObj, _, _ := getObjectsForBlobSidecarServiceTests(t)
 	syncedData.OnHeadState(stateObj)
 
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-	require.Error(t, blobService.ProcessMessage(ctx, nil, &cltypes.BlobSidecar{
+	require.Error(t, blobService.ProcessMessage(context.Background(), nil, &cltypes.BlobSidecar{
 		Index: 99999,
 	}))
 }
@@ -91,14 +87,12 @@ func TestBlobServiceInvalidSubnet(t *testing.T) {
 	ctrl := gomock.NewController(t)
 	defer ctrl.Finish()
 
-	blobService, syncedData, _, _ := setupBlobSidecarService(t, ctrl, true)
+	blobService, syncedData, _, _ := setupBlobSidecarService(t, ctrl, false)
 	stateObj, _, _ := getObjectsForBlobSidecarServiceTests(t)
 	syncedData.OnHeadState(stateObj)
 	sn := uint64(99999)
 
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-	require.Error(t, blobService.ProcessMessage(ctx, &sn, 
&cltypes.BlobSidecar{ + require.Error(t, blobService.ProcessMessage(context.Background(), &sn, &cltypes.BlobSidecar{ Index: 0, })) } @@ -115,9 +109,7 @@ func TestBlobServiceBadTimings(t *testing.T) { ethClock.EXPECT().GetCurrentSlot().Return(uint64(0)).AnyTimes() ethClock.EXPECT().IsSlotCurrentSlotWithMaximumClockDisparity(gomock.Any()).Return(false).AnyTimes() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - require.Error(t, blobService.ProcessMessage(ctx, &sn, blobSidecar)) + require.Error(t, blobService.ProcessMessage(context.Background(), &sn, blobSidecar)) } func TestBlobServiceAlreadyHave(t *testing.T) { @@ -136,9 +128,7 @@ func TestBlobServiceAlreadyHave(t *testing.T) { ethClock.EXPECT().GetCurrentSlot().Return(uint64(0)).AnyTimes() ethClock.EXPECT().IsSlotCurrentSlotWithMaximumClockDisparity(gomock.Any()).Return(true).AnyTimes() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - require.Error(t, blobService.ProcessMessage(ctx, &sn, blobSidecar)) + require.Error(t, blobService.ProcessMessage(context.Background(), &sn, blobSidecar)) } func TestBlobServiceDontHaveParentRoot(t *testing.T) { @@ -155,9 +145,7 @@ func TestBlobServiceDontHaveParentRoot(t *testing.T) { ethClock.EXPECT().GetCurrentSlot().Return(uint64(0)).AnyTimes() ethClock.EXPECT().IsSlotCurrentSlotWithMaximumClockDisparity(gomock.Any()).Return(true).AnyTimes() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - require.Error(t, blobService.ProcessMessage(ctx, &sn, blobSidecar)) + require.Error(t, blobService.ProcessMessage(context.Background(), &sn, blobSidecar)) } func TestBlobServiceInvalidSidecarSlot(t *testing.T) { @@ -174,9 +162,7 @@ func TestBlobServiceInvalidSidecarSlot(t *testing.T) { ethClock.EXPECT().GetCurrentSlot().Return(uint64(0)).AnyTimes() ethClock.EXPECT().IsSlotCurrentSlotWithMaximumClockDisparity(gomock.Any()).Return(true).AnyTimes() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - require.Error(t, blobService.ProcessMessage(ctx, &sn, blobSidecar)) + require.Error(t, blobService.ProcessMessage(context.Background(), &sn, blobSidecar)) } func TestBlobServiceSuccess(t *testing.T) { diff --git a/cl/phase1/network/services/bls_to_execution_change_service.go b/cl/phase1/network/services/bls_to_execution_change_service.go index 9591e6f271c..0f9ba191946 100644 --- a/cl/phase1/network/services/bls_to_execution_change_service.go +++ b/cl/phase1/network/services/bls_to_execution_change_service.go @@ -5,6 +5,7 @@ import ( "context" "fmt" + "github.com/Giulio2002/bls" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/cl/beacon/beaconevents" "github.com/ledgerwatch/erigon/cl/beacon/synced_data" @@ -18,14 +19,14 @@ import ( type blsToExecutionChangeService struct { operationsPool pool.OperationsPool emitters *beaconevents.Emitters - syncedDataManager synced_data.SyncedData + syncedDataManager *synced_data.SyncedDataManager beaconCfg *clparams.BeaconChainConfig } func NewBLSToExecutionChangeService( operationsPool pool.OperationsPool, emitters *beaconevents.Emitters, - syncedDataManager synced_data.SyncedData, + syncedDataManager *synced_data.SyncedDataManager, beaconCfg *clparams.BeaconChainConfig, ) BLSToExecutionChangeService { return &blsToExecutionChangeService{ @@ -45,24 +46,20 @@ func (s *blsToExecutionChangeService) ProcessMessage(ctx context.Context, subnet return ErrIgnore } change := msg.Message - stateReader := s.syncedDataManager.HeadStateReader() - if stateReader == 
nil { - return ErrIgnore - } - stateMutator := s.syncedDataManager.HeadStateMutator() - if stateMutator == nil { + state := s.syncedDataManager.HeadState() + if state == nil { return ErrIgnore } // [IGNORE] current_epoch >= CAPELLA_FORK_EPOCH, where current_epoch is defined by the current wall-clock time. - if !(stateReader.Version() >= clparams.CapellaVersion) { + if !(state.Version() >= clparams.CapellaVersion) { return ErrIgnore } // ref: https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#new-process_bls_to_execution_change // assert address_change.validator_index < len(state.validators) - validator, err := stateReader.ValidatorForValidatorIndex(int(change.ValidatorIndex)) + validator, err := state.ValidatorForValidatorIndex(int(change.ValidatorIndex)) if err != nil { - return fmt.Errorf("unable to retrieve validator: %v", err) + return fmt.Errorf("unable to retrieve validator: %v", err) } wc := validator.WithdrawalCredentials() @@ -76,20 +73,20 @@ func (s *blsToExecutionChangeService) ProcessMessage(ctx context.Context, subnet // Check the validator's withdrawal credentials against the provided message. hashedFrom := utils.Sha256(change.From[:]) if !bytes.Equal(hashedFrom[1:], wc[1:]) { - return fmt.Errorf("invalid withdrawal credentials hash") + return fmt.Errorf("invalid withdrawal credentials") } // assert bls.Verify(address_change.from_bls_pubkey, signing_root, signed_address_change.signature) - genesisValidatorRoot := stateReader.GenesisValidatorsRoot() + genesisValidatorRoot := state.GenesisValidatorsRoot() domain, err := fork.ComputeDomain(s.beaconCfg.DomainBLSToExecutionChange[:], utils.Uint32ToBytes4(uint32(s.beaconCfg.GenesisForkVersion)), genesisValidatorRoot) if err != nil { return err } - signedRoot, err := computeSigningRoot(change, domain) + signedRoot, err := fork.ComputeSigningRoot(change, domain) if err != nil { return err } - valid, err := blsVerify(msg.Signature[:], signedRoot[:], change.From[:]) + valid, err := bls.Verify(msg.Signature[:], signedRoot[:], change.From[:]) if err != nil { return err } @@ -104,9 +101,9 @@ func (s *blsToExecutionChangeService) ProcessMessage(ctx context.Context, subnet // ) newWc := libcommon.Hash{} newWc[0] = byte(s.beaconCfg.ETH1AddressWithdrawalPrefixByte) - copy(newWc[1:], make([]byte, 11)) - copy(newWc[12:], change.To[:]) - stateMutator.SetWithdrawalCredentialForValidatorAtIndex(int(change.ValidatorIndex), newWc) + copy(newWc[1:], make([]byte, 11)) + copy(newWc[12:], change.To[:]) + state.SetWithdrawalCredentialForValidatorAtIndex(int(change.ValidatorIndex), newWc) s.operationsPool.BLSToExecutionChangesPool.Insert(msg.Signature, msg) return nil diff --git a/cl/phase1/network/services/bls_to_execution_change_service_test.go b/cl/phase1/network/services/bls_to_execution_change_service_test.go deleted file mode 100644 index df264c5c1f1..00000000000 --- a/cl/phase1/network/services/bls_to_execution_change_service_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package services - -import ( - "context" - "errors" - "fmt" - "log" - "testing" - - "github.com/ledgerwatch/erigon-lib/common" - mockState "github.com/ledgerwatch/erigon/cl/abstract/mock_services" - "github.com/ledgerwatch/erigon/cl/beacon/beaconevents" - mockSync "github.com/ledgerwatch/erigon/cl/beacon/synced_data/mock_services" - "github.com/ledgerwatch/erigon/cl/clparams" - "github.com/ledgerwatch/erigon/cl/cltypes" - "github.com/ledgerwatch/erigon/cl/cltypes/solid" - "github.com/ledgerwatch/erigon/cl/pool" - "github.com/ledgerwatch/erigon/cl/utils" - 
"github.com/stretchr/testify/suite" - "go.uber.org/mock/gomock" -) - -type blsToExecutionChangeTestSuite struct { - suite.Suite - gomockCtrl *gomock.Controller - operationsPool *pool.OperationsPool - emitters *beaconevents.Emitters - syncedData *mockSync.MockSyncedData - beaconCfg *clparams.BeaconChainConfig - - service BLSToExecutionChangeService - mockFuncs *mockFuncs -} - -func (t *blsToExecutionChangeTestSuite) SetupTest() { - t.gomockCtrl = gomock.NewController(t.T()) - t.operationsPool = &pool.OperationsPool{ - BLSToExecutionChangesPool: pool.NewOperationPool[common.Bytes96, *cltypes.SignedBLSToExecutionChange](10, "blsToExecutionChangesPool"), - } - t.syncedData = mockSync.NewMockSyncedData(t.gomockCtrl) - t.emitters = beaconevents.NewEmitters() - t.beaconCfg = &clparams.BeaconChainConfig{} - t.service = NewBLSToExecutionChangeService(*t.operationsPool, t.emitters, t.syncedData, t.beaconCfg) - // mock global functions - t.mockFuncs = &mockFuncs{ - ctrl: t.gomockCtrl, - } - computeSigningRoot = t.mockFuncs.ComputeSigningRoot - blsVerify = t.mockFuncs.BlsVerify -} - -func (t *blsToExecutionChangeTestSuite) TearDownTest() { - t.gomockCtrl.Finish() -} - -func (t *blsToExecutionChangeTestSuite) TestProcessMessage() { - mockMsg := &cltypes.SignedBLSToExecutionChange{ - Message: &cltypes.BLSToExecutionChange{ - ValidatorIndex: 1, - From: common.Bytes48{1, 2, 3, 4, 5, 6}, - To: common.Address{3, 2, 1}, - }, - Signature: [96]byte{1, 2, 3}, - } - - tests := []struct { - name string - mock func() - msg *cltypes.SignedBLSToExecutionChange - wantErr bool - specificErr error - }{ - { - name: "signature already exists in pool", - mock: func() { - t.operationsPool.BLSToExecutionChangesPool.Insert(mockMsg.Signature, mockMsg) - }, - msg: mockMsg, - wantErr: true, - }, - { - name: "version is less than CapellaVersion", - mock: func() { - mockStateReader := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockStateMutator := mockState.NewMockBeaconStateMutator(t.gomockCtrl) - mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion - 1).Times(1) - t.syncedData.EXPECT().HeadStateReader().Return(mockStateReader).Times(1) - t.syncedData.EXPECT().HeadStateMutator().Return(mockStateMutator).Times(1) - }, - msg: mockMsg, - wantErr: true, - specificErr: ErrIgnore, - }, - { - name: "unable to retrieve validator", - mock: func() { - mockStateReader := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockStateMutator := mockState.NewMockBeaconStateMutator(t.gomockCtrl) - mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion).Times(1) - mockStateReader.EXPECT().ValidatorForValidatorIndex(int(mockMsg.Message.ValidatorIndex)).Return(nil, errors.New("not found")).Times(1) - t.syncedData.EXPECT().HeadStateReader().Return(mockStateReader).Times(1) - t.syncedData.EXPECT().HeadStateMutator().Return(mockStateMutator).Times(1) - }, - msg: mockMsg, - wantErr: true, - }, - { - name: "invalid withdrawal credentials prefix", - mock: func() { - mockStateReader := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockStateMutator := mockState.NewMockBeaconStateMutator(t.gomockCtrl) - mockValidator := solid.NewValidator() - mockValidator.SetWithdrawalCredentials([32]byte{1, 1, 1}) // should be equal to BLS_WITHDRAWAL_PREFIX - mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion).Times(1) - mockStateReader.EXPECT().ValidatorForValidatorIndex(int(mockMsg.Message.ValidatorIndex)).Return(mockValidator, nil).Times(1) - t.syncedData.EXPECT().HeadStateReader().Return(mockStateReader).Times(1) - 
t.syncedData.EXPECT().HeadStateMutator().Return(mockStateMutator).Times(1) - }, - msg: mockMsg, - wantErr: true, - }, - { - name: "hashed from is not equal to withdrawal credentials", - mock: func() { - mockStateReader := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockStateMutator := mockState.NewMockBeaconStateMutator(t.gomockCtrl) - mockValidator := solid.NewValidator() - mockValidator.SetWithdrawalCredentials([32]byte{0}) // first byte is equal to BLS_WITHDRAWAL_PREFIX - mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion).Times(1) - mockStateReader.EXPECT().ValidatorForValidatorIndex(int(mockMsg.Message.ValidatorIndex)).Return(mockValidator, nil).Times(1) - t.syncedData.EXPECT().HeadStateReader().Return(mockStateReader).Times(1) - t.syncedData.EXPECT().HeadStateMutator().Return(mockStateMutator).Times(1) - }, - msg: mockMsg, - wantErr: true, - }, - { - name: "invalid bls signature", - mock: func() { - mockStateReader := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockStateMutator := mockState.NewMockBeaconStateMutator(t.gomockCtrl) - mockValidator := solid.NewValidator() - hashedFrom := utils.Sha256(mockMsg.Message.From[:]) - wc := [32]byte{0} - copy(wc[1:], hashedFrom[1:]) - mockValidator.SetWithdrawalCredentials(wc) - mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion).Times(1) - mockStateReader.EXPECT().ValidatorForValidatorIndex(int(mockMsg.Message.ValidatorIndex)).Return(mockValidator, nil).Times(1) - t.syncedData.EXPECT().HeadStateReader().Return(mockStateReader).Times(1) - t.syncedData.EXPECT().HeadStateMutator().Return(mockStateMutator).Times(1) - mockStateReader.EXPECT().GenesisValidatorsRoot().Return([32]byte{}).Times(1) - // bls verify - t.gomockCtrl.RecordCall(t.mockFuncs, "ComputeSigningRoot", mockMsg.Message, gomock.Any()).Return([32]byte{}, nil).Times(1) - t.gomockCtrl.RecordCall(t.mockFuncs, "BlsVerify", mockMsg.Signature[:], gomock.Any(), mockMsg.Message.From[:]).Return(false, nil).Times(1) - }, - msg: mockMsg, - wantErr: true, - }, - { - name: "pass", - mock: func() { - mockStateReader := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockStateMutator := mockState.NewMockBeaconStateMutator(t.gomockCtrl) - mockValidator := solid.NewValidator() - hashedFrom := utils.Sha256(mockMsg.Message.From[:]) - wc := [32]byte{0} - copy(wc[1:], hashedFrom[1:]) - mockValidator.SetWithdrawalCredentials(wc) - mockStateReader.EXPECT().Version().Return(clparams.CapellaVersion).Times(1) - mockStateReader.EXPECT().ValidatorForValidatorIndex(int(mockMsg.Message.ValidatorIndex)).Return(mockValidator, nil).Times(1) - t.syncedData.EXPECT().HeadStateReader().Return(mockStateReader).Times(1) - t.syncedData.EXPECT().HeadStateMutator().Return(mockStateMutator).Times(1) - mockStateReader.EXPECT().GenesisValidatorsRoot().Return([32]byte{}).Times(1) - // bls verify - t.gomockCtrl.RecordCall(t.mockFuncs, "ComputeSigningRoot", mockMsg.Message, gomock.Any()).Return([32]byte{}, nil).Times(1) - t.gomockCtrl.RecordCall(t.mockFuncs, "BlsVerify", mockMsg.Signature[:], gomock.Any(), mockMsg.Message.From[:]).Return(true, nil).Times(1) - // update withdrawal credentials - mockNewWc := common.Hash{byte(t.beaconCfg.ETH1AddressWithdrawalPrefixByte)} - copy(mockNewWc[1:], make([]byte, 11)) - copy(mockNewWc[12:], mockMsg.Message.To[:]) - mockStateMutator.EXPECT().SetWithdrawalCredentialForValidatorAtIndex(int(mockMsg.Message.ValidatorIndex), mockNewWc).Times(1) - }, - msg: mockMsg, - wantErr: false, - }, - } - - for _, tt := range tests { - log.Printf("Running test 
case: %s", tt.name) - t.SetupTest() - tt.mock() - err := t.service.ProcessMessage(context.Background(), nil, tt.msg) - if tt.wantErr { - t.Require().Error(err) - fmt.Printf("Error: %v\n", err) - if tt.specificErr != nil { - t.Require().Equal(tt.specificErr, err) - } - } else { - t.Require().NoError(err) - } - t.gomockCtrl.Satisfied() - } -} - -func TestBlsToExecutionChangeTestSuite(t *testing.T) { - suite.Run(t, new(blsToExecutionChangeTestSuite)) -} diff --git a/cl/phase1/network/services/global_mock_test.go b/cl/phase1/network/services/global_mock_test.go deleted file mode 100644 index 0e960abb90a..00000000000 --- a/cl/phase1/network/services/global_mock_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package services - -import ( - "github.com/ledgerwatch/erigon-lib/types/ssz" - "go.uber.org/mock/gomock" -) - -type mockFuncs struct { - ctrl *gomock.Controller -} - -func (m *mockFuncs) ComputeSigningRoot(obj ssz.HashableSSZ, domain []byte) ([32]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ComputeSigningRoot", obj, domain) - ret0, _ := ret[0].([32]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (m *mockFuncs) BlsVerify(pubkey, message, signature []byte) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BlsVerify", pubkey, message, signature) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} diff --git a/cl/phase1/network/services/proposer_slashing_service.go b/cl/phase1/network/services/proposer_slashing_service.go index cdb59156f0d..cfbf36d7525 100644 --- a/cl/phase1/network/services/proposer_slashing_service.go +++ b/cl/phase1/network/services/proposer_slashing_service.go @@ -4,9 +4,11 @@ import ( "context" "fmt" + "github.com/Giulio2002/bls" "github.com/ledgerwatch/erigon/cl/beacon/synced_data" "github.com/ledgerwatch/erigon/cl/clparams" "github.com/ledgerwatch/erigon/cl/cltypes" + "github.com/ledgerwatch/erigon/cl/fork" st "github.com/ledgerwatch/erigon/cl/phase1/core/state" "github.com/ledgerwatch/erigon/cl/phase1/core/state/lru" "github.com/ledgerwatch/erigon/cl/pool" @@ -15,7 +17,7 @@ import ( type proposerSlashingService struct { operationsPool pool.OperationsPool - syncedDataManager synced_data.SyncedData + syncedDataManager *synced_data.SyncedDataManager beaconCfg *clparams.BeaconChainConfig ethClock eth_clock.EthereumClock cache *lru.Cache[uint64, struct{}] @@ -23,7 +25,7 @@ type proposerSlashingService struct { func NewProposerSlashingService( operationsPool pool.OperationsPool, - syncedDataManager synced_data.SyncedData, + syncedDataManager *synced_data.SyncedDataManager, beaconCfg *clparams.BeaconChainConfig, ethClock eth_clock.EthereumClock, ) *proposerSlashingService { @@ -71,7 +73,7 @@ func (s *proposerSlashingService) ProcessMessage(ctx context.Context, subnet *ui } // Verify the proposer is slashable - state := s.syncedDataManager.HeadStateReader() + state := s.syncedDataManager.HeadState() if state == nil { return ErrIgnore } @@ -85,16 +87,16 @@ func (s *proposerSlashingService) ProcessMessage(ctx context.Context, subnet *ui // Verify signatures for both headers for _, signedHeader := range []*cltypes.SignedBeaconBlockHeader{msg.Header1, msg.Header2} { - domain, err := state.GetDomain(s.beaconCfg.DomainBeaconProposer, st.GetEpochAtSlot(s.beaconCfg, signedHeader.Header.Slot)) + domain, err := state.GetDomain(state.BeaconConfig().DomainBeaconProposer, st.GetEpochAtSlot(state.BeaconConfig(), signedHeader.Header.Slot)) if err != nil { return fmt.Errorf("unable to get domain: %v", err) } pk := 
proposer.PublicKey() - signingRoot, err := computeSigningRoot(signedHeader, domain) + signingRoot, err := fork.ComputeSigningRoot(signedHeader, domain) if err != nil { return fmt.Errorf("unable to compute signing root: %v", err) } - valid, err := blsVerify(signedHeader.Signature[:], signingRoot[:], pk[:]) + valid, err := bls.Verify(signedHeader.Signature[:], signingRoot[:], pk[:]) if err != nil { return fmt.Errorf("unable to verify signature: %v", err) } diff --git a/cl/phase1/network/services/proposer_slashing_service_test.go b/cl/phase1/network/services/proposer_slashing_service_test.go deleted file mode 100644 index f181a7b5406..00000000000 --- a/cl/phase1/network/services/proposer_slashing_service_test.go +++ /dev/null @@ -1,223 +0,0 @@ -package services - -import ( - "context" - "errors" - "log" - "testing" - - "github.com/ledgerwatch/erigon-lib/common" - mockState "github.com/ledgerwatch/erigon/cl/abstract/mock_services" - mockSync "github.com/ledgerwatch/erigon/cl/beacon/synced_data/mock_services" - "github.com/ledgerwatch/erigon/cl/clparams" - "github.com/ledgerwatch/erigon/cl/cltypes" - "github.com/ledgerwatch/erigon/cl/cltypes/solid" - "github.com/ledgerwatch/erigon/cl/pool" - "github.com/ledgerwatch/erigon/cl/utils/eth_clock" - "github.com/stretchr/testify/suite" - "go.uber.org/mock/gomock" -) - -type proposerSlashingTestSuite struct { - suite.Suite - gomockCtrl *gomock.Controller - operationsPool *pool.OperationsPool - syncedData *mockSync.MockSyncedData - beaconCfg *clparams.BeaconChainConfig - ethClock *eth_clock.MockEthereumClock - proposerSlashingService *proposerSlashingService - mockFuncs *mockFuncs -} - -func (t *proposerSlashingTestSuite) SetupTest() { - t.gomockCtrl = gomock.NewController(t.T()) - t.operationsPool = &pool.OperationsPool{ - ProposerSlashingsPool: pool.NewOperationPool[common.Bytes96, *cltypes.ProposerSlashing](10, "proposerSlashingsPool"), - } - t.syncedData = mockSync.NewMockSyncedData(t.gomockCtrl) - t.ethClock = eth_clock.NewMockEthereumClock(t.gomockCtrl) - t.beaconCfg = &clparams.BeaconChainConfig{ - SlotsPerEpoch: 2, - } - t.proposerSlashingService = NewProposerSlashingService(*t.operationsPool, t.syncedData, t.beaconCfg, t.ethClock) - // mock global functions - t.mockFuncs = &mockFuncs{ctrl: t.gomockCtrl} - computeSigningRoot = t.mockFuncs.ComputeSigningRoot - blsVerify = t.mockFuncs.BlsVerify -} - -func (t *proposerSlashingTestSuite) TearDownTest() { - t.gomockCtrl.Finish() -} - -func (t *proposerSlashingTestSuite) TestProcessMessage() { - mockProposerIndex := uint64(123) - mockMsg := &cltypes.ProposerSlashing{ - Header1: &cltypes.SignedBeaconBlockHeader{ - Header: &cltypes.BeaconBlockHeader{ - Slot: 1, - ProposerIndex: mockProposerIndex, - Root: common.Hash{1}, - }, - Signature: common.Bytes96{1, 2, 3}, - }, - Header2: &cltypes.SignedBeaconBlockHeader{ - Header: &cltypes.BeaconBlockHeader{ - Slot: 1, - ProposerIndex: mockProposerIndex, - Root: common.Hash{2}, - }, - Signature: common.Bytes96{4, 5, 6}, - }, - } - tests := []struct { - name string - mock func() - msg *cltypes.ProposerSlashing - wantErr bool - err error - }{ - { - name: "ignore proposer slashing", - mock: func() { - t.proposerSlashingService.cache.Add(mockProposerIndex, struct{}{}) - }, - msg: mockMsg, - wantErr: true, - err: ErrIgnore, - }, - { - name: "ignore proposer slashing in pool", - mock: func() { - t.operationsPool.ProposerSlashingsPool.Insert(pool.ComputeKeyForProposerSlashing(mockMsg), mockMsg) - }, - msg: mockMsg, - wantErr: true, - err: ErrIgnore, - }, - { - name: 
"non-matching slots", - mock: func() {}, - msg: &cltypes.ProposerSlashing{ - Header1: &cltypes.SignedBeaconBlockHeader{ - Header: &cltypes.BeaconBlockHeader{ - Slot: 1, - ProposerIndex: mockProposerIndex, - }, - }, - Header2: &cltypes.SignedBeaconBlockHeader{ - Header: &cltypes.BeaconBlockHeader{ - Slot: 2, - ProposerIndex: mockProposerIndex, - }, - }, - }, - wantErr: true, - }, - { - name: "non-matching proposer indices", - mock: func() {}, - msg: &cltypes.ProposerSlashing{ - Header1: &cltypes.SignedBeaconBlockHeader{ - Header: &cltypes.BeaconBlockHeader{ - Slot: 1, - ProposerIndex: mockProposerIndex, - }, - }, - Header2: &cltypes.SignedBeaconBlockHeader{ - Header: &cltypes.BeaconBlockHeader{ - Slot: 1, - ProposerIndex: mockProposerIndex + 1, - }, - }, - }, - wantErr: true, - }, - { - name: "empty head state", - mock: func() { - t.syncedData.EXPECT().HeadStateReader().Return(nil).Times(1) - }, - msg: mockMsg, - wantErr: true, - err: ErrIgnore, - }, - { - name: "validator not found", - mock: func() { - mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockState.EXPECT().ValidatorForValidatorIndex(int(mockProposerIndex)).Return(nil, errors.New("not found")).Times(1) - t.syncedData.EXPECT().HeadStateReader().Return(mockState).Times(1) - }, - msg: mockMsg, - wantErr: true, - }, - { - name: "proposer is not slashable", - mock: func() { - mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockValidator := solid.NewValidatorFromParameters( - [48]byte{}, - [32]byte{}, - 0, - false, - 0, - 0, - 0, - 0, - ) - mockState.EXPECT().ValidatorForValidatorIndex(int(mockProposerIndex)).Return(mockValidator, nil).Times(1) - t.syncedData.EXPECT().HeadStateReader().Return(mockState).Times(1) - t.ethClock.EXPECT().GetCurrentEpoch().Return(uint64(1)).Times(1) - }, - msg: mockMsg, - wantErr: true, - }, - { - name: "pass", - mock: func() { - mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockValidator := solid.NewValidatorFromParameters( - [48]byte{}, - [32]byte{}, - 0, - false, - 0, - 0, - 2, - 2, - ) - t.syncedData.EXPECT().HeadStateReader().Return(mockState).Times(1) - mockState.EXPECT().ValidatorForValidatorIndex(int(mockProposerIndex)).Return(mockValidator, nil).Times(1) - t.ethClock.EXPECT().GetCurrentEpoch().Return(uint64(1)).Times(1) - - mockState.EXPECT().GetDomain(t.beaconCfg.DomainBeaconProposer, gomock.Any()).Return([]byte{}, nil).Times(2) - t.mockFuncs.ctrl.RecordCall(t.mockFuncs, "ComputeSigningRoot", mockMsg.Header1, []byte{}).Return([32]byte{}, nil).Times(1) - t.mockFuncs.ctrl.RecordCall(t.mockFuncs, "ComputeSigningRoot", mockMsg.Header2, []byte{}).Return([32]byte{}, nil).Times(1) - t.mockFuncs.ctrl.RecordCall(t.mockFuncs, "BlsVerify", gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).Times(2) - }, - msg: mockMsg, - wantErr: false, - }, - } - - for _, tt := range tests { - log.Printf("Running test case: %s", tt.name) - t.SetupTest() - tt.mock() - err := t.proposerSlashingService.ProcessMessage(context.Background(), nil, tt.msg) - if tt.wantErr { - t.Assert().Error(err) - if tt.err != nil { - t.Assert().Equal(tt.err, err) - } - } else { - t.Assert().NoError(err) - } - t.gomockCtrl.Satisfied() - } -} - -func TestProposerSlashing(t *testing.T) { - suite.Run(t, new(proposerSlashingTestSuite)) -} diff --git a/cl/phase1/network/services/voluntary_exit_service.go b/cl/phase1/network/services/voluntary_exit_service.go index 3e192864739..925ed88e447 100644 --- a/cl/phase1/network/services/voluntary_exit_service.go +++ 
b/cl/phase1/network/services/voluntary_exit_service.go @@ -4,6 +4,7 @@ import ( "context" "fmt" + "github.com/Giulio2002/bls" "github.com/ledgerwatch/erigon/cl/beacon/beaconevents" "github.com/ledgerwatch/erigon/cl/beacon/synced_data" "github.com/ledgerwatch/erigon/cl/clparams" @@ -18,7 +19,7 @@ import ( type voluntaryExitService struct { operationsPool pool.OperationsPool emitters *beaconevents.Emitters - syncedDataManager synced_data.SyncedData + syncedDataManager *synced_data.SyncedDataManager beaconCfg *clparams.BeaconChainConfig ethClock eth_clock.EthereumClock } @@ -26,7 +27,7 @@ type voluntaryExitService struct { func NewVoluntaryExitService( operationsPool pool.OperationsPool, emitters *beaconevents.Emitters, - syncedDataManager synced_data.SyncedData, + syncedDataManager *synced_data.SyncedDataManager, beaconCfg *clparams.BeaconChainConfig, ethClock eth_clock.EthereumClock, ) VoluntaryExitService { @@ -51,7 +52,7 @@ func (s *voluntaryExitService) ProcessMessage(ctx context.Context, subnet *uint6 // ref: https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#voluntary-exits // def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None: - state := s.syncedDataManager.HeadStateReader() + state := s.syncedDataManager.HeadState() if state == nil { return ErrIgnore } @@ -95,16 +96,16 @@ func (s *voluntaryExitService) ProcessMessage(ctx context.Context, subnet *uint6 if state.Version() < clparams.DenebVersion { domain, err = state.GetDomain(domainType, voluntaryExit.Epoch) } else if state.Version() >= clparams.DenebVersion { - domain, err = fork.ComputeDomain(domainType[:], utils.Uint32ToBytes4(uint32(s.beaconCfg.CapellaForkVersion)), state.GenesisValidatorsRoot()) + domain, err = fork.ComputeDomain(domainType[:], utils.Uint32ToBytes4(uint32(state.BeaconConfig().CapellaForkVersion)), state.GenesisValidatorsRoot()) } if err != nil { return err } - signingRoot, err := computeSigningRoot(voluntaryExit, domain) + signingRoot, err := fork.ComputeSigningRoot(voluntaryExit, domain) if err != nil { return err } - if valid, err := blsVerify(msg.Signature[:], signingRoot[:], pk[:]); err != nil { + if valid, err := bls.Verify(msg.Signature[:], signingRoot[:], pk[:]); err != nil { return err } else if !valid { return errors.New("ProcessVoluntaryExit: BLS verification failed") diff --git a/cl/phase1/network/services/voluntary_exit_service_test.go b/cl/phase1/network/services/voluntary_exit_service_test.go deleted file mode 100644 index fcae428abbb..00000000000 --- a/cl/phase1/network/services/voluntary_exit_service_test.go +++ /dev/null @@ -1,224 +0,0 @@ -package services - -import ( - "context" - "log" - "testing" - - "github.com/ledgerwatch/erigon-lib/types/ssz" - mockState "github.com/ledgerwatch/erigon/cl/abstract/mock_services" - "github.com/ledgerwatch/erigon/cl/beacon/beaconevents" - mockSync "github.com/ledgerwatch/erigon/cl/beacon/synced_data/mock_services" - "github.com/ledgerwatch/erigon/cl/clparams" - "github.com/ledgerwatch/erigon/cl/cltypes" - "github.com/ledgerwatch/erigon/cl/cltypes/solid" - "github.com/ledgerwatch/erigon/cl/pool" - "github.com/ledgerwatch/erigon/cl/utils/eth_clock" - "github.com/pkg/errors" - "github.com/stretchr/testify/suite" - "go.uber.org/mock/gomock" -) - -type voluntaryExitTestSuite struct { - suite.Suite - gomockCtrl *gomock.Controller - operationsPool *pool.OperationsPool - emitters *beaconevents.Emitters - syncedData *mockSync.MockSyncedData - ethClock *eth_clock.MockEthereumClock - beaconCfg 
*clparams.BeaconChainConfig - voluntaryExitService VoluntaryExitService - - mockFuncs *mockFuncs -} - -func (t *voluntaryExitTestSuite) SetupTest() { - computeSigningRoot = func(_ ssz.HashableSSZ, domain []byte) ([32]byte, error) { - return [32]byte{}, nil - } - t.gomockCtrl = gomock.NewController(t.T()) - t.emitters = beaconevents.NewEmitters() - t.operationsPool = &pool.OperationsPool{ - VoluntaryExitsPool: pool.NewOperationPool[uint64, *cltypes.SignedVoluntaryExit](10, "voluntaryExitsPool"), - } - t.syncedData = mockSync.NewMockSyncedData(t.gomockCtrl) - t.ethClock = eth_clock.NewMockEthereumClock(t.gomockCtrl) - t.beaconCfg = &clparams.BeaconChainConfig{} - t.voluntaryExitService = NewVoluntaryExitService(*t.operationsPool, t.emitters, t.syncedData, t.beaconCfg, t.ethClock) - // mock global functions - t.mockFuncs = &mockFuncs{ - ctrl: t.gomockCtrl, - } - blsVerify = t.mockFuncs.BlsVerify -} - -func (t *voluntaryExitTestSuite) TearDownTest() { -} - -func (t *voluntaryExitTestSuite) TestProcessMessage() { - curEpoch := uint64(100) - mockValidatorIndex := uint64(10) - mockMsg := &cltypes.SignedVoluntaryExit{ - VoluntaryExit: &cltypes.VoluntaryExit{ - Epoch: 1, - ValidatorIndex: mockValidatorIndex, - }, - Signature: [96]byte{}, - } - - tests := []struct { - name string - mock func() - msg *cltypes.SignedVoluntaryExit - wantErr bool - err error - }{ - { - name: "validator already in pool", - mock: func() { - t.operationsPool.VoluntaryExitsPool.Insert(mockValidatorIndex, mockMsg) - }, - msg: mockMsg, - wantErr: true, - err: ErrIgnore, - }, - { - name: "state is nil", - mock: func() { - t.syncedData.EXPECT().HeadStateReader().Return(nil) - }, - msg: mockMsg, - wantErr: true, - err: ErrIgnore, - }, - { - name: "validator not found", - mock: func() { - mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockState.EXPECT().ValidatorForValidatorIndex(int(mockValidatorIndex)).Return(nil, errors.New("not found")).Times(1) - t.syncedData.EXPECT().HeadStateReader().Return(mockState).Times(1) - }, - msg: mockMsg, - wantErr: true, - err: ErrIgnore, - }, - { - name: "validator is not active", - mock: func() { - mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockValidator := solid.NewValidatorFromParameters( - [48]byte{}, - [32]byte{}, - 0, - false, - 0, - 0, - 0, - 0, - ) - mockState.EXPECT().ValidatorForValidatorIndex(int(mockValidatorIndex)).Return(mockValidator, nil).Times(1) - t.syncedData.EXPECT().HeadStateReader().Return(mockState).Times(1) - t.ethClock.EXPECT().GetCurrentEpoch().Return(curEpoch).Times(1) - }, - msg: mockMsg, - wantErr: true, - }, - { - name: "validator has been initialized", - mock: func() { - mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockValidator := solid.NewValidatorFromParameters( - [48]byte{}, - [32]byte{}, - 0, - false, - 0, - 0, - curEpoch+1, - 0, - ) - mockState.EXPECT().ValidatorForValidatorIndex(int(mockValidatorIndex)).Return(mockValidator, nil).Times(1) - t.syncedData.EXPECT().HeadStateReader().Return(mockState).Times(1) - t.ethClock.EXPECT().GetCurrentEpoch().Return(curEpoch).Times(1) - }, - msg: mockMsg, - wantErr: true, - }, - { - name: "bls verify failed", - mock: func() { - mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockValidator := solid.NewValidatorFromParameters( - [48]byte{}, - [32]byte{}, - 0, - false, - 0, - 0, - curEpoch+1, - 0, - ) - mockState.EXPECT().ValidatorForValidatorIndex(int(mockValidatorIndex)).Return(mockValidator, nil).Times(1) - 
t.syncedData.EXPECT().HeadStateReader().Return(mockState).Times(1) - t.ethClock.EXPECT().GetCurrentEpoch().Return(curEpoch).Times(1) - t.beaconCfg.FarFutureEpoch = mockValidator.ExitEpoch() - mockState.EXPECT().Version().Return(clparams.AltairVersion).Times(1) - mockState.EXPECT().GetDomain(t.beaconCfg.DomainVoluntaryExit, mockMsg.VoluntaryExit.Epoch).Return([]byte{}, nil).Times(1) - computeSigningRoot = func(_ ssz.HashableSSZ, domain []byte) ([32]byte, error) { - return [32]byte{}, nil - } - t.gomockCtrl.RecordCall(t.mockFuncs, "BlsVerify", gomock.Any(), gomock.Any(), gomock.Any()).Return(false, nil).Times(1) - }, - msg: mockMsg, - wantErr: true, - }, - { - name: "success", - mock: func() { - mockState := mockState.NewMockBeaconStateReader(t.gomockCtrl) - mockValidator := solid.NewValidatorFromParameters( - [48]byte{}, - [32]byte{}, - 0, - false, - 0, - 0, - curEpoch+1, - 0, - ) - mockState.EXPECT().ValidatorForValidatorIndex(int(mockValidatorIndex)).Return(mockValidator, nil).Times(1) - t.syncedData.EXPECT().HeadStateReader().Return(mockState).Times(1) - t.ethClock.EXPECT().GetCurrentEpoch().Return(curEpoch).Times(1) - t.beaconCfg.FarFutureEpoch = mockValidator.ExitEpoch() - mockState.EXPECT().Version().Return(clparams.AltairVersion).Times(1) - mockState.EXPECT().GetDomain(t.beaconCfg.DomainVoluntaryExit, mockMsg.VoluntaryExit.Epoch).Return([]byte{}, nil).Times(1) - computeSigningRoot = func(_ ssz.HashableSSZ, domain []byte) ([32]byte, error) { - return [32]byte{}, nil - } - t.gomockCtrl.RecordCall(t.mockFuncs, "BlsVerify", gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).Times(1) - }, - msg: mockMsg, - wantErr: false, - }, - } - - for _, tt := range tests { - log.Printf("VoluntaryExit running test case: %s", tt.name) - t.SetupTest() - tt.mock() - err := t.voluntaryExitService.ProcessMessage(context.Background(), nil, tt.msg) - if tt.wantErr { - t.Require().Error(err) - if tt.err != nil { - t.Require().Equal(tt.err, err) - } - log.Printf("error msg: %v", err.Error()) - } else { - t.Require().NoError(err) - } - } -} - -func TestVoluntaryExit(t *testing.T) { - suite.Run(t, new(voluntaryExitTestSuite)) -} diff --git a/cl/phase1/network/subnets/subnets.go b/cl/phase1/network/subnets/subnets.go index abebfff4ab6..1fba86c09ee 100644 --- a/cl/phase1/network/subnets/subnets.go +++ b/cl/phase1/network/subnets/subnets.go @@ -1,7 +1,6 @@ package subnets import ( - "github.com/ledgerwatch/erigon/cl/abstract" "github.com/ledgerwatch/erigon/cl/cltypes/solid" "github.com/ledgerwatch/erigon/cl/phase1/core/state" ) @@ -65,7 +64,7 @@ func ComputeSubnetForAttestation(committeePerSlot, slot, committeeIndex, slotsPe return (committeesSinceEpochStart + committeeIndex) % attSubnetCount } -func ComputeCommitteeCountPerSlot(s abstract.BeaconStateReader, slot uint64, slotsPerEpoch uint64) uint64 { +func ComputeCommitteeCountPerSlot(s state.BeaconStateReader, slot uint64, slotsPerEpoch uint64) uint64 { epoch := slot / slotsPerEpoch return s.CommitteeCount(epoch) } diff --git a/cl/phase1/stages/stage_history_download.go b/cl/phase1/stages/stage_history_download.go index 6c5ad919035..8f379d05597 100644 --- a/cl/phase1/stages/stage_history_download.go +++ b/cl/phase1/stages/stage_history_download.go @@ -170,7 +170,6 @@ func SpawnStageHistoryDownload(cfg StageHistoryReconstructionCfg, ctx context.Co logArgs = append(logArgs, "slot", currProgress, "blockNumber", currEth1Progress.Load(), - "frozenBlocks", cfg.engine.FrozenBlocks(ctx), "blk/sec", fmt.Sprintf("%.1f", speed), "snapshots", cfg.sn.SegmentsMax(), ) 
diff --git a/cmd/capcli/cli.go b/cmd/capcli/cli.go index f088b60279f..d9b43f84a82 100644 --- a/cmd/capcli/cli.go +++ b/cmd/capcli/cli.go @@ -18,7 +18,17 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" + lg "github.com/anacrolix/log" + + "github.com/ledgerwatch/erigon-lib/direct" + downloader3 "github.com/ledgerwatch/erigon-lib/downloader" "github.com/ledgerwatch/erigon-lib/metrics" + state2 "github.com/ledgerwatch/erigon-lib/state" + + "github.com/c2h5oh/datasize" + + "github.com/ledgerwatch/erigon-lib/chain/snapcfg" + "github.com/ledgerwatch/erigon-lib/downloader" "github.com/ledgerwatch/erigon/cl/antiquary" "github.com/ledgerwatch/erigon/cl/clparams" @@ -27,9 +37,12 @@ import ( "github.com/ledgerwatch/erigon/cmd/caplin/caplin1" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/ethconfig/estimate" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" "github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/downloader/downloadercfg" "github.com/ledgerwatch/erigon-lib/downloader/snaptype" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/cl/persistence/beacon_indicies" @@ -56,6 +69,7 @@ var CLI struct { Chain Chain `cmd:"" help:"download the entire chain from reqresp network"` DumpSnapshots DumpSnapshots `cmd:"" help:"generate caplin snapshots"` CheckSnapshots CheckSnapshots `cmd:"" help:"check snapshot folder against content of chain data"` + DownloadSnapshots DownloadSnapshots `cmd:"" help:"download snapshots from webseed"` LoopSnapshots LoopSnapshots `cmd:"" help:"loop over snapshots"` RetrieveHistoricalState RetrieveHistoricalState `cmd:"" help:"retrieve historical state from db"` ChainEndpoint ChainEndpoint `cmd:"" help:"chain endpoint"` @@ -459,6 +473,67 @@ func (c *LoopSnapshots) Run(ctx *Context) error { return nil } +type DownloadSnapshots struct { + chainCfg + outputFolder +} + +func (d *DownloadSnapshots) Run(ctx *Context) error { + webSeeds := snapcfg.KnownWebseeds[d.Chain] + dirs := datadir.New(d.Datadir) + + _, beaconConfig, _, err := clparams.GetConfigsByNetworkName(d.Chain) + if err != nil { + return err + } + + log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StderrHandler)) + + db, _, err := caplin1.OpenCaplinDatabase(ctx, db_config.DatabaseConfiguration{PruneDepth: math.MaxUint64}, beaconConfig, nil, dirs.CaplinIndexing, dirs.CaplinBlobs, nil, false, 0) + if err != nil { + return err + } + tx, err := db.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + downloadRate, err := datasize.ParseString("16mb") + if err != nil { + return err + } + + uploadRate, err := datasize.ParseString("0mb") + if err != nil { + return err + } + version := "erigon: " + params.VersionWithCommit(params.GitCommit) + + downloaderCfg, err := downloadercfg.New(dirs, version, lg.Info, downloadRate, uploadRate, 42069, 10, 3, nil, webSeeds, d.Chain, true) + if err != nil { + return err + } + downlo, err := downloader.New(ctx, downloaderCfg, log.Root(), log.LvlInfo, true) + if err != nil { + return err + } + s, err := state2.NewAggregator(ctx, dirs, 200000, db, log.Root()) + if err != nil { + return err + } + downlo.MainLoopInBackground(false) + bittorrentServer, err := downloader3.NewGrpcServer(downlo) + if err != nil { + return fmt.Errorf("new server: %w", err) + } + + return snapshotsync.WaitForDownloader(ctx, "CapCliDownloader", false, false, snapshotsync.OnlyCaplin, 
s, tx, + freezeblocks.NewBlockReader( + freezeblocks.NewRoSnapshots(ethconfig.NewSnapCfg(false, false, false), dirs.Snap, 0, log.Root()), + freezeblocks.NewBorRoSnapshots(ethconfig.NewSnapCfg(false, false, false), dirs.Snap, 0, log.Root())), + params.ChainConfigByChainName(d.Chain), direct.NewDownloaderClient(bittorrentServer), []string{}) +} + type RetrieveHistoricalState struct { chainCfg outputFolder diff --git a/cmd/caplin/main.go b/cmd/caplin/main.go index 77ac2efe9e4..ae3742d74bc 100644 --- a/cmd/caplin/main.go +++ b/cmd/caplin/main.go @@ -56,7 +56,7 @@ func runCaplinNode(cliCtx *cli.Context) error { log.Error("[Phase1] Could not initialize caplin", "err", err) return err } - if _, _, _, err := debug.Setup(cliCtx, true /* root logger */); err != nil { + if _, _, _, _, err := debug.Setup(cliCtx, true /* root logger */); err != nil { return err } rcfg := beacon_router_configuration.RouterConfiguration{ diff --git a/cmd/devnet/devnet/node.go b/cmd/devnet/devnet/node.go index 32b2e8a7440..a819bbddf7e 100644 --- a/cmd/devnet/devnet/node.go +++ b/cmd/devnet/devnet/node.go @@ -16,6 +16,7 @@ import ( "github.com/ledgerwatch/erigon/cmd/devnet/requests" "github.com/ledgerwatch/erigon/diagnostics" "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/tracers" "github.com/ledgerwatch/erigon/node/nodecfg" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/debug" @@ -138,6 +139,7 @@ func (n *devnetNode) EnableMetrics(int) { // run configures, creates and serves an erigon node func (n *devnetNode) run(ctx *cli.Context) error { var logger log.Logger + var tracer *tracers.Tracer var err error var metricsMux *http.ServeMux var pprofMux *http.ServeMux @@ -153,7 +155,7 @@ func (n *devnetNode) run(ctx *cli.Context) error { n.Unlock() }() - if logger, metricsMux, pprofMux, err = debug.Setup(ctx, false /* rootLogger */); err != nil { + if logger, tracer, metricsMux, pprofMux, err = debug.Setup(ctx, false /* rootLogger */); err != nil { return err } @@ -183,7 +185,7 @@ func (n *devnetNode) run(ctx *cli.Context) error { logger.Warn("TODO: custom BorStateSyncDelay is not applied to BorConfig.StateSyncConfirmationDelay", "delay", stateSyncConfirmationDelay) } - n.ethNode, err = enode.New(ctx.Context, n.nodeCfg, n.ethCfg, logger) + n.ethNode, err = enode.New(ctx.Context, n.nodeCfg, n.ethCfg, logger, tracer) diagnostics.Setup(ctx, n.ethNode, metricsMux, pprofMux) diff --git a/cmd/devnet/services/polygon/heimdall.go b/cmd/devnet/services/polygon/heimdall.go index 3f77b835086..7a2e21c0d7d 100644 --- a/cmd/devnet/services/polygon/heimdall.go +++ b/cmd/devnet/services/polygon/heimdall.go @@ -210,7 +210,7 @@ func (h *Heimdall) FetchCheckpointCount(ctx context.Context) (int64, error) { return 0, fmt.Errorf("TODO") } -func (h *Heimdall) FetchCheckpoints(ctx context.Context, page uint64, limit uint64) ([]*heimdall.Checkpoint, error) { +func (h *Heimdall) FetchCheckpoints(ctx context.Context, page uint64, limit uint64) (heimdall.Checkpoints, error) { return nil, fmt.Errorf("TODO") } diff --git a/cmd/devnet/services/polygon/proofgenerator_test.go b/cmd/devnet/services/polygon/proofgenerator_test.go index 0a0e5b57de5..92f992535ce 100644 --- a/cmd/devnet/services/polygon/proofgenerator_test.go +++ b/cmd/devnet/services/polygon/proofgenerator_test.go @@ -19,6 +19,7 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" 
"github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/accounts/abi/bind" "github.com/ledgerwatch/erigon/cmd/devnet/blocks" @@ -144,7 +145,12 @@ func (rg *requestGenerator) GetTransactionReceipt(ctx context.Context, hash libc } defer tx.Rollback() - _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, reader, tx, 0) + historyV3, err := kvcfg.HistoryV3.Enabled(tx) + if err != nil { + panic(err) + } + + _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, reader, tx, 0, historyV3) if err != nil { return nil, err diff --git a/cmd/diag/db/db.go b/cmd/diag/db/db.go index 412902505e2..6600b126ba7 100644 --- a/cmd/diag/db/db.go +++ b/cmd/diag/db/db.go @@ -173,7 +173,7 @@ func DBsInfo(cliCtx *cli.Context) ([]DBInfo, error) { func getAllDbsNames(cliCtx *cli.Context) ([]string, error) { var data []string - url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + flags.ApiPath + "/dbs" + url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + "/debug/diag/dbs" err := util.MakeHttpGetCall(cliCtx.Context, url, &data) if err != nil { @@ -185,7 +185,7 @@ func getAllDbsNames(cliCtx *cli.Context) ([]string, error) { func getDb(cliCtx *cli.Context, dbName string) ([]BDTableInfo, error) { var data []BDTableInfo - url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + flags.ApiPath + "/dbs/" + dbName + "/tables" + url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + "/debug/diag/dbs/" + dbName + "/tables" err := util.MakeHttpGetCall(cliCtx.Context, url, &data) if err != nil { diff --git a/cmd/diag/downloader/diag_downloader.go b/cmd/diag/downloader/diag_downloader.go index bb213d26def..db6b784dcb2 100644 --- a/cmd/diag/downloader/diag_downloader.go +++ b/cmd/diag/downloader/diag_downloader.go @@ -1,11 +1,9 @@ package downloader import ( + "encoding/json" "fmt" - "time" - "github.com/jedib0t/go-pretty/v6/table" - "github.com/jedib0t/go-pretty/v6/text" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/diagnostics" "github.com/ledgerwatch/erigon/cmd/diag/flags" @@ -13,373 +11,67 @@ import ( "github.com/urfave/cli/v2" ) -var ( - FileFilterFlag = cli.StringFlag{ - Name: "downloader.file.filter", - Aliases: []string{"dff"}, - Usage: "Filter files list [all|active|inactive|downloaded|queued], dafault value is all", - Required: false, - Value: "all", - } - - FileNameFlag = cli.StringFlag{ - Name: "downloader.file.name", - Aliases: []string{"dfn"}, - Usage: "File name to print details about.", - Required: false, - Value: "", - } -) - var Command = cli.Command{ - Action: printDownloadStatus, + Action: print, Name: "downloader", Aliases: []string{"dl"}, - Usage: "Print snapshot download status", + Usage: "print snapshot download stats", ArgsUsage: "", Flags: []cli.Flag{ &flags.DebugURLFlag, &flags.OutputFlag, }, - Subcommands: []*cli.Command{ - { - Name: "files", - Aliases: []string{"fls"}, - Action: printFiles, - Usage: "Print snapshot download files status", - ArgsUsage: "", - Flags: []cli.Flag{ - &flags.DebugURLFlag, - &flags.OutputFlag, - &FileFilterFlag, - &FileNameFlag, - }, - }, - }, Description: ``, } -func printDownloadStatus(cliCtx *cli.Context) error { - data, err := getData(cliCtx) - - if err != nil { - - return err - } - - snapshotDownloadStatus := getSnapshotStatusRow(data.SnapshotDownload) - - switch cliCtx.String(flags.OutputFlag.Name) { - case "json": - util.RenderJson(snapshotDownloadStatus) - - case "text": - util.RenderTableWithHeader( - "Snapshot download info:", - 
table.Row{"Status", "Progress", "Downloaded", "Total", "Time Left", "Total Time", "Download Rate", "Upload Rate", "Peers", "Files", "Connections", "Alloc", "Sys"}, - []table.Row{snapshotDownloadStatus}, - ) - } - - return nil -} - -func printFiles(cliCtx *cli.Context) error { - if cliCtx.String(FileNameFlag.Name) != "" { - return printFile(cliCtx) - } +func print(cliCtx *cli.Context) error { + var data diagnostics.SyncStatistics + url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + "/debug/diag/snapshot-sync" - data, err := getData(cliCtx) + err := util.MakeHttpGetCall(cliCtx.Context, url, &data) if err != nil { - txt := text.Colors{text.FgWhite, text.BgRed} - fmt.Printf("%s %s", txt.Sprint("[ERROR]"), "Failed to connect to Erigon node.") return err } - snapshotDownloadStatus := getSnapshotStatusRow(data.SnapshotDownload) - - snapDownload := data.SnapshotDownload - - files := snapDownload.SegmentsDownloading - rows := []table.Row{} - - for _, file := range files { - rows = append(rows, getFileRow(file)) - } - - filteredRows := filterRows(rows, cliCtx.String(FileFilterFlag.Name)) - switch cliCtx.String(flags.OutputFlag.Name) { case "json": - util.RenderJson(snapshotDownloadStatus) - util.RenderJson(filteredRows) - case "text": - //Print overall status - util.RenderTableWithHeader( - "Snapshot download info:", - table.Row{"Status", "Progress", "Downloaded", "Total", "Time Left", "Total Time", "Download Rate", "Upload Rate", "Peers", "Files", "Connections", "Alloc", "Sys"}, - []table.Row{snapshotDownloadStatus}, - ) - - //Print files status - util.RenderTableWithHeader( - "Files download info:", - table.Row{"File", "Progress", "Total", "Downloaded", "Peers", "Peers Download Rate", "Webseeds", "Webseeds Download Rate", "Time Left", "Active"}, - filteredRows, - ) - } - - return nil -} - -func printFile(cliCtx *cli.Context) error { - data, err := getData(cliCtx) - - if err != nil { - return err - } - - snapDownload := data.SnapshotDownload - - if file, ok := snapDownload.SegmentsDownloading[cliCtx.String(FileNameFlag.Name)]; ok { - - if file.DownloadedBytes >= file.TotalBytes { - fileRow := getDownloadedFileRow(file) - switch cliCtx.String(flags.OutputFlag.Name) { - case "json": - util.RenderJson(fileRow) - case "text": - //Print file status - util.RenderTableWithHeader( - "File download info:", - table.Row{"File", "Size", "Average Download Rate", "Time Took"}, - []table.Row{fileRow}, - ) - } - } else { - fileRow := getFileRow(file) - filePeers := getPeersRows(file.Peers) - fileWebseeds := getPeersRows(file.Webseeds) - - switch cliCtx.String(flags.OutputFlag.Name) { - case "json": - util.RenderJson(fileRow) - util.RenderJson(filePeers) - util.RenderJson(fileWebseeds) - case "text": - //Print file status - util.RenderTableWithHeader( - "file download info:", - table.Row{"File", "Progress", "Total", "Downloaded", "Peers", "Peers Download Rate", "Webseeds", "Webseeds Download Rate", "Time Left", "Active"}, - []table.Row{fileRow}, - ) - - //Print peers and webseeds status - util.RenderTableWithHeader( - "", - table.Row{"Peer", "Download Rate"}, - filePeers, - ) - - util.RenderTableWithHeader( - "", - table.Row{"Webseed", "Download Rate"}, - fileWebseeds, - ) - } - } - } else { - txt := text.Colors{text.FgWhite, text.BgRed} - fmt.Printf("%s %s", txt.Sprint("[ERROR]"), "File with name: "+cliCtx.String(FileNameFlag.Name)+" does not exist.") - } - - return nil -} - -func getDownloadedFileRow(file diagnostics.SegmentDownloadStatistics) table.Row { - averageDownloadRate := 
common.ByteCount(file.DownloadedStats.AverageRate) + "/s" - totalDownloadTimeString := time.Duration(file.DownloadedStats.TimeTook) * time.Second - - row := table.Row{ - file.Name, - common.ByteCount(file.TotalBytes), - averageDownloadRate, - totalDownloadTimeString.String(), - } - - return row -} - -func getSnapshotStatusRow(snapDownload diagnostics.SnapshotDownloadStatistics) table.Row { - status := "Downloading" - if snapDownload.DownloadFinished { - status = "Finished" - } - - downloadedPercent := getPercentDownloaded(snapDownload.Downloaded, snapDownload.Total) - - remainingBytes := snapDownload.Total - snapDownload.Downloaded - downloadTimeLeft := util.CalculateTime(remainingBytes, snapDownload.DownloadRate) - - totalDownloadTimeString := time.Duration(snapDownload.TotalTime) * time.Second - - rowObj := table.Row{ - status, // Status - downloadedPercent, // Progress - common.ByteCount(snapDownload.Downloaded), // Downloaded - common.ByteCount(snapDownload.Total), // Total - downloadTimeLeft, // Time Left - totalDownloadTimeString.String(), // Total Time - common.ByteCount(snapDownload.DownloadRate) + "/s", // Download Rate - common.ByteCount(snapDownload.UploadRate) + "/s", // Upload Rate - snapDownload.Peers, // Peers - snapDownload.Files, // Files - snapDownload.Connections, // Connections - common.ByteCount(snapDownload.Alloc), // Alloc - common.ByteCount(snapDownload.Sys), // Sys - } - - return rowObj -} - -func getFileRow(file diagnostics.SegmentDownloadStatistics) table.Row { - peersDownloadRate := getFileDownloadRate(file.Peers) - webseedsDownloadRate := getFileDownloadRate(file.Webseeds) - totalDownloadRate := peersDownloadRate + webseedsDownloadRate - downloadedPercent := getPercentDownloaded(file.DownloadedBytes, file.TotalBytes) - remainingBytes := file.TotalBytes - file.DownloadedBytes - downloadTimeLeft := util.CalculateTime(remainingBytes, totalDownloadRate) - isActive := "false" - if totalDownloadRate > 0 { - isActive = "true" - } - - row := table.Row{ - file.Name, - downloadedPercent, - common.ByteCount(file.TotalBytes), - common.ByteCount(file.DownloadedBytes), - len(file.Peers), - common.ByteCount(peersDownloadRate) + "/s", - len(file.Webseeds), - common.ByteCount(webseedsDownloadRate) + "/s", - downloadTimeLeft, - isActive, - } - - return row -} - -func getPeersRows(peers []diagnostics.SegmentPeer) []table.Row { - rows := make([]table.Row, 0) + bytes, err := json.Marshal(data.SnapshotDownload) - for _, peer := range peers { - row := table.Row{ - peer.Url, - common.ByteCount(peer.DownloadRate) + "/s", + if err != nil { + return err } - rows = append(rows, row) - } - - return rows -} - -func getFileDownloadRate(peers []diagnostics.SegmentPeer) uint64 { - var downloadRate uint64 - - for _, peer := range peers { - downloadRate += peer.DownloadRate - } + fmt.Println(string(bytes)) - return downloadRate -} - -func getData(cliCtx *cli.Context) (diagnostics.SyncStatistics, error) { - var data diagnostics.SyncStatistics - url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + flags.ApiPath + "/snapshot-sync" - - err := util.MakeHttpGetCall(cliCtx.Context, url, &data) - - if err != nil { - return data, err - } - - return data, nil -} - -func filterRows(rows []table.Row, filter string) []table.Row { - switch filter { - case "all": - return rows - case "active": - return filterActive(rows) - case "inactive": - return filterInactive(rows) - case "downloaded": - return filterDownloaded(rows) - case "queued": - return filterQueued(rows) - } - - return rows -} - -func 
filterActive(rows []table.Row) []table.Row { - filtered := []table.Row{} - - for _, row := range rows { - if row[len(row)-1] == "true" { - filtered = append(filtered, row) - } - } - - return filtered -} - -func filterInactive(rows []table.Row) []table.Row { - filtered := []table.Row{} - - for _, row := range rows { - if row[len(row)-1] == "false" { - filtered = append(filtered, row) - } - } - - return filtered -} - -func filterDownloaded(rows []table.Row) []table.Row { - filtered := []table.Row{} - - for _, row := range rows { - if row[1] == "100.00%" { - filtered = append(filtered, row) + case "text": + fmt.Println("-------------------Snapshot Download-------------------") + + snapDownload := data.SnapshotDownload + var remainingBytes uint64 + percent := 50 + if snapDownload.Total > snapDownload.Downloaded { + remainingBytes = snapDownload.Total - snapDownload.Downloaded + percent = int((snapDownload.Downloaded*100)/snapDownload.Total) / 2 } - } - - return filtered -} -func filterQueued(rows []table.Row) []table.Row { - filtered := []table.Row{} + logstr := "[" - for _, row := range rows { - if row[1] == "0.00%" { - filtered = append(filtered, row) + for i := 1; i < 50; i++ { + if i < percent { + logstr += "#" + } else { + logstr += "." + } } - } - return filtered -} + logstr += "]" -func getPercentDownloaded(downloaded, total uint64) string { - percent := float32(downloaded) / float32(total/100) + fmt.Println("Download:", logstr, common.ByteCount(snapDownload.Downloaded), "/", common.ByteCount(snapDownload.Total)) + downloadTimeLeft := util.CalculateTime(remainingBytes, snapDownload.DownloadRate) - if percent > 100 { - percent = 100 + fmt.Println("Time left:", downloadTimeLeft) } - return fmt.Sprintf("%.2f%%", percent) + return nil } diff --git a/cmd/diag/flags/flags.go b/cmd/diag/flags/flags.go index c8ecdc0f0ae..a172bfb3f3e 100644 --- a/cmd/diag/flags/flags.go +++ b/cmd/diag/flags/flags.go @@ -3,8 +3,6 @@ package flags import "github.com/urfave/cli/v2" var ( - ApiPath = "/debug/diag" - DebugURLFlag = cli.StringFlag{ Name: "debug.addr", Aliases: []string{"da"}, diff --git a/cmd/diag/main.go b/cmd/diag/main.go index f805b75d8b1..a6bff652ea0 100644 --- a/cmd/diag/main.go +++ b/cmd/diag/main.go @@ -14,7 +14,6 @@ import ( "github.com/ledgerwatch/erigon/cmd/diag/db" "github.com/ledgerwatch/erigon/cmd/diag/downloader" "github.com/ledgerwatch/erigon/cmd/diag/stages" - "github.com/ledgerwatch/erigon/cmd/diag/ui" "github.com/ledgerwatch/erigon/cmd/snapshots/sync" "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/params" @@ -34,7 +33,6 @@ func main() { &downloader.Command, &stages.Command, &db.Command, - &ui.Command, } app.Flags = []cli.Flag{} diff --git a/cmd/diag/stages/stages.go b/cmd/diag/stages/stages.go index efbf9d39f91..9addc0ff585 100644 --- a/cmd/diag/stages/stages.go +++ b/cmd/diag/stages/stages.go @@ -32,7 +32,7 @@ var Command = cli.Command{ func printCurentStage(cliCtx *cli.Context) error { var data diagnostics.SyncStatistics - url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + flags.ApiPath + "/snapshot-sync" + url := "http://" + cliCtx.String(flags.DebugURLFlag.Name) + "/debug/diag/snapshot-sync" err := util.MakeHttpGetCall(cliCtx.Context, url, &data) if err != nil { diff --git a/cmd/diag/ui/ui.go b/cmd/diag/ui/ui.go deleted file mode 100644 index 1620747b5d9..00000000000 --- a/cmd/diag/ui/ui.go +++ /dev/null @@ -1,137 +0,0 @@ -package ui - -import ( - "encoding/json" - "fmt" - "log" - "net/http" - "sync" - "time" - - 
"github.com/ledgerwatch/erigonwatch" - - "github.com/go-chi/chi/v5" - "github.com/go-chi/chi/v5/middleware" - "github.com/go-chi/cors" - "github.com/jedib0t/go-pretty/v6/text" - "github.com/ledgerwatch/erigon/cmd/diag/flags" - "github.com/urfave/cli/v2" -) - -var ( - UIURLFlag = cli.StringFlag{ - Name: "ui.addr", - Usage: "URL to serve UI web application", - Required: false, - Value: "127.0.0.1:6060", - } -) - -var Command = cli.Command{ - Name: "ui", - Action: runUI, - Aliases: []string{"u"}, - Usage: "run local ui", - ArgsUsage: "", - Flags: []cli.Flag{ - &flags.DebugURLFlag, - &UIURLFlag, - }, - Description: ``, -} - -func runUI(cli *cli.Context) error { - supportedSubpaths := []string{ - "sentry-network", - "sentinel-network", - "downloader", - "logs", - "chain", - "data", - "debug", - "testing", - "performance", - "documentation", - "issues", - "admin", - } - - listenUrl := cli.String(UIURLFlag.Name) - - assets, _ := erigonwatch.UIFiles() - fs := http.FileServer(http.FS(assets)) - - r := chi.NewRouter() - r.Use(middleware.Logger) - r.Use(middleware.Recoverer) - r.Use(middleware.RouteHeaders(). - Route("Origin", "*", cors.Handler(cors.Options{ - AllowedOrigins: []string{"*"}, - AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"}, - AllowedHeaders: []string{"Accept", "Content-Type", "session-id"}, - AllowCredentials: false, // <----------<<< do not allow credentials - })). - Handler) - - r.Mount("/", fs) - - for _, subpath := range supportedSubpaths { - addhandler(r, "/"+subpath, fs) - } - - // Use the file system to serve static files - url := "http://" + cli.String(flags.DebugURLFlag.Name) - addr := DiagAddress{ - Address: url, - } - - //r.Get("/diagaddr", writeDiagAdderss(addr)) - r.Handle("/data", http.StripPrefix("/data", fs)) - - r.HandleFunc("/diagaddr", func(w http.ResponseWriter, r *http.Request) { - writeDiagAdderss(w, addr) - }) - - srv := &http.Server{ - Addr: listenUrl, - Handler: r, - MaxHeaderBytes: 1 << 20, - ReadHeaderTimeout: 1 * time.Minute, - } - - var wg sync.WaitGroup - wg.Add(1) - - go func() { - defer wg.Done() // Signal that the goroutine has completed - err := srv.ListenAndServe() - - if err != nil { - log.Fatal(err) - } - }() - - uiUrl := fmt.Sprintf("http://%s", listenUrl) - fmt.Println(text.Hyperlink(uiUrl, fmt.Sprintf("UI running on %s", uiUrl))) - - wg.Wait() // Wait for the server goroutine to finish - return nil -} - -func addhandler(r *chi.Mux, path string, handler http.Handler) { - r.Handle(path, http.StripPrefix(path, handler)) -} - -type DiagAddress struct { - Address string `json:"address"` -} - -func writeDiagAdderss(w http.ResponseWriter, addr DiagAddress) { - w.Header().Set("Access-Control-Allow-Origin", "*") - w.Header().Set("Content-Type", "application/json") - - if err := json.NewEncoder(w).Encode(addr); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } - -} diff --git a/cmd/diag/util/util.go b/cmd/diag/util/util.go index 277f7259886..f6c9e6184e2 100644 --- a/cmd/diag/util/util.go +++ b/cmd/diag/util/util.go @@ -6,11 +6,7 @@ import ( "fmt" "io" "net/http" - "os" "time" - - "github.com/jedib0t/go-pretty/v6/table" - "github.com/jedib0t/go-pretty/v6/text" ) func MakeHttpGetCall(ctx context.Context, url string, data interface{}) error { @@ -53,44 +49,3 @@ func CalculateTime(amountLeft, rate uint64) string { return fmt.Sprintf("%dhrs:%dm", hours, minutes) } - -func RenderJson(data interface{}) { - bytes, err := json.Marshal(data) - - if err == nil { - fmt.Println(string(bytes)) - fmt.Print("\n") - } -} - 
-func RenderTableWithHeader(title string, header table.Row, rows []table.Row) { - if title != "" { - txt := text.Colors{text.FgBlue, text.Bold} - fmt.Println(txt.Sprint(title)) - - if len(rows) == 0 { - txt := text.Colors{text.FgRed, text.Bold} - fmt.Println(txt.Sprint("No data to show")) - } - } - - if len(rows) > 0 { - t := table.NewWriter() - t.SetOutputMirror(os.Stdout) - - t.AppendHeader(header) - if len(rows) > 0 { - t.AppendRows(rows) - } - - t.AppendSeparator() - t.Render() - } - - fmt.Print("\n") -} - -func RenderUseDiagUI() { - txt := text.Colors{text.BgGreen, text.Bold} - fmt.Println(txt.Sprint("To get detailed info about Erigon node state use 'diag ui' command.")) -} diff --git a/cmd/erigon/main.go b/cmd/erigon/main.go index e6196d0932f..60c8eb9edb5 100644 --- a/cmd/erigon/main.go +++ b/cmd/erigon/main.go @@ -12,6 +12,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/metrics" "github.com/ledgerwatch/erigon/diagnostics" + "github.com/ledgerwatch/erigon/eth/tracers" "github.com/ledgerwatch/erigon/params" erigonapp "github.com/ledgerwatch/erigon/turbo/app" erigoncli "github.com/ledgerwatch/erigon/turbo/cli" @@ -42,11 +43,12 @@ func main() { func runErigon(cliCtx *cli.Context) error { var logger log.Logger + var tracer *tracers.Tracer var err error var metricsMux *http.ServeMux var pprofMux *http.ServeMux - if logger, metricsMux, pprofMux, err = debug.Setup(cliCtx, true /* rootLogger */); err != nil { + if logger, tracer, metricsMux, pprofMux, err = debug.Setup(cliCtx, true /* rootLogger */); err != nil { return err } @@ -63,7 +65,7 @@ func runErigon(cliCtx *cli.Context) error { ethCfg := node.NewEthConfigUrfave(cliCtx, nodeCfg, logger) - ethNode, err := node.New(cliCtx.Context, nodeCfg, ethCfg, logger) + ethNode, err := node.New(cliCtx.Context, nodeCfg, ethCfg, logger, tracer) if err != nil { log.Error("Erigon startup", "err", err) return err diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index f7b0bcb56f7..daff8d49ef1 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -24,10 +24,13 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/turbo/rpchelper" ) @@ -60,7 +63,6 @@ type stEnv struct { UncleHash libcommon.Hash `json:"uncleHash,omitempty"` Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` WithdrawalsHash *libcommon.Hash `json:"withdrawalsRoot,omitempty"` - Requests []*types.Request `json:"requests,omitempty"` } type stEnvMarshaling struct { @@ -77,13 +79,14 @@ type stEnvMarshaling struct { func MakePreState(chainRules *chain.Rules, tx kv.RwTx, accounts types.GenesisAlloc) (state.StateReader, *state.PlainStateWriter) { var blockNr uint64 = 0 - stateReader, stateWriter := rpchelper.NewLatestStateReader(tx), state.NewPlainStateWriter(tx, tx, blockNr) + histV3, _ := kvcfg.HistoryV3.Enabled(tx) + stateReader, stateWriter := rpchelper.NewLatestStateReader(tx, histV3), state.NewPlainStateWriter(tx, tx, blockNr) statedb := state.New(stateReader) //ibs for addr, a := range accounts { statedb.SetCode(addr, 
a.Code) statedb.SetNonce(addr, a.Nonce) balance, _ := uint256.FromBig(a.Balance) - statedb.SetBalance(addr, balance) + statedb.SetBalance(addr, balance, tracing.BalanceIncreaseGenesisBalance) for k, v := range a.Storage { key := k val := uint256.NewInt(0).SetBytes(v.Bytes()) diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go index 4a854e2ee59..7f08b6a3735 100644 --- a/cmd/evm/internal/t8ntool/gen_stenv.go +++ b/cmd/evm/internal/t8ntool/gen_stenv.go @@ -7,8 +7,9 @@ import ( "errors" "math/big" - "github.com/ledgerwatch/erigon-lib/common" - common0 "github.com/ledgerwatch/erigon/common" + libcommon "github.com/ledgerwatch/erigon-lib/common" + + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/core/types" ) @@ -18,29 +19,25 @@ var _ = (*stEnvMarshaling)(nil) // MarshalJSON marshals as JSON. func (s stEnv) MarshalJSON() ([]byte, error) { type stEnv struct { - Coinbase common0.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` - Random *math.HexOrDecimal256 `json:"currentRandom"` - MixDigest common.Hash `json:"mixHash,omitempty"` - ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` - GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` - Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` - Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` - ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` - BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` - ParentUncleHash common.Hash `json:"parentUncleHash"` - UncleHash common.Hash `json:"uncleHash,omitempty"` - Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` - WithdrawalsHash *common.Hash `json:"withdrawalsRoot,omitempty"` - Requests []*types.Request `json:"requests,omitempty"` + Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` + Random *math.HexOrDecimal256 `json:"currentRandom"` + ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` + GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]libcommon.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` + ParentUncleHash libcommon.Hash `json:"parentUncleHash"` + UncleHash libcommon.Hash `json:"uncleHash,omitempty"` + Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` } var enc stEnv - enc.Coinbase = common0.UnprefixedAddress(s.Coinbase) + enc.Coinbase = common.UnprefixedAddress(s.Coinbase) enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty) enc.Random = (*math.HexOrDecimal256)(s.Random) - enc.MixDigest = s.MixDigest enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty) enc.GasLimit = math.HexOrDecimal64(s.GasLimit) enc.Number = math.HexOrDecimal64(s.Number) @@ -52,31 +49,26 @@ func (s stEnv) MarshalJSON() ([]byte, error) { enc.ParentUncleHash = s.ParentUncleHash enc.UncleHash = s.UncleHash 
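The regenerated `gen_stenv.go` below is gencodec output, and its shape follows one pattern: declare a local shadow struct whose fields carry JSON-friendly wire types, copy field by field, and marshal the shadow. A reduced sketch of that pattern — `env` and `hexOrDecimal256` are toy stand-ins, not the real `stEnv` or `math.HexOrDecimal256`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"math/big"
)

// hexOrDecimal256 mimics the role of math.HexOrDecimal256: a type alias
// used only to control the JSON wire format of a big.Int.
type hexOrDecimal256 big.Int

func (h hexOrDecimal256) MarshalJSON() ([]byte, error) {
	i := big.Int(h)
	return json.Marshal(fmt.Sprintf("%#x", &i))
}

type env struct {
	Difficulty *big.Int
	Number     uint64
}

// MarshalJSON uses a local shadow type so JSON tags and wire types can
// differ from the in-memory representation, as gencodec output does.
func (e env) MarshalJSON() ([]byte, error) {
	type envJSON struct {
		Difficulty *hexOrDecimal256 `json:"currentDifficulty"`
		Number     uint64           `json:"currentNumber"`
	}
	var enc envJSON
	enc.Difficulty = (*hexOrDecimal256)(e.Difficulty)
	enc.Number = e.Number
	return json.Marshal(&enc)
}

func main() {
	b, _ := env{Difficulty: big.NewInt(131072), Number: 42}.MarshalJSON()
	fmt.Println(string(b)) // {"currentDifficulty":"0x20000","currentNumber":42}
}
```

The same trick runs in reverse in `UnmarshalJSON` below, where pointer-typed shadow fields let the generator distinguish "absent" from "zero" and enforce `gencodec:"required"`.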
enc.Withdrawals = s.Withdrawals - enc.WithdrawalsHash = s.WithdrawalsHash - enc.Requests = s.Requests return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. func (s *stEnv) UnmarshalJSON(input []byte) error { type stEnv struct { - Coinbase *common0.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` - Random *math.HexOrDecimal256 `json:"currentRandom"` - MixDigest *common.Hash `json:"mixHash,omitempty"` - ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` - GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` - Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` - Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` - ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` - BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` - ParentUncleHash *common.Hash `json:"parentUncleHash"` - UncleHash *common.Hash `json:"uncleHash,omitempty"` - Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` - WithdrawalsHash *common.Hash `json:"withdrawalsRoot,omitempty"` - Requests []*types.Request `json:"requests,omitempty"` + Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` + Random *math.HexOrDecimal256 `json:"currentRandom"` + ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` + GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]libcommon.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` + ParentUncleHash *libcommon.Hash `json:"parentUncleHash"` + UncleHash libcommon.Hash `json:"uncleHash,omitempty"` + Withdrawals []*types.Withdrawal `json:"withdrawals,omitempty"` } var dec stEnv if err := json.Unmarshal(input, &dec); err != nil { @@ -85,16 +77,13 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { if dec.Coinbase == nil { return errors.New("missing required field 'currentCoinbase' for stEnv") } - s.Coinbase = common.Address(*dec.Coinbase) + s.Coinbase = libcommon.Address(*dec.Coinbase) if dec.Difficulty != nil { s.Difficulty = (*big.Int)(dec.Difficulty) } if dec.Random != nil { s.Random = (*big.Int)(dec.Random) } - if dec.MixDigest != nil { - s.MixDigest = *dec.MixDigest - } if dec.ParentDifficulty != nil { s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty) } @@ -125,17 +114,10 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { if dec.ParentUncleHash != nil { s.ParentUncleHash = *dec.ParentUncleHash } - if dec.UncleHash != nil { - s.UncleHash = *dec.UncleHash - } + s.UncleHash = dec.UncleHash if dec.Withdrawals != nil { s.Withdrawals = dec.Withdrawals } - if dec.WithdrawalsHash != nil { - s.WithdrawalsHash = dec.WithdrawalsHash - } - if dec.Requests != nil { - s.Requests = dec.Requests - } + return nil } diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 200448aecb4..792f1be0db5 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ 
b/cmd/evm/internal/t8ntool/transition.go @@ -30,6 +30,7 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/kv/temporal/temporaltest" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/eth/consensuschain" "github.com/ledgerwatch/log/v3" "github.com/urfave/cli/v2" @@ -39,6 +40,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/consensus/merge" @@ -99,7 +101,7 @@ func Main(ctx *cli.Context) error { err error baseDir = "" ) - var getTracer func(txIndex int, txHash libcommon.Hash) (vm.EVMLogger, error) + var getTracer func(txIndex int, txHash libcommon.Hash) (*tracing.Hooks, error) // If user specified a basedir, make sure it exists if ctx.IsSet(OutputBasedir.Name) { @@ -126,7 +128,7 @@ func Main(ctx *cli.Context) error { prevFile.Close() } }() - getTracer = func(txIndex int, txHash libcommon.Hash) (vm.EVMLogger, error) { + getTracer = func(txIndex int, txHash libcommon.Hash) (*tracing.Hooks, error) { if prevFile != nil { prevFile.Close() } @@ -135,10 +137,10 @@ func Main(ctx *cli.Context) error { return nil, NewError(ErrorIO, fmt.Errorf("failed creating trace-file: %v", err2)) } prevFile = traceFile - return trace_logger.NewJSONLogger(logConfig, traceFile), nil + return trace_logger.NewJSONLogger(logConfig, traceFile).Hooks, nil } } else { - getTracer = func(txIndex int, txHash libcommon.Hash) (tracer vm.EVMLogger, err error) { + getTracer = func(txIndex int, txHash libcommon.Hash) (tracer *tracing.Hooks, err error) { return nil, nil } } @@ -278,7 +280,7 @@ func Main(ctx *cli.Context) error { ommerN.SetUint64(header.Number.Uint64() - ommer.Delta) ommerHeaders[i] = &types.Header{Coinbase: ommer.Address, Number: &ommerN} } - block := types.NewBlock(header, txs, ommerHeaders, nil /* receipts */, prestate.Env.Withdrawals, prestate.Env.Requests) + block := types.NewBlock(header, txs, ommerHeaders, nil /* receipts */, prestate.Env.Withdrawals) var hashError error getHash := func(num uint64) libcommon.Hash { @@ -293,7 +295,7 @@ func Main(ctx *cli.Context) error { return h } - db, _ := temporaltest.NewTestDB(nil, datadir.New("")) + _, db, _ := temporaltest.NewTestDB(nil, datadir.New("")) defer db.Close() tx, err := db.BeginRw(context.Background()) @@ -330,7 +332,11 @@ func Main(ctx *cli.Context) error { body, _ := rlp.EncodeToBytes(txs) collector := make(Alloc) - dumper := state.NewDumper(tx, prestate.Env.Number, true) + historyV3, err := kvcfg.HistoryV3.Enabled(tx) + if err != nil { + return err + } + dumper := state.NewDumper(tx, prestate.Env.Number, historyV3) dumper.DumpToCollector(collector, false, false, libcommon.Address{}, 0) return dispatchOutput(ctx, baseDir, result, collector, body) } diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index 86e9659adc1..2889e13d260 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -34,6 +34,7 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" common2 "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/common/hexutility" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/cmd/utils/flags" "github.com/ledgerwatch/erigon/core/types" @@ -46,6 +47,7 @@ import ( 
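The change running through `transition.go` and `runner.go` is the tracer type: `vm.EVMLogger`, an interface every tracer had to implement in full, becomes `*tracing.Hooks`, a struct of optional callbacks. A toy illustration of why that shape is convenient — the hook names here are invented; the real hook set lives in erigon's `core/tracing` package:

```go
package main

import "fmt"

// Hooks bundles optional callbacks; a nil field simply means "not traced".
// This mirrors the struct-of-function-pointers style of core/tracing.
type Hooks struct {
	OnEnter func(depth int, input []byte)
	OnExit  func(depth int, output []byte, err error)
}

type interpreter struct {
	hooks *Hooks // may be nil: tracing fully disabled
}

func (in *interpreter) run(input []byte) {
	if in.hooks != nil && in.hooks.OnEnter != nil {
		in.hooks.OnEnter(0, input)
	}
	out := append([]byte(nil), input...) // pretend execution
	if in.hooks != nil && in.hooks.OnExit != nil {
		in.hooks.OnExit(0, out, nil)
	}
}

func main() {
	in := &interpreter{hooks: &Hooks{
		OnEnter: func(depth int, input []byte) {
			fmt.Printf("enter depth=%d len=%d\n", depth, len(input))
		},
		// OnExit left nil on purpose: callers only pay for hooks they set.
	}}
	in.run([]byte{0x60, 0x00})
}
```

With a struct of nillable functions the interpreter pays only for the hooks a tracer actually sets, and adapters like `tracer.Hooks` and `debugLogger.Tracer()` in this diff can be plain field or method accesses rather than full interface implementations.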
"github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/core/vm/runtime" + "github.com/ledgerwatch/erigon/eth/tracers" "github.com/ledgerwatch/erigon/eth/tracers/logger" "github.com/ledgerwatch/erigon/params" ) @@ -133,7 +135,7 @@ func runCmd(ctx *cli.Context) error { } var ( - tracer vm.EVMLogger + tracer *tracers.Tracer debugLogger *logger.StructLogger statedb *state.IntraBlockState chainConfig *chain.Config @@ -145,7 +147,7 @@ func runCmd(ctx *cli.Context) error { tracer = logger.NewJSONLogger(logconfig, os.Stdout) } else if ctx.Bool(DebugFlag.Name) { debugLogger = logger.NewStructLogger(logconfig) - tracer = debugLogger + tracer = debugLogger.Tracer() } else { debugLogger = logger.NewStructLogger(logconfig) } @@ -153,7 +155,7 @@ func runCmd(ctx *cli.Context) error { defer db.Close() if ctx.String(GenesisFlag.Name) != "" { gen := readGenesis(ctx.String(GenesisFlag.Name)) - core.MustCommitGenesis(gen, db, "", log.Root()) + core.MustCommitGenesis(gen, db, "", log.Root(), nil) genesisConfig = gen chainConfig = gen.Config } else { @@ -236,7 +238,7 @@ func runCmd(ctx *cli.Context) error { Coinbase: genesisConfig.Coinbase, BlockNumber: new(big.Int).SetUint64(genesisConfig.Number), EVMConfig: vm.Config{ - Tracer: tracer, + Tracer: tracer.Hooks, Debug: ctx.Bool(DebugFlag.Name) || ctx.Bool(MachineFlag.Name), }, } @@ -300,7 +302,11 @@ func runCmd(ctx *cli.Context) error { fmt.Println("Could not commit state: ", err) os.Exit(1) } - fmt.Println(string(state.NewDumper(tx, 0, true).DefaultDump())) + historyV3, err := kvcfg.HistoryV3.Enabled(tx) + if err != nil { + return err + } + fmt.Println(string(state.NewDumper(tx, 0, historyV3).DefaultDump())) } if memProfilePath := ctx.String(MemProfileFlag.Name); memProfilePath != "" { diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index 67cabd4c0b2..61d2d01c012 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -27,11 +27,7 @@ import ( "github.com/c2h5oh/datasize" mdbx2 "github.com/erigontech/mdbx-go/mdbx" libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/common/datadir" - "github.com/ledgerwatch/erigon-lib/config3" "github.com/ledgerwatch/erigon-lib/kv/mdbx" - "github.com/ledgerwatch/erigon-lib/kv/temporal" - libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/log/v3" "github.com/urfave/cli/v2" @@ -79,9 +75,9 @@ func stateTestCmd(ctx *cli.Context) error { Debug: ctx.Bool(DebugFlag.Name) || ctx.Bool(MachineFlag.Name), } if machineFriendlyOutput { - cfg.Tracer = logger.NewJSONLogger(config, os.Stderr) + cfg.Tracer = logger.NewJSONLogger(config, os.Stderr).Hooks } else if ctx.Bool(DebugFlag.Name) { - cfg.Tracer = logger.NewStructLogger(config) + cfg.Tracer = logger.NewStructLogger(config).Hooks() } if len(ctx.Args().First()) != 0 { @@ -126,29 +122,16 @@ func runStateTest(fname string, cfg vm.Config, jsonOut bool) error { func aggregateResultsFromStateTests( stateTests map[string]tests.StateTest, cfg vm.Config, jsonOut bool) ([]StatetestResult, error) { - dirs := datadir.New(filepath.Join(os.TempDir(), "erigon-statetest")) //this DB is shared. means: // - faster sequential tests: don't need create/delete db // - less parallelism: multiple processes can open same DB but only 1 can create rw-transaction (other will wait when 1-st finish) - _db := mdbx.NewMDBX(log.New()). - Path(dirs.Chaindata). + db := mdbx.NewMDBX(log.New()). + Path(filepath.Join(os.TempDir(), "erigon-statetest")). 
Flags(func(u uint) uint { - return u | mdbx2.UtterlyNoSync | mdbx2.NoMetaSync | mdbx2.NoMemInit | mdbx2.WriteMap + return u | mdbx2.UtterlyNoSync | mdbx2.NoMetaSync | mdbx2.LifoReclaim | mdbx2.NoMemInit }). GrowthStep(1 * datasize.MB). MustOpen() - defer _db.Close() - - agg, err := libstate.NewAggregator(context.Background(), dirs, config3.HistoryV3AggregationStep, _db, log.New()) - if err != nil { - return nil, err - } - defer agg.Close() - - db, err := temporal.New(_db, agg) - if err != nil { - return nil, err - } defer db.Close() tx, txErr := db.BeginRw(context.Background()) diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go index 80fbf94dac5..44c84d87161 100644 --- a/cmd/hack/hack.go +++ b/cmd/hack/hack.go @@ -29,6 +29,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2" "github.com/ledgerwatch/erigon-lib/recsplit" @@ -131,8 +132,15 @@ func printCurrentBlockNumber(chaindata string) { } func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) { + var histV3 bool + if err := db.View(context.Background(), func(tx kv.Tx) error { + histV3, _ = kvcfg.HistoryV3.Enabled(tx) + return nil + }); err != nil { + panic(err) + } br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, "", 0, log.New()), nil /* BorSnapshots */) - bw := blockio.NewBlockWriter() + bw := blockio.NewBlockWriter(histV3) return br, bw } diff --git a/cmd/hack/tool/fromdb/tool.go b/cmd/hack/tool/fromdb/tool.go index 01852ee79f0..8bcff3561ca 100644 --- a/cmd/hack/tool/fromdb/tool.go +++ b/cmd/hack/tool/fromdb/tool.go @@ -5,6 +5,7 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon/cmd/hack/tool" "github.com/ledgerwatch/erigon/ethdb/prune" ) @@ -34,3 +35,16 @@ func PruneMode(db kv.RoDB) (pm prune.Mode) { } return } +func HistV3(db kv.RoDB) (enabled bool) { + if err := db.View(context.Background(), func(tx kv.Tx) error { + var err error + enabled, err = kvcfg.HistoryV3.Enabled(tx) + if err != nil { + return err + } + return nil + }); err != nil { + panic(err) + } + return +} diff --git a/cmd/integration/commands/flags.go b/cmd/integration/commands/flags.go index c1ace6d7387..db19c885865 100644 --- a/cmd/integration/commands/flags.go +++ b/cmd/integration/commands/flags.go @@ -10,30 +10,30 @@ import ( ) var ( - chaindata string - databaseVerbosity int - referenceChaindata string - block, pruneTo, unwind uint64 - unwindEvery uint64 - batchSizeStr string - reset, warmup, noCommit bool - resetPruneAt bool - bucket string - datadirCli, toChaindata string - migration string - squeezeCommitmentFiles bool - integrityFast, integritySlow bool - file string - HeimdallURL string - txtrace bool // Whether to trace the execution (should only be used together with `block`) - pruneFlag string - pruneB, pruneH, pruneR, pruneT, pruneC uint64 - pruneBBefore, pruneHBefore, pruneRBefore uint64 - pruneTBefore, pruneCBefore uint64 - experiments []string - unwindTypes []string - chain string // Which chain to use (mainnet, goerli, sepolia, etc.) 
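This `flags.go` hunk is mostly a gofmt realignment of the shared variable block; the substantive change is dropping the block-prune knobs (`pruneB`, `pruneBBefore`), whose `prune.b.older`/`prune.b.before` registrations also disappear from `cmdSetPrune` later in the diff. A stdlib-only sketch of the underlying pattern — package-level variables bound to CLI flags (the flag names mirror the real tool; everything else is hypothetical):

```go
package main

import (
	"flag"
	"fmt"
)

// Package-level knobs, mirroring how cmd/integration keeps chaindata,
// pruneH, pruneR, etc. as globals shared by several subcommands.
var (
	chaindata string
	pruneH    uint64
	pruneR    uint64
)

func main() {
	flag.StringVar(&chaindata, "chaindata", "", "path to the chaindata directory")
	flag.Uint64Var(&pruneH, "prune.h.older", 0, "prune history older than N blocks")
	flag.Uint64Var(&pruneR, "prune.r.older", 0, "prune receipts older than N blocks")
	flag.Parse()

	fmt.Printf("chaindata=%q pruneH=%d pruneR=%d\n", chaindata, pruneH, pruneR)
}
```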
- outputCsvFile string + chaindata string + databaseVerbosity int + referenceChaindata string + block, pruneTo, unwind uint64 + unwindEvery uint64 + batchSizeStr string + reset, warmup, noCommit bool + resetPruneAt bool + bucket string + datadirCli, toChaindata string + migration string + squeezeCommitmentFiles bool + integrityFast, integritySlow bool + file string + HeimdallURL string + txtrace bool // Whether to trace the execution (should only be used together with `block`) + pruneFlag string + pruneH, pruneR, pruneT, pruneC uint64 + pruneHBefore, pruneRBefore uint64 + pruneTBefore, pruneCBefore uint64 + experiments []string + unwindTypes []string + chain string // Which chain to use (mainnet, goerli, sepolia, etc.) + outputCsvFile string commitmentMode string commitmentTrie string diff --git a/cmd/integration/commands/reset_state.go b/cmd/integration/commands/reset_state.go index de6486b8cde..35ece38fe28 100644 --- a/cmd/integration/commands/reset_state.go +++ b/cmd/integration/commands/reset_state.go @@ -13,6 +13,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" @@ -122,10 +123,14 @@ func printStages(tx kv.Tx, snapshots *freezeblocks.RoSnapshots, borSn *freezeblo fmt.Fprintf(w, "prune distance: %s\n\n", pm.String()) fmt.Fprintf(w, "blocks.v2: %t, segments=%d, indices=%d\n", snapshots.Cfg().Enabled, snapshots.SegmentsMax(), snapshots.IndicesMax()) fmt.Fprintf(w, "blocks.bor.v2: segments=%d, indices=%d\n\n", borSn.SegmentsMax(), borSn.IndicesMax()) + h3, err := kvcfg.HistoryV3.Enabled(tx) + if err != nil { + return err + } _, lastBlockInHistSnap, _ := rawdbv3.TxNums.FindBlockNum(tx, agg.EndTxNumMinimax()) _lb, _lt, _ := rawdbv3.TxNums.Last(tx) - fmt.Fprintf(w, "state.history: idx steps: %.02f, lastBlockInSnap=%d, TxNums_Index(%d,%d), filesAmount: %d\n\n", rawdbhelpers.IdxStepsCountV3(tx), lastBlockInHistSnap, _lb, _lt, agg.FilesAmount()) + fmt.Fprintf(w, "history.v3: %t, idx steps: %.02f, lastBlockInSnap=%d, TxNums_Index(%d,%d), filesAmount: %d\n\n", h3, rawdbhelpers.IdxStepsCountV3(tx), lastBlockInHistSnap, _lb, _lt, agg.FilesAmount()) s1, err := tx.ReadSequence(kv.EthTx) if err != nil { return err diff --git a/cmd/integration/commands/root.go b/cmd/integration/commands/root.go index 5432ba3ea92..fcb9a932357 100644 --- a/cmd/integration/commands/root.go +++ b/cmd/integration/commands/root.go @@ -13,6 +13,7 @@ import ( "golang.org/x/sync/semaphore" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon/cmd/utils" @@ -91,12 +92,25 @@ func openDB(opts kv2.MdbxOpts, applyMigrations bool, logger log.Logger) (kv.RwDB } if opts.GetLabel() == kv.ChainDB { - _, _, agg := allSnapshots(context.Background(), db, logger) - tdb, err := temporal.New(db, agg) - if err != nil { + var h3 bool + var err error + if err := db.View(context.Background(), func(tx kv.Tx) error { + h3, err = kvcfg.HistoryV3.Enabled(tx) + if err != nil { + return err + } + return nil + }); err != nil { return nil, err } - db = tdb + if h3 { + _, _, agg := allSnapshots(context.Background(), db, logger) + tdb, err := temporal.New(db, agg) + if err != nil { + return nil, err + } + db = tdb + } } return db, nil diff --git a/cmd/integration/commands/stages.go 
b/cmd/integration/commands/stages.go index 925849f2ea0..55d81e8f565 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -27,6 +27,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/config3" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon-lib/wrap" @@ -565,6 +566,28 @@ var cmdSetSnap = &cobra.Command{ }, } +var cmdForceSetHistoryV3 = &cobra.Command{ + Use: "force_set_history_v3", + Short: "Override existing --history.v3 flag value (if you know what you are doing)", + Run: func(cmd *cobra.Command, args []string) { + logger := debug.SetupCobra(cmd, "integration") + db, err := openDB(dbCfg(kv.ChainDB, chaindata), true, logger) + if err != nil { + logger.Error("Opening DB", "error", err) + return + } + defer db.Close() + if err := db.Update(context.Background(), func(tx kv.RwTx) error { + return kvcfg.HistoryV3.ForceWrite(tx, _forceSetHistoryV3) + }); err != nil { + if !errors.Is(err, context.Canceled) { + logger.Error(err.Error()) + } + return + } + }, +} + func init() { withConfig(cmdPrintStages) withDataDir(cmdPrintStages) @@ -743,16 +766,20 @@ func init() { must(cmdSetSnap.MarkFlagRequired("snapshots")) rootCmd.AddCommand(cmdSetSnap) + withConfig(cmdForceSetHistoryV3) + withDataDir2(cmdForceSetHistoryV3) + cmdForceSetHistoryV3.Flags().BoolVar(&_forceSetHistoryV3, "history.v3", false, "") + must(cmdForceSetHistoryV3.MarkFlagRequired("history.v3")) + rootCmd.AddCommand(cmdForceSetHistoryV3) + withConfig(cmdSetPrune) withDataDir(cmdSetPrune) withChain(cmdSetPrune) cmdSetPrune.Flags().StringVar(&pruneFlag, "prune", "hrtc", "") - cmdSetPrune.Flags().Uint64Var(&pruneB, "prune.b.older", 0, "") cmdSetPrune.Flags().Uint64Var(&pruneH, "prune.h.older", 0, "") cmdSetPrune.Flags().Uint64Var(&pruneR, "prune.r.older", 0, "") cmdSetPrune.Flags().Uint64Var(&pruneT, "prune.t.older", 0, "") cmdSetPrune.Flags().Uint64Var(&pruneC, "prune.c.older", 0, "") - cmdSetPrune.Flags().Uint64Var(&pruneBBefore, "prune.b.before", 0, "") cmdSetPrune.Flags().Uint64Var(&pruneHBefore, "prune.h.before", 0, "") cmdSetPrune.Flags().Uint64Var(&pruneRBefore, "prune.r.before", 0, "") cmdSetPrune.Flags().Uint64Var(&pruneTBefore, "prune.t.before", 0, "") @@ -770,7 +797,7 @@ func stageSnapshots(db kv.RwDB, ctx context.Context, logger log.Logger) error { br, bw := blocksIO(db, logger) _, _, _, _, _ = newSync(ctx, db, nil /* miningConfig */, logger) - chainConfig, _ := fromdb.ChainConfig(db), fromdb.PruneMode(db) + chainConfig, _, _ := fromdb.ChainConfig(db), kvcfg.HistoryV3.FromDB(db), fromdb.PruneMode(db) return db.Update(ctx, func(tx kv.RwTx) error { if reset { @@ -825,7 +852,7 @@ func stageHeaders(db kv.RwDB, ctx context.Context, logger log.Logger) error { defer agg.Close() br, bw := blocksIO(db, logger) _, _, _, _, _ = newSync(ctx, db, nil /* miningConfig */, logger) - chainConfig, _ := fromdb.ChainConfig(db), fromdb.PruneMode(db) + chainConfig, _, _ := fromdb.ChainConfig(db), kvcfg.HistoryV3.FromDB(db), fromdb.PruneMode(db) if integritySlow { if err := db.View(ctx, func(tx kv.Tx) error { @@ -991,7 +1018,7 @@ func stageBodies(db kv.RwDB, ctx context.Context, logger log.Logger) error { defer sn.Close() defer borSn.Close() defer agg.Close() - chainConfig := fromdb.ChainConfig(db) + chainConfig, historyV3 := fromdb.ChainConfig(db), kvcfg.HistoryV3.FromDB(db) _, _, sync, _, _ := 
newSync(ctx, db, nil /* miningConfig */, logger) br, bw := blocksIO(db, logger) @@ -1004,7 +1031,7 @@ func stageBodies(db kv.RwDB, ctx context.Context, logger log.Logger) error { } u := sync.NewUnwindState(stages.Bodies, s.BlockNumber-unwind, s.BlockNumber) - cfg := stagedsync.StageBodiesCfg(db, nil, nil, nil, nil, 0, *chainConfig, br, bw, nil) + cfg := stagedsync.StageBodiesCfg(db, nil, nil, nil, nil, 0, *chainConfig, br, historyV3, bw, nil) if err := stagedsync.UnwindBodiesStage(u, tx, cfg, ctx); err != nil { return err } @@ -1151,7 +1178,7 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error { s := stage(sync, nil, db, stages.Execution) logger.Info("Stage", "name", s.ID, "progress", s.BlockNumber) - chainConfig, pm := fromdb.ChainConfig(db), fromdb.PruneMode(db) + chainConfig, historyV3, pm := fromdb.ChainConfig(db), kvcfg.HistoryV3.FromDB(db), fromdb.PruneMode(db) if pruneTo > 0 { pm.History = prune.Distance(s.BlockNumber - pruneTo) pm.Receipts = prune.Distance(s.BlockNumber - pruneTo) @@ -1167,11 +1194,11 @@ func stageExec(db kv.RwDB, ctx context.Context, logger log.Logger) error { br, _ := blocksIO(db, logger) cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, /*stateStream=*/ false, - /*badBlockHalt=*/ true, dirs, br, nil, genesis, syncCfg, agg, nil) + /*badBlockHalt=*/ true, historyV3, dirs, br, nil, genesis, syncCfg, agg, nil) - if unwind > 0 { + if unwind > 0 && historyV3 { if err := db.View(ctx, func(tx kv.Tx) error { - blockNumWithCommitment, ok, err := tx.(libstate.HasAggTx).AggTx().(*libstate.AggregatorRoTx).CanUnwindBeforeBlockNum(s.BlockNumber-unwind, tx) + blockNumWithCommitment, ok, err := tx.(libstate.HasAggCtx).AggCtx().(*libstate.AggregatorRoTx).CanUnwindBeforeBlockNum(s.BlockNumber-unwind, tx) if err != nil { return err } @@ -1260,7 +1287,7 @@ func stageCustomTrace(db kv.RwDB, ctx context.Context, logger log.Logger) error s := stage(sync, nil, db, stages.CustomTrace) logger.Info("Stage", "name", s.ID, "progress", s.BlockNumber) - chainConfig, pm := fromdb.ChainConfig(db), fromdb.PruneMode(db) + chainConfig, historyV3, pm := fromdb.ChainConfig(db), kvcfg.HistoryV3.FromDB(db), fromdb.PruneMode(db) if pruneTo > 0 { pm.History = prune.Distance(s.BlockNumber - pruneTo) pm.Receipts = prune.Distance(s.BlockNumber - pruneTo) @@ -1276,9 +1303,9 @@ func stageCustomTrace(db kv.RwDB, ctx context.Context, logger log.Logger) error br, _ := blocksIO(db, logger) cfg := stagedsync.StageCustomTraceCfg(db, pm, dirs, br, chainConfig, engine, genesis, &syncCfg) - if unwind > 0 { + if unwind > 0 && historyV3 { if err := db.View(ctx, func(tx kv.Tx) error { - blockNumWithCommitment, ok, err := tx.(libstate.HasAggTx).AggTx().(*libstate.AggregatorRoTx).CanUnwindBeforeBlockNum(s.BlockNumber-unwind, tx) + blockNumWithCommitment, ok, err := tx.(libstate.HasAggCtx).AggCtx().(*libstate.AggregatorRoTx).CanUnwindBeforeBlockNum(s.BlockNumber-unwind, tx) if err != nil { return err } @@ -1333,7 +1360,7 @@ func stageCustomTrace(db kv.RwDB, ctx context.Context, logger log.Logger) error } func stageTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error { - dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db) + dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), kvcfg.HistoryV3.FromDB(db) sn, borSn, agg := allSnapshots(ctx, db, logger) defer sn.Close() defer borSn.Close() @@ -1366,7 +1393,6 @@ func stageTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error { logger.Info("StageExec", "progress", 
execStage.BlockNumber) logger.Info("StageTrie", "progress", s.BlockNumber) br, _ := blocksIO(db, logger) - historyV3 := true cfg := stagedsync.StageTrieCfg(db, true /* checkRoot */, true /* saveHashesToDb */, false /* badBlockHalt */, dirs.Tmp, br, nil /* hd */, historyV3, agg) if unwind > 0 { u := sync.NewUnwindState(stages.IntermediateHashes, s.BlockNumber-unwind, s.BlockNumber) @@ -1392,7 +1418,7 @@ func stageTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error { } func stagePatriciaTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error { - dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db) + dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), kvcfg.HistoryV3.FromDB(db) _ = pm sn, _, agg := allSnapshots(ctx, db, logger) defer sn.Close() @@ -1411,8 +1437,11 @@ func stagePatriciaTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error } defer tx.Rollback() + if enabled, _ := kvcfg.HistoryV3.Enabled(tx); !enabled { + panic("this method for v3 only") + } + br, _ := blocksIO(db, logger) - historyV3 := true cfg := stagedsync.StageTrieCfg(db, true /* checkRoot */, true /* saveHashesToDb */, false /* badBlockHalt */, dirs.Tmp, br, nil /* hd */, historyV3, agg) if _, err := stagedsync.RebuildPatriciaTrieBasedOnFiles(tx, cfg, ctx, logger); err != nil { @@ -1422,254 +1451,257 @@ func stagePatriciaTrie(db kv.RwDB, ctx context.Context, logger log.Logger) error } func stageHashState(db kv.RwDB, ctx context.Context, logger log.Logger) error { - return fmt.Errorf("this stage is disable in --history.v3=true") - //dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db) - //sn, borSn, agg := allSnapshots(ctx, db, logger) - //defer sn.Close() - //defer borSn.Close() - //defer agg.Close() - //_, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) - //must(sync.SetCurrentStage(stages.HashState)) - // - //if warmup { - // return reset2.Warmup(ctx, db, log.LvlInfo, stages.HashState) - //} - //if reset { - // return reset2.Reset(ctx, db, stages.HashState) - //} - // - //tx, err := db.BeginRw(ctx) - //if err != nil { - // return err - //} - //defer tx.Rollback() - // - //s := stage(sync, tx, nil, stages.HashState) - //if pruneTo > 0 { - // pm.History = prune.Distance(s.BlockNumber - pruneTo) - // pm.Receipts = prune.Distance(s.BlockNumber - pruneTo) - // pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo) - // pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo) - //} - // - //logger.Info("Stage", "name", s.ID, "progress", s.BlockNumber) - // - //cfg := stagedsync.StageHashStateCfg(db, dirs, historyV3) - //if unwind > 0 { - // u := sync.NewUnwindState(stages.HashState, s.BlockNumber-unwind, s.BlockNumber) - // err = stagedsync.UnwindHashStateStage(u, s, tx, cfg, ctx, logger) - // if err != nil { - // return err - // } - //} else if pruneTo > 0 { - // p, err := sync.PruneStageState(stages.HashState, s.BlockNumber, tx, nil) - // if err != nil { - // return err - // } - // err = stagedsync.PruneHashStateStage(p, tx, cfg, ctx) - // if err != nil { - // return err - // } - //} else { - // err = stagedsync.SpawnHashStateStage(s, tx, cfg, ctx, logger) - // if err != nil { - // return err - // } - //} - //return tx.Commit() -} + dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), kvcfg.HistoryV3.FromDB(db) + sn, borSn, agg := allSnapshots(ctx, db, logger) + defer sn.Close() + defer borSn.Close() + defer agg.Close() + _, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) + must(sync.SetCurrentStage(stages.HashState)) 
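The stages restored below all share one guard, added consistently in this diff: read the persisted history-layout flag (`kvcfg.HistoryV3.FromDB` / `.Enabled`) and refuse to run the legacy stage under the v3 layout with "this stage is disable in --history.v3=true" (sic). A toy version of that gate, with an in-memory map standing in for the MDBX-backed config table:

```go
package main

import (
	"errors"
	"fmt"
)

// configStore stands in for the kv config table that kvcfg reads;
// the real implementation persists the flag in MDBX.
type configStore map[string][]byte

func (c configStore) historyV3Enabled() bool {
	v, ok := c["history.v3"]
	return ok && len(v) == 1 && v[0] == 1
}

// stageLogIndex mirrors the guard added in this diff: legacy index
// stages refuse to run when the node uses the v3 history layout.
func stageLogIndex(cfg configStore) error {
	if cfg.historyV3Enabled() {
		return errors.New("this stage is disabled when --history.v3=true")
	}
	fmt.Println("running legacy log-index stage")
	return nil
}

func main() {
	v2 := configStore{"history.v3": {0}}
	v3 := configStore{"history.v3": {1}}
	fmt.Println(stageLogIndex(v2)) // runs, then prints <nil>
	fmt.Println(stageLogIndex(v3)) // refuses with an error
}
```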
-func stageLogIndex(db kv.RwDB, ctx context.Context, logger log.Logger) error { - return fmt.Errorf("this stage is disable in --history.v3=true") - //dirs, pm, chainConfig := datadir.New(datadirCli), fromdb.PruneMode(db), fromdb.ChainConfig(db) - //_, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) - //must(sync.SetCurrentStage(stages.LogIndex)) - //if warmup { - // return reset2.Warmup(ctx, db, log.LvlInfo, stages.LogIndex) - //} - //if reset { - // return reset2.Reset(ctx, db, stages.LogIndex) - //} - //if resetPruneAt { - // return reset2.ResetPruneAt(ctx, db, stages.LogIndex) - //} - //tx, err := db.BeginRw(ctx) - //if err != nil { - // return err - //} - //defer tx.Rollback() - // - //execAt := progress(tx, stages.Execution) - //s := stage(sync, tx, nil, stages.LogIndex) - //if pruneTo > 0 { - // pm.History = prune.Distance(s.BlockNumber - pruneTo) - // pm.Receipts = prune.Distance(s.BlockNumber - pruneTo) - // pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo) - // pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo) - //} - // - //logger.Info("Stage exec", "progress", execAt) - //logger.Info("Stage", "name", s.ID, "progress", s.BlockNumber) - // - //cfg := stagedsync.StageLogIndexCfg(db, pm, dirs.Tmp, chainConfig.DepositContract) - //if unwind > 0 { - // u := sync.NewUnwindState(stages.LogIndex, s.BlockNumber-unwind, s.BlockNumber) - // err = stagedsync.UnwindLogIndex(u, s, tx, cfg, ctx) - // if err != nil { - // return err - // } - //} else if pruneTo > 0 { - // p, err := sync.PruneStageState(stages.LogIndex, s.BlockNumber, nil, db) - // if err != nil { - // return err - // } - // err = stagedsync.PruneLogIndex(p, tx, cfg, ctx, logger) - // if err != nil { - // return err - // } - //} else { - // if err := stagedsync.SpawnLogIndex(s, tx, cfg, ctx, block, logger); err != nil { - // return err - // } - //} - //return tx.Commit() -} + if warmup { + return reset2.Warmup(ctx, db, log.LvlInfo, stages.HashState) + } + if reset { + return reset2.Reset(ctx, db, stages.HashState) + } -func stageCallTraces(db kv.RwDB, ctx context.Context, logger log.Logger) error { - return fmt.Errorf("this stage is disable in --history.v3=true") - /* - dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db) - _, _, sync, _, _ := newSync(ctx, db, nil , logger) - must(sync.SetCurrentStage(stages.CallTraces)) + tx, err := db.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + + s := stage(sync, tx, nil, stages.HashState) + if pruneTo > 0 { + pm.History = prune.Distance(s.BlockNumber - pruneTo) + pm.Receipts = prune.Distance(s.BlockNumber - pruneTo) + pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo) + pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo) + } - if warmup { - return reset2.Warmup(ctx, db, log.LvlInfo, stages.CallTraces) + logger.Info("Stage", "name", s.ID, "progress", s.BlockNumber) + + cfg := stagedsync.StageHashStateCfg(db, dirs, historyV3) + if unwind > 0 { + u := sync.NewUnwindState(stages.HashState, s.BlockNumber-unwind, s.BlockNumber) + err = stagedsync.UnwindHashStateStage(u, s, tx, cfg, ctx, logger) + if err != nil { + return err } - if reset { - return reset2.Reset(ctx, db, stages.CallTraces) + } else if pruneTo > 0 { + p, err := sync.PruneStageState(stages.HashState, s.BlockNumber, tx, nil) + if err != nil { + return err } - - tx, err := db.BeginRw(ctx) + err = stagedsync.PruneHashStateStage(p, tx, cfg, ctx) if err != nil { return err } - defer tx.Rollback() - var batchSize datasize.ByteSize - 
must(batchSize.UnmarshalText([]byte(batchSizeStr))) + } else { + err = stagedsync.SpawnHashStateStage(s, tx, cfg, ctx, logger) + if err != nil { + return err + } + } + return tx.Commit() +} - execStage := progress(tx, stages.Execution) - s := stage(sync, tx, nil, stages.CallTraces) - if pruneTo > 0 { - pm.History = prune.Distance(s.BlockNumber - pruneTo) - pm.Receipts = prune.Distance(s.BlockNumber - pruneTo) - pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo) - pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo) +func stageLogIndex(db kv.RwDB, ctx context.Context, logger log.Logger) error { + dirs, pm, historyV3, chainConfig := datadir.New(datadirCli), fromdb.PruneMode(db), kvcfg.HistoryV3.FromDB(db), fromdb.ChainConfig(db) + if historyV3 { + return fmt.Errorf("this stage is disable in --history.v3=true") + } + _, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) + must(sync.SetCurrentStage(stages.LogIndex)) + if warmup { + return reset2.Warmup(ctx, db, log.LvlInfo, stages.LogIndex) + } + if reset { + return reset2.Reset(ctx, db, stages.LogIndex) + } + if resetPruneAt { + return reset2.ResetPruneAt(ctx, db, stages.LogIndex) + } + tx, err := db.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + + execAt := progress(tx, stages.Execution) + s := stage(sync, tx, nil, stages.LogIndex) + if pruneTo > 0 { + pm.History = prune.Distance(s.BlockNumber - pruneTo) + pm.Receipts = prune.Distance(s.BlockNumber - pruneTo) + pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo) + pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo) + } + + logger.Info("Stage exec", "progress", execAt) + logger.Info("Stage", "name", s.ID, "progress", s.BlockNumber) + + cfg := stagedsync.StageLogIndexCfg(db, pm, dirs.Tmp, chainConfig.DepositContract) + if unwind > 0 { + u := sync.NewUnwindState(stages.LogIndex, s.BlockNumber-unwind, s.BlockNumber) + err = stagedsync.UnwindLogIndex(u, s, tx, cfg, ctx) + if err != nil { + return err + } + } else if pruneTo > 0 { + p, err := sync.PruneStageState(stages.LogIndex, s.BlockNumber, nil, db) + if err != nil { + return err + } + err = stagedsync.PruneLogIndex(p, tx, cfg, ctx, logger) + if err != nil { + return err } - logger.Info("ID exec", "progress", execStage) - if block != 0 { - s.BlockNumber = block - logger.Info("Overriding initial state", "block", block) + } else { + if err := stagedsync.SpawnLogIndex(s, tx, cfg, ctx, block, logger); err != nil { + return err } - logger.Info("ID call traces", "progress", s.BlockNumber) + } + return tx.Commit() +} - cfg := stagedsync.StageCallTracesCfg(db, pm, block, dirs.Tmp) +func stageCallTraces(db kv.RwDB, ctx context.Context, logger log.Logger) error { + dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), kvcfg.HistoryV3.FromDB(db) + if historyV3 { + return fmt.Errorf("this stage is disable in --history.v3=true") + } + _, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) + must(sync.SetCurrentStage(stages.CallTraces)) - if unwind > 0 { - u := sync.NewUnwindState(stages.CallTraces, s.BlockNumber-unwind, s.BlockNumber) - err = stagedsync.UnwindCallTraces(u, s, tx, cfg, ctx, logger) - if err != nil { - return err - } - } else if pruneTo > 0 { - p, err := sync.PruneStageState(stages.CallTraces, s.BlockNumber, tx, nil) - if err != nil { - return err - } - err = stagedsync.PruneCallTraces(p, tx, cfg, ctx, logger) - if err != nil { - return err - } - } else { - if err := stagedsync.SpawnCallTraces(s, tx, cfg, ctx, logger); err != nil { - return err - } + if warmup { 
+ return reset2.Warmup(ctx, db, log.LvlInfo, stages.CallTraces) + } + if reset { + return reset2.Reset(ctx, db, stages.CallTraces) + } + + tx, err := db.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + var batchSize datasize.ByteSize + must(batchSize.UnmarshalText([]byte(batchSizeStr))) + + execStage := progress(tx, stages.Execution) + s := stage(sync, tx, nil, stages.CallTraces) + if pruneTo > 0 { + pm.History = prune.Distance(s.BlockNumber - pruneTo) + pm.Receipts = prune.Distance(s.BlockNumber - pruneTo) + pm.CallTraces = prune.Distance(s.BlockNumber - pruneTo) + pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo) + } + logger.Info("ID exec", "progress", execStage) + if block != 0 { + s.BlockNumber = block + logger.Info("Overriding initial state", "block", block) + } + logger.Info("ID call traces", "progress", s.BlockNumber) + + cfg := stagedsync.StageCallTracesCfg(db, pm, block, dirs.Tmp) + + if unwind > 0 { + u := sync.NewUnwindState(stages.CallTraces, s.BlockNumber-unwind, s.BlockNumber) + err = stagedsync.UnwindCallTraces(u, s, tx, cfg, ctx, logger) + if err != nil { + return err + } + } else if pruneTo > 0 { + p, err := sync.PruneStageState(stages.CallTraces, s.BlockNumber, tx, nil) + if err != nil { + return err + } + err = stagedsync.PruneCallTraces(p, tx, cfg, ctx, logger) + if err != nil { + return err } - return tx.Commit() - */ + } else { + if err := stagedsync.SpawnCallTraces(s, tx, cfg, ctx, logger); err != nil { + return err + } + } + return tx.Commit() } func stageHistory(db kv.RwDB, ctx context.Context, logger log.Logger) error { - return fmt.Errorf("this stage is disable in --history.v3=true") - //dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db) - //sn, borSn, agg := allSnapshots(ctx, db, logger) - //defer sn.Close() - //defer borSn.Close() - //defer agg.Close() - //_, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) - //must(sync.SetCurrentStage(stages.AccountHistoryIndex)) - // - //if warmup { - // return reset2.Warmup(ctx, db, log.LvlInfo, stages.AccountHistoryIndex, stages.StorageHistoryIndex) - //} - //if reset { - // return reset2.Reset(ctx, db, stages.AccountHistoryIndex, stages.StorageHistoryIndex) - //} - //tx, err := db.BeginRw(ctx) - //if err != nil { - // return err - //} - //defer tx.Rollback() - // - //execStage := progress(tx, stages.Execution) - //stageStorage := stage(sync, tx, nil, stages.StorageHistoryIndex) - //stageAcc := stage(sync, tx, nil, stages.AccountHistoryIndex) - //if pruneTo > 0 { - // pm.History = prune.Distance(stageAcc.BlockNumber - pruneTo) - // pm.Receipts = prune.Distance(stageAcc.BlockNumber - pruneTo) - // pm.CallTraces = prune.Distance(stageAcc.BlockNumber - pruneTo) - // pm.TxIndex = prune.Distance(stageAcc.BlockNumber - pruneTo) - //} - //logger.Info("ID exec", "progress", execStage) - //logger.Info("ID acc history", "progress", stageAcc.BlockNumber) - //logger.Info("ID storage history", "progress", stageStorage.BlockNumber) - // - //cfg := stagedsync.StageHistoryCfg(db, pm, dirs.Tmp) - //if unwind > 0 { //nolint:staticcheck - // u := sync.NewUnwindState(stages.StorageHistoryIndex, stageStorage.BlockNumber-unwind, stageStorage.BlockNumber) - // if err := stagedsync.UnwindStorageHistoryIndex(u, stageStorage, tx, cfg, ctx); err != nil { - // return err - // } - // u = sync.NewUnwindState(stages.AccountHistoryIndex, stageAcc.BlockNumber-unwind, stageAcc.BlockNumber) - // if err := stagedsync.UnwindAccountHistoryIndex(u, stageAcc, tx, cfg, ctx); err != nil { - // return err - // } - 
//} else if pruneTo > 0 { - // pa, err := sync.PruneStageState(stages.AccountHistoryIndex, stageAcc.BlockNumber, tx, db) - // if err != nil { - // return err - // } - // err = stagedsync.PruneAccountHistoryIndex(pa, tx, cfg, ctx, logger) - // if err != nil { - // return err - // } - // ps, err := sync.PruneStageState(stages.StorageHistoryIndex, stageStorage.BlockNumber, tx, db) - // if err != nil { - // return err - // } - // err = stagedsync.PruneStorageHistoryIndex(ps, tx, cfg, ctx, logger) - // if err != nil { - // return err - // } - // _ = printStages(tx, sn, borSn, agg) - //} else { - // if err := stagedsync.SpawnAccountHistoryIndex(stageAcc, tx, cfg, ctx, logger); err != nil { - // return err - // } - // if err := stagedsync.SpawnStorageHistoryIndex(stageStorage, tx, cfg, ctx, logger); err != nil { - // return err - // } - //} - //return tx.Commit() + dirs, pm, historyV3 := datadir.New(datadirCli), fromdb.PruneMode(db), kvcfg.HistoryV3.FromDB(db) + if historyV3 { + return fmt.Errorf("this stage is disable in --history.v3=true") + } + sn, borSn, agg := allSnapshots(ctx, db, logger) + defer sn.Close() + defer borSn.Close() + defer agg.Close() + _, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) + must(sync.SetCurrentStage(stages.AccountHistoryIndex)) + + if warmup { + return reset2.Warmup(ctx, db, log.LvlInfo, stages.AccountHistoryIndex, stages.StorageHistoryIndex) + } + if reset { + return reset2.Reset(ctx, db, stages.AccountHistoryIndex, stages.StorageHistoryIndex) + } + tx, err := db.BeginRw(ctx) + if err != nil { + return err + } + defer tx.Rollback() + + execStage := progress(tx, stages.Execution) + stageStorage := stage(sync, tx, nil, stages.StorageHistoryIndex) + stageAcc := stage(sync, tx, nil, stages.AccountHistoryIndex) + if pruneTo > 0 { + pm.History = prune.Distance(stageAcc.BlockNumber - pruneTo) + pm.Receipts = prune.Distance(stageAcc.BlockNumber - pruneTo) + pm.CallTraces = prune.Distance(stageAcc.BlockNumber - pruneTo) + pm.TxIndex = prune.Distance(stageAcc.BlockNumber - pruneTo) + } + logger.Info("ID exec", "progress", execStage) + logger.Info("ID acc history", "progress", stageAcc.BlockNumber) + logger.Info("ID storage history", "progress", stageStorage.BlockNumber) + + cfg := stagedsync.StageHistoryCfg(db, pm, dirs.Tmp) + if unwind > 0 { //nolint:staticcheck + u := sync.NewUnwindState(stages.StorageHistoryIndex, stageStorage.BlockNumber-unwind, stageStorage.BlockNumber) + if err := stagedsync.UnwindStorageHistoryIndex(u, stageStorage, tx, cfg, ctx); err != nil { + return err + } + u = sync.NewUnwindState(stages.AccountHistoryIndex, stageAcc.BlockNumber-unwind, stageAcc.BlockNumber) + if err := stagedsync.UnwindAccountHistoryIndex(u, stageAcc, tx, cfg, ctx); err != nil { + return err + } + } else if pruneTo > 0 { + pa, err := sync.PruneStageState(stages.AccountHistoryIndex, stageAcc.BlockNumber, tx, db) + if err != nil { + return err + } + err = stagedsync.PruneAccountHistoryIndex(pa, tx, cfg, ctx, logger) + if err != nil { + return err + } + ps, err := sync.PruneStageState(stages.StorageHistoryIndex, stageStorage.BlockNumber, tx, db) + if err != nil { + return err + } + err = stagedsync.PruneStorageHistoryIndex(ps, tx, cfg, ctx, logger) + if err != nil { + return err + } + _ = printStages(tx, sn, borSn, agg) + } else { + if err := stagedsync.SpawnAccountHistoryIndex(stageAcc, tx, cfg, ctx, logger); err != nil { + return err + } + if err := stagedsync.SpawnStorageHistoryIndex(stageStorage, tx, cfg, ctx, logger); err != nil { + return err + } + } + 
return tx.Commit() } func stageTxLookup(db kv.RwDB, ctx context.Context, logger log.Logger) error { @@ -1822,8 +1854,9 @@ var _blockWriterSingleton *blockio.BlockWriter func blocksIO(db kv.RoDB, logger log.Logger) (services.FullBlockReader, *blockio.BlockWriter) { openBlockReaderOnce.Do(func() { sn, borSn, _ := allSnapshots(context.Background(), db, logger) + histV3 := kvcfg.HistoryV3.FromDB(db) _blockReaderSingleton = freezeblocks.NewBlockReader(sn, borSn) - _blockWriterSingleton = blockio.NewBlockWriter() + _blockWriterSingleton = blockio.NewBlockWriter(histV3) }) return _blockReaderSingleton, _blockWriterSingleton } @@ -1831,14 +1864,14 @@ func blocksIO(db kv.RoDB, logger log.Logger) (services.FullBlockReader, *blockio const blockBufferSize = 128 func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig, logger log.Logger) (consensus.Engine, *vm.Config, *stagedsync.Sync, *stagedsync.Sync, stagedsync.MiningState) { - dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db) + dirs, historyV3, pm := datadir.New(datadirCli), kvcfg.HistoryV3.FromDB(db), fromdb.PruneMode(db) vmConfig := &vm.Config{} events := shards.NewEvents() genesis := core.GenesisBlockByChainName(chain) - chainConfig, genesisBlock, genesisErr := core.CommitGenesisBlock(db, genesis, "", logger) + chainConfig, genesisBlock, genesisErr := core.CommitGenesisBlock(db, genesis, "", logger, nil) if _, ok := genesisErr.(*chain2.ConfigCompatError); genesisErr != nil && !ok { panic(genesisErr) } @@ -1848,6 +1881,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig, must(batchSize.UnmarshalText([]byte(batchSizeStr))) cfg := ethconfig.Defaults + cfg.HistoryV3 = historyV3 cfg.Prune = pm cfg.BatchSize = batchSize cfg.DeprecatedTxPool.Disable = true @@ -1906,7 +1940,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig, signatures = bor.Signatures } stages := stages2.NewDefaultStages(context.Background(), db, snapDb, p2p.Config{}, &cfg, sentryControlServer, notifications, nil, blockReader, blockRetire, agg, nil, nil, - heimdallClient, recents, signatures, logger) + heimdallClient, recents, signatures, logger, nil) sync := stagedsync.New(cfg.Sync, stages, stagedsync.DefaultUnwindOrder, stagedsync.DefaultPruneOrder, logger) miner := stagedsync.NewMiningState(&cfg.Miner) @@ -1932,6 +1966,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig, notifications.Accumulator, cfg.StateStream, /*stateStream=*/ false, + cfg.HistoryV3, dirs, blockReader, sentryControlServer.Hd, @@ -1970,8 +2005,8 @@ func stage(st *stagedsync.Sync, tx kv.Tx, db kv.RoDB, stage stages.SyncStage) *s func overrideStorageMode(db kv.RwDB, logger log.Logger) error { chainConfig := fromdb.ChainConfig(db) - pm, err := prune.FromCli(chainConfig.ChainID.Uint64(), pruneFlag, pruneB, pruneH, pruneR, pruneT, pruneC, - pruneHBefore, pruneRBefore, pruneTBefore, pruneCBefore, pruneBBefore, experiments) + pm, err := prune.FromCli(chainConfig.ChainID.Uint64(), pruneFlag, pruneH, pruneR, pruneT, pruneC, + pruneHBefore, pruneRBefore, pruneTBefore, pruneCBefore, experiments) if err != nil { return err } diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index 9be136fa832..9d1b5ee0e4f 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -23,6 +23,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/kv" 
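A smaller pattern worth noting from the `stages.go` hunk just above: `blocksIO` constructs its block reader/writer pair once behind a `sync.Once`, and this diff merely threads the `histV3` flag (read from the DB) into that one-time construction. Stripped to its skeleton, assuming only the writer matters:

```go
package main

import (
	"fmt"
	"sync"
)

type blockWriter struct{ histV3 bool }

var (
	openOnce        sync.Once
	writerSingleton *blockWriter
)

// blocksIO mirrors cmd/integration's accessor: the first caller builds
// the writer (with the flag read from the DB in the real code), and
// every later caller receives the same instance.
func blocksIO(histV3 bool) *blockWriter {
	openOnce.Do(func() {
		writerSingleton = &blockWriter{histV3: histV3}
	})
	return writerSingleton
}

func main() {
	a := blocksIO(true)
	b := blocksIO(false) // too late: the singleton is already built
	fmt.Println(a == b, a.histV3, b.histV3)
}
```

One consequence visible in the sketch: whatever value the first caller supplies wins, which is presumably why the real code reads `HistoryV3` from the database inside the `Once` rather than taking it as a parameter from each call site.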
"github.com/ledgerwatch/erigon-lib/kv/bitmapdb" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2" "github.com/ledgerwatch/erigon/cmd/hack/tool/fromdb" @@ -180,7 +181,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. defer borSn.Close() defer agg.Close() engine, vmConfig, stateStages, miningStages, miner := newSync(ctx, db, &miningConfig, logger1) - chainConfig, pm := fromdb.ChainConfig(db), fromdb.PruneMode(db) + chainConfig, historyV3, pm := fromdb.ChainConfig(db), kvcfg.HistoryV3.FromDB(db), fromdb.PruneMode(db) tx, err := db.BeginRw(ctx) if err != nil { @@ -223,7 +224,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. syncCfg.ReconWorkerCount = int(reconWorkers) br, _ := blocksIO(db, logger1) - execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, changesAcc, false, true, dirs, + execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, changesAcc, false, true, historyV3, dirs, br, nil, genesis, syncCfg, agg, nil) execUntilFunc := func(execToBlock uint64) func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, txc wrap.TxContainer, logger log.Logger) error { @@ -248,8 +249,10 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. stopAt = 1 } + var structLogger *logger.StructLogger traceStart := func() { - vmConfig.Tracer = logger.NewStructLogger(&logger.LogConfig{}) + structLogger = logger.NewStructLogger(&logger.LogConfig{}) + vmConfig.Tracer = structLogger.Hooks() vmConfig.Debug = true } traceStop := func(id int) { @@ -262,7 +265,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. 
} encoder := json.NewEncoder(w) encoder.SetIndent(" ", " ") - for _, l := range logger.FormatLogs(vmConfig.Tracer.(*logger.StructLogger).StructLogs()) { + for _, l := range logger.FormatLogs(structLogger.StructLogs()) { if err2 := encoder.Encode(l); err2 != nil { panic(err2) } @@ -460,6 +463,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) e defer agg.Close() _, _, sync, _, _ := newSync(ctx, db, nil /* miningConfig */, logger) dirs := datadir.New(datadirCli) + historyV3 := kvcfg.HistoryV3.FromDB(db) tx, err := db.BeginRw(ctx) if err != nil { @@ -474,13 +478,12 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) e to := execStage.BlockNumber - unwind _ = sync.SetCurrentStage(stages.HashState) u := &stagedsync.UnwindState{ID: stages.HashState, UnwindPoint: to} - if err = stagedsync.UnwindHashStateStage(u, stage(sync, tx, nil, stages.HashState), tx, stagedsync.StageHashStateCfg(db, dirs), ctx, logger); err != nil { + if err = stagedsync.UnwindHashStateStage(u, stage(sync, tx, nil, stages.HashState), tx, stagedsync.StageHashStateCfg(db, dirs, historyV3), ctx, logger); err != nil { return err } _ = sync.SetCurrentStage(stages.IntermediateHashes) u = &stagedsync.UnwindState{ID: stages.IntermediateHashes, UnwindPoint: to} br, _ := blocksIO(db, logger) - historyV3 := true if err = stagedsync.UnwindIntermediateHashesStage(u, stage(sync, tx, nil, stages.IntermediateHashes), tx, stagedsync.StageTrieCfg(db, true, true, false, dirs.Tmp, br, nil, historyV3, agg), ctx, logger); err != nil { return err @@ -548,6 +551,10 @@ func loopExec(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) sync.EnableStages(stages.Execution) var batchSize datasize.ByteSize must(batchSize.UnmarshalText([]byte(batchSizeStr))) + historyV3, err := kvcfg.HistoryV3.Enabled(tx) + if err != nil { + return err + } from := progress(tx, stages.Execution) to := from + unwind @@ -560,7 +567,7 @@ func loopExec(db kv.RwDB, ctx context.Context, unwind uint64, logger log.Logger) br, _ := blocksIO(db, logger) cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, /*stateStream=*/ false, - /*badBlockHalt=*/ true, dirs, br, nil, genesis, syncCfg, agg, nil) + /*badBlockHalt=*/ true, historyV3, dirs, br, nil, genesis, syncCfg, agg, nil) // set block limit of execute stage sync.MockExecFunc(stages.Execution, func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, txc wrap.TxContainer, logger log.Logger) error { diff --git a/cmd/prometheus/dashboards/erigon_internals.json b/cmd/prometheus/dashboards/erigon_internals.json index 445e2ddc5b3..93a9c7e5c7e 100644 --- a/cmd/prometheus/dashboards/erigon_internals.json +++ b/cmd/prometheus/dashboards/erigon_internals.json @@ -1,4 +1,47 @@ { + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "panel", + "id": "bargauge", + "name": "Bar gauge", + "version": "" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.4.2" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "stat", + "name": "Stat", + "version": "" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], "annotations": { "list": [ { @@ 
-24,7 +67,7 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 2, + "id": null, "links": [], "liveNow": false, "panels": [ @@ -215,7 +258,7 @@ "overrides": [] }, "gridPos": { - "h": 6, + "h": 5, "w": 8, "x": 8, "y": 1 @@ -235,7 +278,7 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { @@ -317,7 +360,7 @@ "overrides": [] }, "gridPos": { - "h": 6, + "h": 4, "w": 8, "x": 16, "y": 1 @@ -325,15 +368,13 @@ "id": 200, "options": { "legend": { - "calcs": [ - "mean" - ], + "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { - "mode": "multi", + "mode": "single", "sort": "none" } }, @@ -347,12 +388,197 @@ "editorMode": "code", "expr": "prune_seconds{quantile=\"$quantile\",instance=~\"$instance\"}", "instant": false, - "legendFormat": "{{ type }}: {{ instance }}", + "legendFormat": "{{instance}} {{type}} ", "range": true, "refId": "A" } ], "title": "Prune, seconds", + "transparent": true, + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 2 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 8, + "x": 16, + "y": 5 + }, + "id": 202, + "options": { + "displayMode": "lcd", + "maxVizHeight": 300, + "minVizHeight": 16, + "minVizWidth": 8, + "namePlacement": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "sizing": "auto", + "valueMode": "color" + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "domain_prunable{instance=~\"$instance\",type=\"domain\"}", + "hide": false, + "legendFormat": "{{instance}}-{{type}}-{{table}}", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "domain_prunable{instance=~\"$instance\",type=\"history\",table!=\"commitment\"}/1562500", + "hide": false, + "legendFormat": "{{instance}}-{{type}}-{{table}}", + "range": true, + "refId": "C" + } + ], + "title": "pruning availability, steps", + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ops" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 8, + "y": 6 + }, + "id": 158, + "options": { 
+ "legend": { + "calcs": [ + "mean" + ], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "8.0.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "rate(sync{instance=~\"$instance\",stage=\"execution\"}[$rate_interval])", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ stage }}: {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Sync Stages progress rate", "type": "timeseries" }, { @@ -419,7 +645,7 @@ "overrides": [] }, "gridPos": { - "h": 9, + "h": 8, "w": 8, "x": 0, "y": 7 @@ -469,12 +695,24 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "irate(domain_commitment_keys{instance=~\"$instance\"}[$__rate_interval])", + "expr": "sum(rate(domain_commitment_keys[$rate_interval])) by (instance)", "hide": false, "legendFormat": "keys committed: {{instance}}", "range": true, "refId": "A" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "irate(domain_commitment_updates{instance=~\"$instance\"}[$rate_interval])", + "hide": false, + "legendFormat": "commitment node updates: {{instance}}", + "range": true, + "refId": "C" + }, { "datasource": { "type": "prometheus", @@ -483,7 +721,7 @@ "editorMode": "code", "expr": "irate(domain_commitment_updates_applied{instance=~\"$instance\"}[$rate_interval])", "hide": false, - "legendFormat": "prefixes committed {{instance}}", + "legendFormat": "commitment trie node updates: {{instance}}", "range": true, "refId": "F" }, @@ -521,7 +759,7 @@ "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", - "fillOpacity": 10, + "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, @@ -529,14 +767,14 @@ "viz": false }, "insertNulls": false, - "lineInterpolation": "linear", + "lineInterpolation": "smooth", "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, - "showPoints": "never", - "spanNulls": true, + "showPoints": "auto", + "spanNulls": false, "stacking": { "group": "A", "mode": "none" @@ -558,30 +796,27 @@ "value": 80 } ] - }, - "unit": "ops" + } }, "overrides": [] }, "gridPos": { - "h": 6, + "h": 5, "w": 8, "x": 8, - "y": 7 + "y": 11 }, - "id": 158, + "id": 198, "options": { "legend": { - "calcs": [ - "mean" - ], + "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, "tooltip": { "mode": "multi", - "sort": "none" + "sort": "desc" } }, "pluginVersion": "10.3.4", @@ -592,189 +827,8 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "exemplar": true, - "expr": "rate(sync{instance=~\"$instance\",stage=\"execution\"}[$rate_interval])", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ stage }}: {{instance}}", - "range": true, - "refId": "A" - } - ], - "title": "Sync Stages progress rate", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 2 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 8, - "x": 16, - "y": 7 - }, - "id": 202, - "options": { - "displayMode": "lcd", - "maxVizHeight": 300, - "minVizHeight": 16, - 
"minVizWidth": 8, - "namePlacement": "auto", - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showUnfilled": true, - "sizing": "auto", - "valueMode": "color" - }, - "pluginVersion": "10.4.2", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "domain_prunable{instance=~\"$instance\",type=\"domain\"}", - "hide": false, - "legendFormat": "{{instance}}-{{type}}-{{table}}", - "range": true, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "domain_prunable{instance=~\"$instance\",type=\"history\",table!=\"commitment\"}/1562500", - "hide": false, - "legendFormat": "{{instance}}-{{type}}-{{table}}", - "range": true, - "refId": "C" - } - ], - "title": "pruning availability, steps", - "type": "bargauge" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "smooth", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 5, - "w": 8, - "x": 8, - "y": 13 - }, - "id": 198, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "desc" - } - }, - "pluginVersion": "10.3.4", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "domain_running_merges{instance=~\"$instance\"}", - "legendFormat": "running merges: {{instance}}", + "expr": "domain_running_merges{instance=~\"$instance\"}", + "legendFormat": "running merges: {{instance}}", "range": true, "refId": "A" }, @@ -833,15 +887,15 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "domain_running_unwind{instance=~\"$instance\"}", + "expr": "domain_wal_flushes{instance=~\"$instance\"}", "hide": false, "instant": false, - "legendFormat": "running unwind {{instance}}", + "legendFormat": "WAL flushes {{instance}}", "range": true, - "refId": "G" + "refId": "F" } ], - "title": "State: running collate/merge/prune/unwind", + "title": "State: running collate/merge/prune", "type": "timeseries" }, { @@ -908,7 +962,7 @@ "h": 5, "w": 8, "x": 16, - "y": 13 + "y": 11 }, "id": 199, "options": { @@ -926,7 +980,7 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { @@ -1004,13 +1058,39 @@ }, "unit": "s" }, - "overrides": [] + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "prune took [index]: mainnet3-1:6061", + "prune took [index]: mainnet3-3:6061" + ], 
+ "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] }, "gridPos": { - "h": 7, + "h": 6, "w": 8, "x": 0, - "y": 16 + "y": 15 }, "id": 112, "options": { @@ -1035,10 +1115,10 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "rate(domain_collate_took_sum{instance=~\"$instance\",type=\"domain\"}[$rate_interval])", + "expr": "rate(domain_collate_took_sum{instance=~\"$instance\"}[$rate_interval])", "format": "time_series", "instant": false, - "legendFormat": "collation [domain]: {{instance}}", + "legendFormat": "collation took: {{instance}}", "range": true, "refId": "A" }, @@ -1048,9 +1128,9 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "rate(domain_step_took_sum{instance=~\"$instance\"}[$__rate_interval])", + "expr": "rate(domain_step_took_sum{instance=~\"$instance\"}[$rate_interval])", "hide": false, - "legendFormat": "step: {{instance}}", + "legendFormat": "step took: {{instance}}", "range": true, "refId": "C" }, @@ -1062,7 +1142,7 @@ "editorMode": "code", "expr": "rate(domain_prune_took_sum{instance=~\"$instance\"}[$rate_interval])", "hide": false, - "legendFormat": "prune [{{type}}]: {{instance}}", + "legendFormat": "prune took [{{type}}]: {{instance}}", "range": true, "refId": "B" }, @@ -1074,7 +1154,7 @@ "editorMode": "code", "expr": "rate(domain_commitment_took_sum{instance=~\"$instance\"}[$rate_interval])", "hide": false, - "legendFormat": "commitment: {{instance}}", + "legendFormat": "commitment took: {{instance}}", "range": true, "refId": "D" }, @@ -1084,42 +1164,16 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "rate(domain_collate_took_sum{instance=~\"$instance\",type=\"index\"}[$rate_interval])", - "format": "time_series", - "hide": false, - "instant": false, - "legendFormat": "collation [idx]: {{instance}}", - "range": true, - "refId": "E" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "rate(domain_unwind_took{instance=~\"$instance\",type=\"domain\"}[$rate_interval])", - "hide": false, - "instant": false, - "legendFormat": "unwind [domain] {{label_name}}", - "range": true, - "refId": "G" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "rate(domain_unwind_took{instance=~\"$instance\",type=\"shared\"}[$rate_interval])", + "exemplar": false, + "expr": "rate(domain_commitment_write_took_sum{instance=~\"$instance\"}[$rate_interval])", "hide": false, "instant": false, - "legendFormat": "unwind [SharedDomain] {{label_name}}", + "legendFormat": "commitment update write took: {{instance}}", "range": true, - "refId": "H" + "refId": "F" } ], - "title": "State: timings", + "title": "State: timings", "type": "timeseries" }, { @@ -1187,7 +1241,7 @@ "h": 5, "w": 8, "x": 8, - "y": 18 + "y": 16 }, "id": 194, "options": { @@ -1204,7 +1258,7 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { @@ -1304,7 +1358,7 @@ "h": 5, "w": 8, "x": 16, - "y": 18 + "y": 16 }, "id": 201, "options": { @@ -1381,7 +1435,7 @@ "h": 1, "w": 24, "x": 0, - "y": 23 + "y": 21 }, "id": 17, "panels": [], @@ -1452,7 +1506,8 @@ } ] }, - "unit": "ops" + "unit": "ops", + "unitScale": true }, "overrides": [] }, @@ -1460,7 +1515,7 @@ "h": 5, "w": 8, "x": 0, - "y": 24 + "y": 22 }, "id": 141, "options": { @@ -1475,7 +1530,7 @@ "sort": "none" } }, - 
"pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { @@ -1550,15 +1605,41 @@ } ] }, - "unit": "s" + "unit": "s", + "unitScale": true }, - "overrides": [] + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "sync: mainnet3-1:6061" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] }, "gridPos": { - "h": 5, + "h": 9, "w": 16, "x": 8, - "y": 24 + "y": 22 }, "id": 166, "options": { @@ -1575,7 +1656,7 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { @@ -1817,7 +1898,8 @@ } ] }, - "unit": "decbytes" + "unit": "decbytes", + "unitScale": true }, "overrides": [] }, @@ -1825,7 +1907,7 @@ "h": 5, "w": 8, "x": 0, - "y": 29 + "y": 27 }, "id": 159, "options": { @@ -1924,15 +2006,42 @@ } ] }, - "unit": "short" + "unit": "short", + "unitScale": true }, - "overrides": [] + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "cow: mainnet3-1:6061", + "cow: mainnet3-3:6061" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] }, "gridPos": { - "h": 9, + "h": 7, "w": 16, "x": 8, - "y": 29 + "y": 31 }, "id": 168, "options": { @@ -1949,7 +2058,7 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { @@ -2204,7 +2313,8 @@ } ] }, - "unit": "decbytes" + "unit": "decbytes", + "unitScale": true }, "overrides": [] }, @@ -2212,7 +2322,7 @@ "h": 6, "w": 8, "x": 0, - "y": 34 + "y": 32 }, "id": 167, "options": { @@ -2229,7 +2339,7 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { @@ -2265,7 +2375,6 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "", "fieldConfig": { "defaults": { "color": { @@ -2293,7 +2402,7 @@ "scaleDistribution": { "type": "linear" }, - "showPoints": "never", + "showPoints": "auto", "spanNulls": false, "stacking": { "group": "A", @@ -2316,17 +2425,44 @@ } ] }, - "unit": "short" + "unit": "short", + "unitScale": true }, - "overrides": [] + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "gc_leaf: mainnet3-3:6061", + "gc_leaf: mainnet3-1:6061" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] }, "gridPos": { - "h": 4, - "w": 16, - "x": 8, + "h": 6, + "w": 8, + "x": 0, "y": 38 }, - "id": 150, + "id": 169, "options": { "legend": { "calcs": [ @@ -2341,7 +2477,7 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { @@ -2349,9 +2485,9 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "rate(process_minor_pagefaults_total{instance=~\"$instance\"}[$rate_interval])", + "expr": "db_gc_leaf{instance=~\"$instance\"}", "interval": "", - "legendFormat": "soft: {{instance}}", + "legendFormat": "gc_leaf: {{instance}}", "refId": "A" }, { @@ -2360,14 +2496,28 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": 
"rate(process_major_pagefaults_total{instance=~\"$instance\"}[$rate_interval])", + "expr": "db_gc_overflow{instance=~\"$instance\"}", "hide": false, "interval": "", - "legendFormat": "hard: {{instance}}", + "legendFormat": "gc_overflow: {{instance}}", "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "exec_steps_in_db{instance=~\"$instance\"}/100", + "hide": false, + "interval": "", + "legendFormat": "exec_steps_in_db: {{instance}}", + "range": true, + "refId": "E" } ], - "title": "getrusage: minflt - soft page faults (reclaims), majflt - hard faults", + "title": "GC and State", "type": "timeseries" }, { @@ -2375,6 +2525,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, + "description": "", "fieldConfig": { "defaults": { "color": { @@ -2402,7 +2553,7 @@ "scaleDistribution": { "type": "linear" }, - "showPoints": "auto", + "showPoints": "never", "spanNulls": false, "stacking": { "group": "A", @@ -2425,17 +2576,44 @@ } ] }, - "unit": "short" + "unit": "short", + "unitScale": true }, - "overrides": [] + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "hard: mainnet3-1:6061", + "hard: mainnet3-3:6061" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] }, "gridPos": { "h": 6, - "w": 8, - "x": 0, - "y": 40 + "w": 16, + "x": 8, + "y": 38 }, - "id": 169, + "id": 150, "options": { "legend": { "calcs": [ @@ -2450,7 +2628,7 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { @@ -2458,9 +2636,9 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "db_gc_leaf{instance=~\"$instance\"}", + "expr": "rate(process_minor_pagefaults_total{instance=~\"$instance\"}[$rate_interval])", "interval": "", - "legendFormat": "gc_leaf: {{instance}}", + "legendFormat": "soft: {{instance}}", "refId": "A" }, { @@ -2469,28 +2647,14 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "db_gc_overflow{instance=~\"$instance\"}", + "expr": "rate(process_major_pagefaults_total{instance=~\"$instance\"}[$rate_interval])", "hide": false, "interval": "", - "legendFormat": "gc_overflow: {{instance}}", + "legendFormat": "hard: {{instance}}", "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": true, - "expr": "exec_steps_in_db{instance=~\"$instance\"}/100", - "hide": false, - "interval": "", - "legendFormat": "exec_steps_in_db: {{instance}}", - "range": true, - "refId": "E" } ], - "title": "GC and State", + "title": "getrusage: minflt - soft page faults (reclaims), majflt - hard faults", "type": "timeseries" }, { @@ -2547,15 +2711,16 @@ "value": 80 } ] - } + }, + "unitScale": true }, "overrides": [] }, "gridPos": { - "h": 4, - "w": 15, + "h": 8, + "w": 16, "x": 8, - "y": 42 + "y": 44 }, "id": 191, "options": { @@ -2570,7 +2735,7 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { @@ -2767,7 +2932,7 @@ "h": 1, "w": 24, "x": 0, - "y": 46 + "y": 52 }, "id": 134, "panels": [], @@ -2787,94 +2952,58 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "", "fieldConfig": { "defaults": { "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - 
"axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": true, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } + "mode": "thresholds" }, "mappings": [], "thresholds": { "mode": "absolute", - "steps": [ - { - "color": "green" - }, - { - "color": "red", - "value": 80 - } - ] + "steps": [] }, - "unit": "decbytes" + "unit": "short", + "unitScale": true }, "overrides": [] }, "gridPos": { - "h": 6, + "h": 18, "w": 8, "x": 0, - "y": 47 + "y": 53 }, - "id": 148, + "id": 165, "options": { - "legend": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { "calcs": [ - "max" + "range" ], - "displayMode": "list", - "placement": "bottom", - "showLegend": true + "fields": "", + "values": false }, - "tooltip": { - "mode": "multi", - "sort": "none" - } + "showPercentChange": false, + "text": { + "titleSize": 14, + "valueSize": 14 + }, + "textMode": "auto", + "wideLayout": true }, - "pluginVersion": "10.3.4", + "pluginVersion": "10.3.5", "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "exemplar": true, - "expr": "process_virtual_memory_bytes{instance=~\"$instance\"}", - "hide": true, + "expr": "process_io_read_syscalls_total{instance=~\"$instance\"}", "interval": "", - "legendFormat": "resident virtual mem: {{instance}}", + "legendFormat": "process_io_read_syscalls_total: {{instance}}", "refId": "A" }, { @@ -2882,11 +3011,10 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "exemplar": true, - "expr": "process_resident_memory_anon_bytes{instance=~\"$instance\"}", - "hide": true, + "expr": "process_io_write_syscalls_total{instance=~\"$instance\"}", + "hide": false, "interval": "", - "legendFormat": "resident anon mem: {{instance}}", + "legendFormat": "process_io_write_syscalls_total: {{instance}}", "refId": "B" }, { @@ -2894,11 +3022,10 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "exemplar": true, - "expr": "process_resident_memory_bytes{instance=~\"$instance\"}", + "expr": "process_minor_pagefaults_total{instance=~\"$instance\"}", "hide": false, "interval": "", - "legendFormat": "resident mem: {{instance}}", + "legendFormat": "process_minor_pagefaults_total: {{instance}}", "refId": "C" }, { @@ -2906,10 +3033,10 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "mem_data{instance=~\"$instance\"}", + "expr": "process_major_pagefaults_total{instance=~\"$instance\"}", "hide": false, "interval": "", - "legendFormat": "data: {{instance}}", + "legendFormat": "process_major_pagefaults_total: {{instance}}", "refId": "D" }, { @@ -2917,10 +3044,10 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "mem_stack{instance=~\"$instance\"}", + "expr": "process_io_storage_read_bytes_total{instance=~\"$instance\"}", "hide": false, "interval": "", - "legendFormat": "stack: {{instance}}", + "legendFormat": "process_io_storage_read_bytes_total: {{instance}}", "refId": "E" }, { @@ -2928,10 +3055,10 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "mem_locked{instance=~\"$instance\"}", + "expr": 
"process_io_storage_written_bytes_total{instance=~\"$instance\"}", "hide": false, "interval": "", - "legendFormat": "locked: {{instance}}", + "legendFormat": "process_io_storage_written_bytes_total: {{instance}}", "refId": "F" }, { @@ -2939,15 +3066,92 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "expr": "mem_swap{instance=~\"$instance\"}", + "expr": "db_pgops_newly{instance=~\"$instance\"}", "hide": false, "interval": "", - "legendFormat": "swap: {{instance}}", + "legendFormat": "pgops_newly: {{instance}}", + "refId": "H" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "db_pgops_cow{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "pgops_cow: {{instance}}", + "refId": "I" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "db_pgops_clone{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "pgops_clone: {{instance}}", + "refId": "J" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "db_pgops_split{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "pgops_split: {{instance}}", + "refId": "K" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "db_pgops_merge{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "pgops_merge: {{instance}}", + "refId": "L" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "db_pgops_spill{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "pgops_spill: {{instance}}", "refId": "G" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "db_pgops_unspill{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "pgops_unspill: {{instance}}", + "refId": "M" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "db_pgops_wops{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "pgops_wops: {{instance}}", + "refId": "N" } ], - "title": "mem: resident set size", - "type": "timeseries" + "title": "Rusage Total (\"last value\" - \"first value\" on selected period)", + "type": "stat" }, { "datasource": { @@ -3005,15 +3209,42 @@ } ] }, - "unit": "none" + "unit": "none", + "unitScale": true }, - "overrides": [] + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "out: mainnet3-1:6061", + "out: mainnet3-3:6061" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] }, "gridPos": { "h": 6, "w": 8, "x": 8, - "y": 47 + "y": 53 }, "id": 155, "options": { @@ -3030,7 +3261,7 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { @@ -3120,7 +3351,8 @@ } ] }, - "unit": "cps" + "unit": "cps", + "unitScale": true }, "overrides": [] }, @@ -3128,7 +3360,7 @@ "h": 6, "w": 8, "x": 16, - "y": 47 + "y": 53 }, "id": 153, "options": { @@ -3145,7 +3377,7 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { @@ -3169,7 +3401,6 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "", "fieldConfig": { "defaults": { "color": { @@ -3198,7 +3429,7 @@ 
"type": "linear" }, "showPoints": "never", - "spanNulls": true, + "spanNulls": false, "stacking": { "group": "A", "mode": "none" @@ -3220,17 +3451,44 @@ } ] }, - "unit": "short" + "unit": "short", + "unitScale": true }, - "overrides": [] + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "read: mainnet3-3:6061", + "read: mainnet3-1:6061" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] }, "gridPos": { "h": 6, "w": 8, - "x": 0, - "y": 53 + "x": 8, + "y": 59 }, - "id": 86, + "id": 85, "options": { "legend": { "calcs": [ @@ -3245,21 +3503,19 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", "exemplar": true, - "expr": "rate(go_memstats_mallocs_total{instance=~\"$instance\"}[$rate_interval])", + "expr": "rate(process_io_storage_read_bytes_total{instance=~\"$instance\"}[$rate_interval])", "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "memstats_mallocs_total: {{ instance }}", - "range": true, + "legendFormat": "read: {{instance}}", "refId": "A" }, { @@ -3267,19 +3523,16 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", "exemplar": true, - "expr": "rate(go_memstats_frees_total{instance=~\"$instance\"}[$rate_interval])", + "expr": "rate(process_io_storage_written_bytes_total{instance=~\"$instance\"}[$rate_interval])", "format": "time_series", - "hide": false, "interval": "", "intervalFactor": 1, - "legendFormat": "memstats_frees_total: {{ instance }}", - "range": true, + "legendFormat": "write: {{instance}}", "refId": "B" } ], - "title": "Process Mem: allocate objects/sec, free", + "title": "Disk bytes/sec", "type": "timeseries" }, { @@ -3315,7 +3568,7 @@ "type": "linear" }, "showPoints": "never", - "spanNulls": false, + "spanNulls": true, "stacking": { "group": "A", "mode": "none" @@ -3337,7 +3590,118 @@ } ] }, - "unit": "short" + "unit": "none", + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 8, + "x": 16, + "y": 59 + }, + "id": 128, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "8.0.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "go_goroutines{instance=~\"$instance\"}", + "instant": false, + "interval": "", + "legendFormat": "goroutines: {{instance}}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "go_threads{instance=~\"$instance\"}", + "instant": false, + "interval": "", + "legendFormat": "threads: {{instance}}", + "refId": "B" + } + ], + "title": "GO Goroutines and Threads", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + 
"hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes", + "unitScale": true }, "overrides": [] }, @@ -3345,9 +3709,9 @@ "h": 6, "w": 8, "x": 8, - "y": 53 + "y": 65 }, - "id": 85, + "id": 154, "options": { "legend": { "calcs": [ @@ -3362,19 +3726,21 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, + "editorMode": "code", "exemplar": true, - "expr": "rate(process_io_storage_read_bytes_total{instance=~\"$instance\"}[$rate_interval])", + "expr": "go_memstats_stack_sys_bytes{instance=~\"$instance\"}", "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "read: {{instance}}", + "legendFormat": "stack_sys: {{ instance }}", + "range": true, "refId": "A" }, { @@ -3382,16 +3748,83 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, + "editorMode": "code", "exemplar": true, - "expr": "rate(process_io_storage_written_bytes_total{instance=~\"$instance\"}[$rate_interval])", + "expr": "go_memstats_sys_bytes{instance=~\"$instance\"}", "format": "time_series", + "hide": false, "interval": "", "intervalFactor": 1, - "legendFormat": "write: {{instance}}", + "legendFormat": "sys: {{ instance }}", + "range": true, "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "go_memstats_stack_inuse_bytes{instance=~\"$instance\"}", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "stack_inuse: {{ instance }}", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "go_memstats_mspan_sys_bytes{instance=~\"$instance\"}", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "mspan_sys: {{ instance }}", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "go_memstats_mcache_sys_bytes{instance=~\"$instance\"}", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "mcache_sys: {{ instance }}", + "range": true, + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "go_memstats_heap_alloc_bytes{instance=~\"$instance\"}", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "heap_alloc: {{ instance }}", + "range": true, + "refId": "F" } ], - "title": "Disk bytes/sec", + "title": "go memstat", "type": "timeseries" }, { @@ -3412,7 +3845,7 @@ "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", - "fillOpacity": 0, + "fillOpacity": 10, "gradientMode": "none", "hideFrom": { "legend": false, @@ -3449,17 +3882,18 @@ } ] }, - "unit": "none" + "unit": "s", + "unitScale": true }, "overrides": [] 
}, "gridPos": { - "h": 6, + "h": 5, "w": 8, "x": 16, - "y": 53 + "y": 65 }, - "id": 128, + "id": 124, "options": { "legend": { "calcs": [], @@ -3472,7 +3906,7 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { @@ -3480,26 +3914,15 @@ "uid": "${DS_PROMETHEUS}" }, "editorMode": "code", - "expr": "go_goroutines{instance=~\"$instance\"}", + "exemplar": true, + "expr": "rate(go_gc_duration_seconds{quantile=\"0.75\",instance=~\"$instance\"}[$rate_interval])", "instant": false, "interval": "", - "legendFormat": "goroutines: {{instance}}", + "legendFormat": "", "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "go_threads{instance=~\"$instance\"}", - "instant": false, - "interval": "", - "legendFormat": "threads: {{instance}}", - "refId": "B" } ], - "title": "GO Goroutines and Threads", + "title": "GC Stop the World per sec", "type": "timeseries" }, { @@ -3507,6 +3930,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, + "description": "", "fieldConfig": { "defaults": { "color": { @@ -3516,7 +3940,6 @@ "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", - "axisGridShow": true, "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, @@ -3558,7 +3981,8 @@ } ] }, - "unit": "percent" + "unit": "decbytes", + "unitScale": true }, "overrides": [] }, @@ -3566,13 +3990,13 @@ "h": 5, "w": 8, "x": 0, - "y": 59 + "y": 71 }, - "id": 106, + "id": 148, "options": { "legend": { "calcs": [ - "mean" + "max" ], "displayMode": "list", "placement": "bottom", @@ -3583,25 +4007,90 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "editorMode": "code", "exemplar": true, - "expr": "increase(process_cpu_seconds_total{instance=~\"$instance\"}[1m])", - "format": "time_series", + "expr": "process_virtual_memory_bytes{instance=~\"$instance\"}", + "hide": true, "interval": "", - "intervalFactor": 1, - "legendFormat": "system: {{instance}}", - "range": true, + "legendFormat": "resident virtual mem: {{instance}}", "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "process_resident_memory_anon_bytes{instance=~\"$instance\"}", + "hide": true, + "interval": "", + "legendFormat": "resident anon mem: {{instance}}", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "process_resident_memory_bytes{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "resident mem: {{instance}}", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "mem_data{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "data: {{instance}}", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "mem_stack{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "stack: {{instance}}", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": "mem_locked{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "locked: {{instance}}", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "expr": 
"mem_swap{instance=~\"$instance\"}", + "hide": false, + "interval": "", + "legendFormat": "swap: {{instance}}", + "refId": "G" } ], - "title": "CPU", + "title": "mem: resident set size", "type": "timeseries" }, { @@ -3660,17 +4149,18 @@ } ] }, - "unit": "decbytes" + "unit": "short", + "unitScale": true }, "overrides": [] }, "gridPos": { "h": 5, "w": 8, - "x": 8, - "y": 59 + "x": 0, + "y": 76 }, - "id": 154, + "id": 86, "options": { "legend": { "calcs": [ @@ -3685,7 +4175,7 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { @@ -3694,11 +4184,11 @@ }, "editorMode": "code", "exemplar": true, - "expr": "go_memstats_stack_sys_bytes{instance=~\"$instance\"}", + "expr": "rate(go_memstats_mallocs_total{instance=~\"$instance\"}[$rate_interval])", "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "stack_sys: {{ instance }}", + "legendFormat": "memstats_mallocs_total: {{ instance }}", "range": true, "refId": "A" }, @@ -3709,81 +4199,17 @@ }, "editorMode": "code", "exemplar": true, - "expr": "go_memstats_sys_bytes{instance=~\"$instance\"}", + "expr": "rate(go_memstats_frees_total{instance=~\"$instance\"}[$rate_interval])", "format": "time_series", "hide": false, "interval": "", "intervalFactor": 1, - "legendFormat": "sys: {{ instance }}", + "legendFormat": "memstats_frees_total: {{ instance }}", "range": true, "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": true, - "expr": "go_memstats_stack_inuse_bytes{instance=~\"$instance\"}", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "stack_inuse: {{ instance }}", - "range": true, - "refId": "C" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": true, - "expr": "go_memstats_mspan_sys_bytes{instance=~\"$instance\"}", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "mspan_sys: {{ instance }}", - "range": true, - "refId": "D" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": true, - "expr": "go_memstats_mcache_sys_bytes{instance=~\"$instance\"}", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "mcache_sys: {{ instance }}", - "range": true, - "refId": "E" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "exemplar": true, - "expr": "go_memstats_heap_alloc_bytes{instance=~\"$instance\"}", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "heap_alloc: {{ instance }}", - "range": true, - "refId": "F" } ], - "title": "go memstat", + "title": "Process Mem: allocate objects/sec, free", "type": "timeseries" }, { @@ -3800,11 +4226,12 @@ "axisBorderShow": false, "axisCenteredZero": false, "axisColorMode": "text", + "axisGridShow": true, "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", - "fillOpacity": 10, + "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, @@ -3841,20 +4268,23 @@ } ] }, - "unit": "s" + "unit": "percent", + "unitScale": true }, "overrides": [] }, "gridPos": { "h": 5, "w": 8, - "x": 16, - "y": 59 + "x": 0, + "y": 81 }, - "id": 124, + "id": 106, "options": { "legend": { - "calcs": [], + "calcs": [ + "mean" + ], 
"displayMode": "list", "placement": "bottom", "showLegend": true @@ -3864,7 +4294,7 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { @@ -3873,14 +4303,16 @@ }, "editorMode": "code", "exemplar": true, - "expr": "rate(go_gc_duration_seconds{quantile=\"0.75\",instance=~\"$instance\"}[$rate_interval])", - "instant": false, + "expr": "increase(process_cpu_seconds_total{instance=~\"$instance\"}[1m])", + "format": "time_series", "interval": "", - "legendFormat": "", + "intervalFactor": 1, + "legendFormat": "system: {{instance}}", + "range": true, "refId": "A" } ], - "title": "GC Stop the World per sec", + "title": "CPU", "type": "timeseries" }, { @@ -3892,7 +4324,7 @@ "h": 1, "w": 24, "x": 0, - "y": 64 + "y": 86 }, "id": 173, "panels": [], @@ -3962,7 +4394,8 @@ } ] }, - "unit": "s" + "unit": "s", + "unitScale": true }, "overrides": [] }, @@ -3970,7 +4403,7 @@ "h": 8, "w": 12, "x": 0, - "y": 65 + "y": 87 }, "id": 175, "options": { @@ -4119,7 +4552,8 @@ } ] }, - "unit": "reqps" + "unit": "reqps", + "unitScale": true }, "overrides": [] }, @@ -4127,7 +4561,7 @@ "h": 8, "w": 12, "x": 12, - "y": 65 + "y": 87 }, "id": 177, "options": { @@ -4266,7 +4700,8 @@ "value": 80 } ] - } + }, + "unitScale": true }, "overrides": [] }, @@ -4274,7 +4709,7 @@ "h": 6, "w": 8, "x": 0, - "y": 73 + "y": 95 }, "id": 176, "options": { @@ -4363,7 +4798,8 @@ "value": 80 } ] - } + }, + "unitScale": true }, "overrides": [] }, @@ -4371,7 +4807,7 @@ "h": 6, "w": 8, "x": 8, - "y": 73 + "y": 95 }, "id": 180, "options": { @@ -4473,7 +4909,8 @@ } ] }, - "unit": "short" + "unit": "short", + "unitScale": true }, "overrides": [] }, @@ -4481,7 +4918,7 @@ "h": 6, "w": 8, "x": 16, - "y": 73 + "y": 95 }, "id": 181, "options": { @@ -4583,7 +5020,8 @@ } ] }, - "unit": "binBps" + "unit": "binBps", + "unitScale": true }, "overrides": [] }, @@ -4591,7 +5029,7 @@ "h": 6, "w": 8, "x": 0, - "y": 79 + "y": 101 }, "id": 178, "options": { @@ -4635,7 +5073,7 @@ "h": 1, "w": 24, "x": 0, - "y": 85 + "y": 107 }, "id": 183, "panels": [], @@ -4705,7 +5143,8 @@ } ] }, - "unit": "reqps" + "unit": "reqps", + "unitScale": true }, "overrides": [] }, @@ -4713,7 +5152,7 @@ "h": 8, "w": 12, "x": 0, - "y": 86 + "y": 108 }, "id": 185, "options": { @@ -4814,7 +5253,8 @@ } ] }, - "unit": "s" + "unit": "s", + "unitScale": true }, "overrides": [] }, @@ -4822,7 +5262,7 @@ "h": 8, "w": 12, "x": 12, - "y": 86 + "y": 108 }, "id": 186, "options": { @@ -4911,7 +5351,8 @@ } ] }, - "unit": "s" + "unit": "s", + "unitScale": true }, "overrides": [] }, @@ -4919,7 +5360,7 @@ "h": 8, "w": 12, "x": 0, - "y": 94 + "y": 116 }, "id": 187, "options": { @@ -5008,7 +5449,8 @@ } ] }, - "unit": "none" + "unit": "none", + "unitScale": true }, "overrides": [] }, @@ -5016,7 +5458,7 @@ "h": 8, "w": 12, "x": 12, - "y": 94 + "y": 116 }, "id": 188, "options": { @@ -5031,7 +5473,7 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { @@ -5114,7 +5556,8 @@ } ] }, - "unit": "short" + "unit": "short", + "unitScale": true }, "overrides": [] }, @@ -5122,7 +5565,7 @@ "h": 6, "w": 8, "x": 8, - "y": 102 + "y": 124 }, "id": 189, "options": { @@ -5247,7 +5690,8 @@ "value": 80 } ] - } + }, + "unitScale": true }, "overrides": [] }, @@ -5255,7 +5699,7 @@ "h": 6, "w": 8, "x": 16, - "y": 102 + "y": 124 }, "id": 184, "options": { @@ -5313,7 +5757,7 @@ "h": 1, "w": 24, "x": 0, - "y": 108 + "y": 130 }, "id": 75, "panels": [], @@ -5383,15 +5827,41 @@ } ] }, - "unit": "Bps" + "unit": "Bps", + 
"unitScale": true }, - "overrides": [] + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "egress: mainnet2-1:6061" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] }, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 109 + "y": 131 }, "id": 96, "options": { @@ -5411,7 +5881,7 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { @@ -5503,7 +5973,8 @@ } ] }, - "unit": "none" + "unit": "none", + "unitScale": true }, "overrides": [] }, @@ -5511,7 +5982,7 @@ "h": 9, "w": 12, "x": 12, - "y": 109 + "y": 131 }, "id": 77, "options": { @@ -5531,7 +6002,7 @@ "sort": "none" } }, - "pluginVersion": "10.3.4", + "pluginVersion": "8.0.6", "targets": [ { "datasource": { @@ -5633,11 +6104,7 @@ "type": "custom" }, { - "current": { - "selected": false, - "text": "All", - "value": "$__all" - }, + "current": {}, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" @@ -5788,6 +6255,6 @@ "timezone": "", "title": "Erigon Internals", "uid": "b42a61d7-02b1-416c-8ab4-b9c864356174", - "version": 22, + "version": 14, "weekStart": "" } \ No newline at end of file diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index 08a2eeacb5b..d0e1aa3471a 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -33,6 +33,7 @@ import ( txpool "github.com/ledgerwatch/erigon-lib/gointerfaces/txpoolproto" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon-lib/kv/remotedb" @@ -278,7 +279,7 @@ func EmbeddedServices(ctx context.Context, // ... 
adding back in place to see about the above statement stateCache = kvcache.New(stateCacheCfg) } else { - stateCache = kvcache.NewDummy() + stateCache = kvcache.NewDummy(stateCacheCfg.StateV3) } subscribeToStateChangesLoop(ctx, stateDiffClient, stateCache) @@ -434,11 +435,20 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger onNewSnapshot() blockReader = freezeblocks.NewBlockReader(allSnapshots, allBorSnapshots) - db, err = temporal.New(rwKv, agg) - if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, nil, err + var histV3Enabled bool + _ = db.View(ctx, func(tx kv.Tx) error { + histV3Enabled, _ = kvcfg.HistoryV3.Enabled(tx) + return nil + }) + cfg.StateCache.StateV3 = histV3Enabled + if histV3Enabled { + logger.Info("HistoryV3", "enable", histV3Enabled) + db, err = temporal.New(rwKv, agg) + if err != nil { + return nil, nil, nil, nil, nil, nil, nil, nil, nil, err + } } - stateCache = kvcache.NewDummy() + stateCache = kvcache.NewDummy(cfg.StateCache.StateV3) } // If DB can't be configured - used PrivateApiAddr as remote DB if db == nil { @@ -449,7 +459,7 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger if cfg.StateCache.CacheSize > 0 { stateCache = kvcache.New(cfg.StateCache) } else { - stateCache = kvcache.NewDummy() + stateCache = kvcache.NewDummy(cfg.StateCache.StateV3) } logger.Info("if you run RPCDaemon on same machine with Erigon add --datadir option") } @@ -976,7 +986,7 @@ func (e *remoteConsensusEngine) Initialize(config *chain.Config, chain consensus panic(err) } - e.engine.Initialize(config, chain, header, state, syscall, logger) + e.engine.Initialize(config, chain, header, state, syscall, logger, nil) } func (e *remoteConsensusEngine) VerifyHeader(_ consensus.ChainHeaderReader, _ *types.Header, _ bool) error { diff --git a/cmd/silkworm_api/snapshot_idx.go b/cmd/silkworm_api/snapshot_idx.go index af145023b56..bb258660a61 100644 --- a/cmd/silkworm_api/snapshot_idx.go +++ b/cmd/silkworm_api/snapshot_idx.go @@ -56,7 +56,7 @@ func FindIf(segments []snaptype.FileInfo, predicate func(snaptype.FileInfo) bool } func buildIndex(cliCtx *cli.Context, dataDir string, snapshotPaths []string, minBlock uint64) error { - logger, _, _, err := debug.Setup(cliCtx, true /* rootLogger */) + logger, _, _, _, err := debug.Setup(cliCtx, true /* rootLogger */) if err != nil { return err } diff --git a/cmd/state/commands/check_change_sets.go b/cmd/state/commands/check_change_sets.go new file mode 100644 index 00000000000..aaecc9fb067 --- /dev/null +++ b/cmd/state/commands/check_change_sets.go @@ -0,0 +1,295 @@ +package commands + +import ( + "bytes" + "context" + "fmt" + "os" + "os/signal" + "sort" + "syscall" + "time" + + "github.com/ledgerwatch/log/v3" + "github.com/spf13/cobra" + + chain2 "github.com/ledgerwatch/erigon-lib/chain" + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/datadir" + "github.com/ledgerwatch/erigon-lib/common/hexutility" + "github.com/ledgerwatch/erigon-lib/kv" + kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" + "github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2" + + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/systemcontracts" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/ethconsensusconfig" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" 
+ "github.com/ledgerwatch/erigon/node/nodecfg" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/debug" + "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" +) + +var ( + historyfile string + nocheck bool +) + +func init() { + withBlock(checkChangeSetsCmd) + withDataDir(checkChangeSetsCmd) + checkChangeSetsCmd.Flags().StringVar(&historyfile, "historyfile", "", "path to the file where the changesets and history are expected to be. If omitted, the same as /erion/chaindata") + checkChangeSetsCmd.Flags().BoolVar(&nocheck, "nocheck", false, "set to turn off the changeset checking and only execute transaction (for performance testing)") + rootCmd.AddCommand(checkChangeSetsCmd) +} + +var checkChangeSetsCmd = &cobra.Command{ + Use: "checkChangeSets", + Short: "Re-executes historical transactions in read-only mode and checks that their outputs match the database ChangeSets", + RunE: func(cmd *cobra.Command, args []string) error { + logger := debug.SetupCobra(cmd, "check_change_sets") + return CheckChangeSets(cmd.Context(), genesis, block, chaindata, historyfile, nocheck, logger) + }, +} + +// CheckChangeSets re-executes historical transactions in read-only mode +// and checks that their outputs match the database ChangeSets. +func CheckChangeSets(ctx context.Context, genesis *types.Genesis, blockNum uint64, chaindata string, historyfile string, nocheck bool, logger log.Logger) error { + if len(historyfile) == 0 { + historyfile = chaindata + } + + startTime := time.Now() + sigs := make(chan os.Signal, 1) + interruptCh := make(chan bool, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + + go func() { + <-sigs + interruptCh <- true + }() + + db, err := kv2.NewMDBX(logger).Path(chaindata).Open(ctx) + if err != nil { + return err + } + dirs := datadir.New(datadirCli) + allSnapshots := freezeblocks.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), dirs.Snap, 0, logger) + defer allSnapshots.Close() + if err := allSnapshots.ReopenFolder(); err != nil { + return fmt.Errorf("reopen snapshot segments: %w", err) + } + allBorSnapshots := freezeblocks.NewBorRoSnapshots(ethconfig.Defaults.Snapshot, dirs.Snap, 0, logger) + blockReader := freezeblocks.NewBlockReader(allSnapshots, allBorSnapshots) + + chainDb := db + defer chainDb.Close() + historyDb := chainDb + if chaindata != historyfile { + historyDb = kv2.MustOpen(historyfile) + } + historyTx, err1 := historyDb.BeginRo(ctx) + if err1 != nil { + return err1 + } + defer historyTx.Rollback() + chainConfig := genesis.Config + vmConfig := vm.Config{} + + noOpWriter := state.NewNoopWriter() + + interrupt := false + rwtx, err := chainDb.BeginRw(ctx) + if err != nil { + return err + } + defer rwtx.Rollback() + + execAt, err1 := stages.GetStageProgress(rwtx, stages.Execution) + if err1 != nil { + return err1 + } + historyAt, err1 := stages.GetStageProgress(rwtx, stages.StorageHistoryIndex) + if err1 != nil { + return err1 + } + + commitEvery := time.NewTicker(30 * time.Second) + defer commitEvery.Stop() + + engine := initConsensusEngine(ctx, chainConfig, blockReader, logger) + + for !interrupt { + + if blockNum > execAt { + log.Warn(fmt.Sprintf("Force stop: because trying to check blockNumber=%d higher than Exec stage=%d", blockNum, execAt)) + break + } + if blockNum > historyAt { + log.Warn(fmt.Sprintf("Force stop: because trying to check blockNumber=%d higher than History stage=%d", blockNum, historyAt)) + break + } + + blockHash, err := 
+		if err != nil {
+			return err
+		}
+		var b *types.Block
+		b, _, err = blockReader.BlockWithSenders(ctx, historyTx, blockHash, blockNum)
+		if err != nil {
+			return err
+		}
+		if b == nil {
+			break
+		}
+		reader := state.NewPlainState(historyTx, blockNum, systemcontracts.SystemContractCodeLookup[chainConfig.ChainName])
+		//reader.SetTrace(blockNum == uint64(block))
+		intraBlockState := state.New(reader)
+		csw := state.NewChangeSetWriterPlain(nil /* db */, blockNum)
+		var blockWriter state.StateWriter
+		if nocheck {
+			blockWriter = noOpWriter
+		} else {
+			blockWriter = csw
+		}
+
+		getHeader := func(hash libcommon.Hash, number uint64) *types.Header {
+			h, e := blockReader.Header(ctx, rwtx, hash, number)
+			if e != nil {
+				panic(e)
+			}
+			return h
+		}
+		receipts, err1 := runBlock(engine, intraBlockState, noOpWriter, blockWriter, chainConfig, getHeader, b, vmConfig, blockNum == block, logger)
+		if err1 != nil {
+			return err1
+		}
+		if chainConfig.IsByzantium(blockNum) {
+			receiptSha := types.DeriveSha(receipts)
+			if receiptSha != b.ReceiptHash() {
+				return fmt.Errorf("mismatched receipt headers for block %d", blockNum)
+			}
+		}
+
+		if !nocheck {
+			accountChanges, err := csw.GetAccountChanges()
+			if err != nil {
+				return err
+			}
+			sort.Sort(accountChanges)
+			i := 0
+			match := true
+			err = historyv2.ForPrefix(historyTx, kv.AccountChangeSet, hexutility.EncodeTs(blockNum), func(blockN uint64, k, v []byte) error {
+				if i >= len(accountChanges.Changes) {
+					if len(v) != 0 {
+						fmt.Printf("Unexpected account changes in block %d\n", blockNum)
+						fmt.Printf("In the database: ======================\n")
+						fmt.Printf("%d: 0x%x: %x\n", i, k, v)
+						match = false
+					}
+					i++
+					return nil
+				}
+				c := accountChanges.Changes[i]
+				if bytes.Equal(c.Key, k) && bytes.Equal(c.Value, v) {
+					i++
+					return nil
+				}
+				if len(v) == 0 {
+					return nil
+				}
+
+				match = false
+				fmt.Printf("Unexpected account changes in block %d\n", blockNum)
+				fmt.Printf("In the database: ======================\n")
+				fmt.Printf("%d: 0x%x: %x\n", i, k, v)
+				fmt.Printf("Expected: ==========================\n")
+				fmt.Printf("%d: 0x%x %x\n", i, c.Key, c.Value)
+				i++
+				return nil
+			})
+			if err != nil {
+				return err
+			}
+
+			if !match {
+				return fmt.Errorf("check change set failed")
+			}
+
+			i = 0
+			expectedStorageChanges, err := csw.GetStorageChanges()
+			if err != nil {
+				return err
+			}
+			if expectedStorageChanges == nil {
+				expectedStorageChanges = historyv2.NewChangeSet()
+			}
+			sort.Sort(expectedStorageChanges)
+			match = true
+			err = historyv2.ForPrefix(historyTx, kv.StorageChangeSet, hexutility.EncodeTs(blockNum), func(blockN uint64, k, v []byte) error {
+				if i >= len(expectedStorageChanges.Changes) {
+					fmt.Printf("Unexpected storage changes in block %d\nIn the database: ======================\n", blockNum)
+					fmt.Printf("0x%x: %x\n", k, v)
+					match = false
+					i++
+					return nil
+				}
+				c := expectedStorageChanges.Changes[i]
+				i++
+				if bytes.Equal(c.Key, k) && bytes.Equal(c.Value, v) {
+					return nil
+				}
+				match = false
+				fmt.Printf("Unexpected storage changes in block %d\nIn the database: ======================\n", blockNum)
+				fmt.Printf("0x%x: %x\n", k, v)
+				fmt.Printf("Expected: ==========================\n")
+				fmt.Printf("0x%x %x\n", c.Key, c.Value)
+				return nil
+			})
+			if err != nil {
+				return err
+			}
+			if !match {
+				return fmt.Errorf("check change set failed")
+			}
+		}
+
+		blockNum++
+		if blockNum%1000 == 0 {
+			logger.Info("Checked", "blocks", blockNum)
+		}
+
+		// Check for interrupts
+		select {
+		case interrupt
= <-interruptCh: + fmt.Println("interrupted, please wait for cleanup...") + default: + } + } + logger.Info("Checked", "blocks", blockNum, "next time specify --block", blockNum, "duration", time.Since(startTime)) + return nil +} + +func initConsensusEngine(ctx context.Context, cc *chain2.Config, blockReader services.FullBlockReader, logger log.Logger) (engine consensus.Engine) { + config := ethconfig.Defaults + + var consensusConfig interface{} + + if cc.Clique != nil { + consensusConfig = params.CliqueSnapshot + } else if cc.Aura != nil { + consensusConfig = &config.Aura + } else if cc.Bor != nil { + consensusConfig = cc.Bor + } else { + consensusConfig = &config.Ethash + } + return ethconsensusconfig.CreateConsensusEngine(ctx, &nodecfg.Config{Dirs: datadir.New(datadirCli)}, cc, consensusConfig, config.Miner.Notify, config.Miner.Noverify, nil /* heimdallClient */, config.WithoutHeimdall, blockReader, true /* readonly */, logger) +} diff --git a/cmd/state/commands/opcode_tracer.go b/cmd/state/commands/opcode_tracer.go index e91c94070a8..4e2aec7f613 100644 --- a/cmd/state/commands/opcode_tracer.go +++ b/cmd/state/commands/opcode_tracer.go @@ -21,6 +21,7 @@ import ( chain2 "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon/common/debug" @@ -29,9 +30,11 @@ import ( "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/tracers" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/snapshotsync/freezeblocks" ) @@ -114,7 +117,7 @@ type opcodeTracer struct { saveBblocks bool blockNumber uint64 depth int - env *vm.EVM + env *tracing.VMContext } func NewOpcodeTracer(blockNum uint64, saveOpcodes bool, saveBblocks bool) *opcodeTracer { @@ -163,9 +166,22 @@ type blockTxs struct { Txs slicePtrTx } -func (ot *opcodeTracer) CaptureTxStart(gasLimit uint64) {} +func (ot *opcodeTracer) Tracer() *tracers.Tracer { + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnTxStart: ot.OnTxStart, + OnEnter: ot.OnEnter, + OnExit: ot.OnExit, + OnFault: ot.OnFault, + OnOpcode: ot.OnOpcode, + }, + } +} -func (ot *opcodeTracer) CaptureTxEnd(restGas uint64) {} +func (ot *opcodeTracer) OnTxStart(env *tracing.VMContext, tx types.Transaction, from libcommon.Address) { + ot.env = env + ot.depth = 0 +} func (ot *opcodeTracer) captureStartOrEnter(from, to libcommon.Address, create bool, input []byte) { //fmt.Fprint(ot.summary, ot.lastLine) @@ -195,15 +211,9 @@ func (ot *opcodeTracer) captureStartOrEnter(from, to libcommon.Address, create b ot.stack = append(ot.stack, &newTx) } -func (ot *opcodeTracer) CaptureStart(env *vm.EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { - ot.env = env - ot.depth = 0 - ot.captureStartOrEnter(from, to, create, input) -} - -func (ot *opcodeTracer) CaptureEnter(typ vm.OpCode, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { - ot.depth++ - ot.captureStartOrEnter(from, to, create, input) +func (ot *opcodeTracer) 
OnEnter(depth int, typ byte, from libcommon.Address, to libcommon.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) { + ot.depth = depth + ot.captureStartOrEnter(from, to, vm.OpCode(typ) == vm.CREATE, input) } func (ot *opcodeTracer) captureEndOrExit(err error) { @@ -238,18 +248,13 @@ func (ot *opcodeTracer) captureEndOrExit(err error) { } } -func (ot *opcodeTracer) CaptureEnd(output []byte, usedGas uint64, err error) { +func (ot *opcodeTracer) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { ot.captureEndOrExit(err) + ot.depth = depth } -func (ot *opcodeTracer) CaptureExit(output []byte, usedGas uint64, err error) { - ot.captureEndOrExit(err) - ot.depth-- -} - -func (ot *opcodeTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, opDepth int, err error) { +func (ot *opcodeTracer) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, opDepth int, err error) { //CaptureState sees the system as it is before the opcode is run. It seems to never get an error. - contract := scope.Contract //sanity check if pc > uint64(MaxUint16) { @@ -281,8 +286,8 @@ func (ot *opcodeTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, currentEntry.TxHash = new(libcommon.Hash) currentEntry.TxHash.SetBytes(currentTxHash.Bytes()) currentEntry.CodeHash = new(libcommon.Hash) - currentEntry.CodeHash.SetBytes(contract.CodeHash.Bytes()) - currentEntry.CodeSize = len(contract.Code) + currentEntry.CodeHash.SetBytes(scope.CodeHash().Bytes()) + currentEntry.CodeSize = len(scope.Code()) if ot.saveOpcodes { currentEntry.Opcodes = make([]opcode, 0, 200) } @@ -310,7 +315,7 @@ func (ot *opcodeTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, //sanity check if currentEntry.OpcodeFault != "" { panic(fmt.Sprintf("Running opcodes but fault is already set. txFault=%s, opFault=%v, op=%s", - currentEntry.OpcodeFault, err, op.String())) + currentEntry.OpcodeFault, err, vm.OpCode(op).String())) } // if it is a Fault, check whether we already have a record of the opcode. If so, just add the flag to it @@ -322,11 +327,11 @@ func (ot *opcodeTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, faultAndRepeated := false - if pc16 == currentEntry.lastPc16 && op == currentEntry.lastOp { + if pc16 == currentEntry.lastPc16 && vm.OpCode(op) == currentEntry.lastOp { //it's a repeated opcode. We assume this only happens when it's a Fault. if err == nil { panic(fmt.Sprintf("Duplicate opcode with no fault. bn=%d txaddr=%s pc=%x op=%s", - ot.blockNumber, currentEntry.TxAddr, pc, op.String())) + ot.blockNumber, currentEntry.TxAddr, pc, vm.OpCode(op).String())) } faultAndRepeated = true //ot.fsumWriter.WriteString("Fault for EXISTING opcode\n") @@ -338,7 +343,7 @@ func (ot *opcodeTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, } else { // it's a new opcode if ot.saveOpcodes { - newOpcode := opcode{pc16, op, errstr} + newOpcode := opcode{pc16, vm.OpCode(op), errstr} currentEntry.Opcodes = append(currentEntry.Opcodes, newOpcode) } } @@ -369,24 +374,32 @@ func (ot *opcodeTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, //sanity check // we're starting a bblock, so either we're in PC=0 or we have OP=JUMPDEST - if pc16 != 0 && op.String() != "JUMPDEST" { + if pc16 != 0 && vm.OpCode(op).String() != "JUMPDEST" { panic(fmt.Sprintf("Bad bblock? 
lastpc=%x, lastOp=%s; pc=%x, op=%s; bn=%d txaddr=%s tx=%d-%s", - currentEntry.lastPc16, currentEntry.lastOp.String(), pc, op.String(), ot.blockNumber, currentEntry.TxAddr, currentEntry.Depth, currentEntry.TxHash.String())) + currentEntry.lastPc16, currentEntry.lastOp.String(), pc, vm.OpCode(op).String(), ot.blockNumber, currentEntry.TxAddr, currentEntry.Depth, currentEntry.TxHash.String())) } } } } currentEntry.lastPc16 = pc16 - currentEntry.lastOp = op + currentEntry.lastOp = vm.OpCode(op) } -func (ot *opcodeTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, opDepth int, err error) { +func (ot *opcodeTracer) OnFault(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, opDepth int, err error) { // CaptureFault sees the system as it is after the fault happens // CaptureState might have already recorded the opcode before it failed. Let's centralize the processing there. - ot.CaptureState(pc, op, gas, cost, scope, nil, opDepth, err) + ot.OnOpcode(pc, op, gas, cost, scope, nil, opDepth, err) +} +// GetResult returns an empty json object. +func (ot *opcodeTracer) GetResult() (json.RawMessage, error) { + return json.RawMessage(`{}`), nil +} + +// Stop terminates execution of the tracer at the first opportune moment. +func (ot *opcodeTracer) Stop(err error) { } type segPrefix struct { @@ -421,11 +434,19 @@ func OpcodeTracer(genesis *types.Genesis, blockNum uint64, chaindata string, num } defer historyTx.Rollback() + var historyV3 bool + chainDb.View(context.Background(), func(tx kv.Tx) (err error) { + historyV3, err = kvcfg.HistoryV3.Enabled(tx) + if err != nil { + return err + } + return nil + }) dirs := datadir2.New(filepath.Dir(chainDb.(*mdbx.MdbxKV).Path())) blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, dirs.Snap, 0, log.New()), nil /* BorSnapshots */) chainConfig := genesis.Config - vmConfig := vm.Config{Tracer: ot, Debug: true} + vmConfig := vm.Config{Tracer: ot.Tracer().Hooks, Debug: true} noOpWriter := state.NewNoopWriter() @@ -579,7 +600,7 @@ func OpcodeTracer(genesis *types.Genesis, blockNum uint64, chaindata string, num ot.fsumWriter = bufio.NewWriter(fsum) } - dbstate, err := rpchelper.CreateHistoryStateReader(historyTx, block.NumberU64(), 0, chainConfig.ChainName) + dbstate, err := rpchelper.CreateHistoryStateReader(historyTx, block.NumberU64(), 0, historyV3, chainConfig.ChainName) if err != nil { return err } @@ -706,7 +727,7 @@ func runBlock(engine consensus.Engine, ibs *state.IntraBlockState, txnWriter sta usedGas := new(uint64) usedBlobGas := new(uint64) var receipts types.Receipts - core.InitializeBlockExecution(engine, nil, header, chainConfig, ibs, logger) + core.InitializeBlockExecution(engine, nil, header, chainConfig, ibs, logger, nil) rules := chainConfig.Rules(block.NumberU64(), block.Time()) for i, tx := range block.Transactions() { ibs.SetTxContext(tx.Hash(), block.Hash(), i) @@ -723,7 +744,7 @@ func runBlock(engine consensus.Engine, ibs *state.IntraBlockState, txnWriter sta if !vmConfig.ReadOnly { // Finalize the block, applying any consensus engine specific extras (e.g. 
block rewards) tx := block.Transactions() - if _, _, _, err := engine.FinalizeAndAssemble(chainConfig, header, ibs, tx, block.Uncles(), receipts, block.Withdrawals(), block.Requests(), nil, nil, nil, logger); err != nil { + if _, _, _, err := engine.FinalizeAndAssemble(chainConfig, header, ibs, tx, block.Uncles(), receipts, block.Withdrawals(), nil, nil, nil, logger); err != nil { return nil, fmt.Errorf("finalize of block %d failed: %w", block.NumberU64(), err) } diff --git a/cmd/state/commands/state_root.go b/cmd/state/commands/state_root.go index d3281879167..13c5eabec28 100644 --- a/cmd/state/commands/state_root.go +++ b/cmd/state/commands/state_root.go @@ -13,6 +13,7 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" datadir2 "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb/blockio" @@ -47,9 +48,16 @@ var stateRootCmd = &cobra.Command{ } func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) { + var histV3 bool + if err := db.View(context.Background(), func(tx kv.Tx) error { + histV3, _ = kvcfg.HistoryV3.Enabled(tx) + return nil + }); err != nil { + panic(err) + } dirs := datadir2.New(filepath.Dir(db.(*kv2.MdbxKV).Path())) br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, dirs.Snap, 0, log.New()), nil /* BorSnapshots */) - bw := blockio.NewBlockWriter() + bw := blockio.NewBlockWriter(histV3) return br, bw } @@ -101,7 +109,7 @@ func StateRoot(ctx context.Context, genesis *types.Genesis, blockNum uint64, dat if rwTx, err = db.BeginRw(ctx); err != nil { return err } - _, genesisIbs, err4 := core.GenesisToBlock(genesis, "", logger) + _, genesisIbs, err4 := core.GenesisToBlock(genesis, "", logger, nil) if err4 != nil { return err4 } @@ -154,7 +162,7 @@ func StateRoot(ctx context.Context, genesis *types.Genesis, blockNum uint64, dat if err = rwTx.ClearBucket(kv.HashedStorage); err != nil { return err } - if err = stagedsync.PromoteHashedStateCleanly("hashedstate", rwTx, stagedsync.StageHashStateCfg(nil, dirs), ctx, logger); err != nil { + if err = stagedsync.PromoteHashedStateCleanly("hashedstate", rwTx, stagedsync.StageHashStateCfg(nil, dirs, false), ctx, logger); err != nil { return err } var root libcommon.Hash diff --git a/cmd/state/exec3/calltracer_v3.go b/cmd/state/exec3/calltracer_v3.go index 951e114dfa8..8bee0355223 100644 --- a/cmd/state/exec3/calltracer_v3.go +++ b/cmd/state/exec3/calltracer_v3.go @@ -1,9 +1,12 @@ package exec3 import ( + "encoding/json" + "github.com/holiman/uint256" libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/core/tracing" + "github.com/ledgerwatch/erigon/eth/tracers" ) type CallTracer struct { @@ -14,33 +17,35 @@ type CallTracer struct { func NewCallTracer() *CallTracer { return &CallTracer{} } + +func (ct *CallTracer) Tracer() *tracers.Tracer { + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnEnter: ct.OnEnter, + }, + } +} + func (ct *CallTracer) Reset() { ct.froms, ct.tos = nil, nil } + func (ct *CallTracer) Froms() map[libcommon.Address]struct{} { return ct.froms } func (ct *CallTracer) Tos() map[libcommon.Address]struct{} { return ct.tos } -func (ct *CallTracer) CaptureTxStart(gasLimit uint64) {} -func (ct *CallTracer) CaptureTxEnd(restGas uint64) {} -func (ct *CallTracer) 
CaptureStart(env *vm.EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { - if ct.froms == nil { - ct.froms = map[libcommon.Address]struct{}{} - ct.tos = map[libcommon.Address]struct{}{} - } - ct.froms[from], ct.tos[to] = struct{}{}, struct{}{} -} -func (ct *CallTracer) CaptureEnter(typ vm.OpCode, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { +func (ct *CallTracer) OnEnter(depth int, typ byte, from libcommon.Address, to libcommon.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) { if ct.froms == nil { ct.froms = map[libcommon.Address]struct{}{} ct.tos = map[libcommon.Address]struct{}{} } ct.froms[from], ct.tos[to] = struct{}{}, struct{}{} } -func (ct *CallTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { -} -func (ct *CallTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { -} -func (ct *CallTracer) CaptureEnd(output []byte, usedGas uint64, err error) { + +// GetResult returns an empty json object. +func (ct *CallTracer) GetResult() (json.RawMessage, error) { + return json.RawMessage(`{}`), nil } -func (ct *CallTracer) CaptureExit(output []byte, usedGas uint64, err error) { + +// Stop terminates execution of the tracer at the first opportune moment. +func (ct *CallTracer) Stop(err error) { } diff --git a/cmd/state/exec3/state.go b/cmd/state/exec3/state.go index de7fce77946..7373e372ad8 100644 --- a/cmd/state/exec3/state.go +++ b/cmd/state/exec3/state.go @@ -78,7 +78,7 @@ func NewWorker(lock sync.Locker, logger log.Logger, ctx context.Context, backgro dirs: dirs, } w.taskGasPool.AddBlobGas(chainConfig.GetMaxBlobGasPerBlock()) - w.vmCfg = vm.Config{Debug: true, Tracer: w.callTracer} + w.vmCfg = vm.Config{Debug: true, Tracer: w.callTracer.Tracer().Hooks} w.ibs = state.New(w.stateReader) return w } @@ -178,7 +178,7 @@ func (rw *Worker) RunTxTaskNoLock(txTask *state.TxTask) { if txTask.BlockNum == 0 { // Genesis block //fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum) - _, ibs, err = core.GenesisToBlock(rw.genesis, rw.dirs.Tmp, rw.logger) + _, ibs, err = core.GenesisToBlock(rw.genesis, rw.dirs.Tmp, rw.logger, nil) if err != nil { panic(err) } @@ -190,9 +190,9 @@ func (rw *Worker) RunTxTaskNoLock(txTask *state.TxTask) { // Block initialisation //fmt.Printf("txNum=%d, blockNum=%d, initialisation of the block\n", txTask.TxNum, txTask.BlockNum) syscall := func(contract libcommon.Address, data []byte, ibs *state.IntraBlockState, header *types.Header, constCall bool) ([]byte, error) { - return core.SysCallContract(contract, data, rw.chainConfig, ibs, header, rw.engine, constCall /* constCall */) + return core.SysCallContract(contract, data, rw.chainConfig, ibs, header, rw.engine, constCall /* constCall */, nil) } - rw.engine.Initialize(rw.chainConfig, rw.chain, header, ibs, syscall, rw.logger) + rw.engine.Initialize(rw.chainConfig, rw.chain, header, ibs, syscall, rw.logger, nil) txTask.Error = ibs.FinalizeTx(rules, noop) case txTask.Final: if txTask.BlockNum == 0 { @@ -202,10 +202,10 @@ func (rw *Worker) RunTxTaskNoLock(txTask *state.TxTask) { //fmt.Printf("txNum=%d, blockNum=%d, finalisation of the block\n", txTask.TxNum, txTask.BlockNum) // End of block transaction in a block syscall := func(contract libcommon.Address, data 
[]byte) ([]byte, error) { - return core.SysCallContract(contract, data, rw.chainConfig, ibs, header, rw.engine, false /* constCall */) + return core.SysCallContract(contract, data, rw.chainConfig, ibs, header, rw.engine, false /* constCall */, nil) } - _, _, err := rw.engine.Finalize(rw.chainConfig, types.CopyHeader(header), ibs, txTask.Txs, txTask.Uncles, txTask.BlockReceipts, txTask.Withdrawals, txTask.Requests, rw.chain, syscall, rw.logger) + _, _, err := rw.engine.Finalize(rw.chainConfig, types.CopyHeader(header), ibs, txTask.Txs, txTask.Uncles, txTask.BlockReceipts, txTask.Withdrawals, rw.chain, syscall, rw.logger) if err != nil { txTask.Error = err } else { @@ -232,7 +232,7 @@ func (rw *Worker) RunTxTaskNoLock(txTask *state.TxTask) { if msg.FeeCap().IsZero() && rw.engine != nil { // Only zero-gas transactions may be service ones syscall := func(contract libcommon.Address, data []byte) ([]byte, error) { - return core.SysCallContract(contract, data, rw.chainConfig, ibs, header, rw.engine, true /* constCall */) + return core.SysCallContract(contract, data, rw.chainConfig, ibs, header, rw.engine, true /* constCall */, nil) } msg.SetIsFree(rw.engine.IsServiceTransaction(msg.From(), syscall)) } diff --git a/cmd/state/exec3/state_recon.go b/cmd/state/exec3/state_recon.go index 3a98157686b..23b8431d477 100644 --- a/cmd/state/exec3/state_recon.go +++ b/cmd/state/exec3/state_recon.go @@ -295,7 +295,7 @@ func (rw *ReconWorker) runTxTask(txTask *state.TxTask) error { if txTask.BlockNum == 0 && txTask.TxIndex == -1 { //fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum) // Genesis block - _, ibs, err = core.GenesisToBlock(rw.genesis, "", rw.logger) + _, ibs, err = core.GenesisToBlock(rw.genesis, "", rw.logger, nil) if err != nil { return err } @@ -306,9 +306,9 @@ func (rw *ReconWorker) runTxTask(txTask *state.TxTask) error { //fmt.Printf("txNum=%d, blockNum=%d, finalisation of the block\n", txTask.TxNum, txTask.BlockNum) // End of block transaction in a block syscall := func(contract libcommon.Address, data []byte) ([]byte, error) { - return core.SysCallContract(contract, data, rw.chainConfig, ibs, header, rw.engine, false /* constCall */) + return core.SysCallContract(contract, data, rw.chainConfig, ibs, header, rw.engine, false /* constCall */, nil) } - if _, _, err := rw.engine.Finalize(rw.chainConfig, types.CopyHeader(header), ibs, txTask.Txs, txTask.Uncles, txTask.BlockReceipts, txTask.Withdrawals, txTask.Requests, rw.chain, syscall, rw.logger); err != nil { + if _, _, err := rw.engine.Finalize(rw.chainConfig, types.CopyHeader(header), ibs, txTask.Txs, txTask.Uncles, txTask.BlockReceipts, txTask.Withdrawals, rw.chain, syscall, rw.logger); err != nil { if _, readError := rw.stateReader.ReadError(); !readError { return fmt.Errorf("finalize of block %d failed: %w", txTask.BlockNum, err) } @@ -317,10 +317,10 @@ func (rw *ReconWorker) runTxTask(txTask *state.TxTask) error { } else if txTask.TxIndex == -1 { // Block initialisation syscall := func(contract libcommon.Address, data []byte, ibState *state.IntraBlockState, header *types.Header, constCall bool) ([]byte, error) { - return core.SysCallContract(contract, data, rw.chainConfig, ibState, header, rw.engine, constCall /* constCall */) + return core.SysCallContract(contract, data, rw.chainConfig, ibState, header, rw.engine, constCall /* constCall */, nil) } - rw.engine.Initialize(rw.chainConfig, rw.chain, header, ibs, syscall, rw.logger) + rw.engine.Initialize(rw.chainConfig, rw.chain, header, ibs, syscall, rw.logger, 
nil) if err = ibs.FinalizeTx(rules, noop); err != nil { if _, readError := rw.stateReader.ReadError(); !readError { return err @@ -337,7 +337,7 @@ func (rw *ReconWorker) runTxTask(txTask *state.TxTask) error { if msg.FeeCap().IsZero() && rw.engine != nil { // Only zero-gas transactions may be service ones syscall := func(contract libcommon.Address, data []byte) ([]byte, error) { - return core.SysCallContract(contract, data, rw.chainConfig, ibs, header, rw.engine, true /* constCall */) + return core.SysCallContract(contract, data, rw.chainConfig, ibs, header, rw.engine, true /* constCall */, nil) } msg.SetIsFree(rw.engine.IsServiceTransaction(msg.From(), syscall)) } diff --git a/cmd/state/exec3/trace_worker.go b/cmd/state/exec3/trace_worker.go index fb251f34e57..760c3e1a358 100644 --- a/cmd/state/exec3/trace_worker.go +++ b/cmd/state/exec3/trace_worker.go @@ -12,12 +12,13 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/core/vm/evmtypes" + "github.com/ledgerwatch/erigon/eth/tracers" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/transactions" ) type GenericTracer interface { - vm.EVMLogger + Tracer() *tracers.Tracer SetTransaction(tx types.Transaction) Found() bool } @@ -62,7 +63,7 @@ func NewTraceWorker(tx kv.TemporalTx, cc *chain.Config, engine consensus.EngineR ibs: state.New(stateReader), } if tracer != nil { - ie.vmConfig = &vm.Config{Debug: true, Tracer: tracer} + ie.vmConfig = &vm.Config{Debug: true, Tracer: tracer.Tracer().Hooks} } return ie } @@ -96,7 +97,7 @@ func (e *TraceWorker) ExecTxn(txNum uint64, txIndex int, txn types.Transaction) if msg.FeeCap().IsZero() { // Only zero-gas transactions may be service ones syscall := func(contract common.Address, data []byte) ([]byte, error) { - return core.SysCallContract(contract, data, e.chainConfig, e.ibs, e.header, e.engine, true /* constCall */) + return core.SysCallContract(contract, data, e.chainConfig, e.ibs, e.header, e.engine, true /* constCall */, nil) } msg.SetIsFree(e.engine.IsServiceTransaction(msg.From(), syscall)) } diff --git a/cmd/state/exec3/trace_worker2.go b/cmd/state/exec3/trace_worker2.go index 2afb92bf3d6..a10ad39c079 100644 --- a/cmd/state/exec3/trace_worker2.go +++ b/cmd/state/exec3/trace_worker2.go @@ -130,7 +130,7 @@ func (rw *TraceWorker2) RunTxTask(txTask *state.TxTask) { case txTask.TxIndex == -1: if txTask.BlockNum == 0 { // Genesis block - _, ibs, err = core.GenesisToBlock(rw.execArgs.Genesis, rw.execArgs.Dirs.Tmp, rw.logger) + _, ibs, err = core.GenesisToBlock(rw.execArgs.Genesis, rw.execArgs.Dirs.Tmp, rw.logger, nil) if err != nil { panic(err) } @@ -141,9 +141,9 @@ func (rw *TraceWorker2) RunTxTask(txTask *state.TxTask) { // Block initialisation syscall := func(contract common.Address, data []byte, ibs *state.IntraBlockState, header *types.Header, constCall bool) ([]byte, error) { - return core.SysCallContract(contract, data, rw.execArgs.ChainConfig, ibs, header, rw.execArgs.Engine, constCall /* constCall */) + return core.SysCallContract(contract, data, rw.execArgs.ChainConfig, ibs, header, rw.execArgs.Engine, constCall /* constCall */, nil) } - rw.execArgs.Engine.Initialize(rw.execArgs.ChainConfig, rw.chain, header, ibs, syscall, rw.logger) + rw.execArgs.Engine.Initialize(rw.execArgs.ChainConfig, rw.chain, header, ibs, syscall, rw.logger, nil) txTask.Error = ibs.FinalizeTx(rules, noop) case txTask.Final: if txTask.BlockNum == 0 { @@ -152,10 +152,10 @@ func (rw *TraceWorker2) RunTxTask(txTask 
*state.TxTask) { // End of block transaction in a block syscall := func(contract common.Address, data []byte) ([]byte, error) { - return core.SysCallContract(contract, data, rw.execArgs.ChainConfig, ibs, header, rw.execArgs.Engine, false /* constCall */) + return core.SysCallContract(contract, data, rw.execArgs.ChainConfig, ibs, header, rw.execArgs.Engine, false /* constCall */, nil) } - _, _, err := rw.execArgs.Engine.Finalize(rw.execArgs.ChainConfig, types.CopyHeader(header), ibs, txTask.Txs, txTask.Uncles, txTask.BlockReceipts, txTask.Withdrawals, txTask.Requests, rw.chain, syscall, rw.logger) + _, _, err := rw.execArgs.Engine.Finalize(rw.execArgs.ChainConfig, types.CopyHeader(header), ibs, txTask.Txs, txTask.Uncles, txTask.BlockReceipts, txTask.Withdrawals, rw.chain, syscall, rw.logger) if err != nil { txTask.Error = err } @@ -164,7 +164,7 @@ func (rw *TraceWorker2) RunTxTask(txTask *state.TxTask) { rw.taskGasPool.Reset(txTask.Tx.GetGas(), rw.execArgs.ChainConfig.GetMaxBlobGasPerBlock()) if tracer := rw.consumer.NewTracer(); tracer != nil { rw.vmConfig.Debug = true - rw.vmConfig.Tracer = tracer + rw.vmConfig.Tracer = tracer.Tracer().Hooks } rw.vmConfig.SkipAnalysis = txTask.SkipAnalysis ibs.SetTxContext(txHash, txTask.BlockHash, txTask.TxIndex) @@ -175,7 +175,7 @@ func (rw *TraceWorker2) RunTxTask(txTask *state.TxTask) { if msg.FeeCap().IsZero() { // Only zero-gas transactions may be service ones syscall := func(contract common.Address, data []byte) ([]byte, error) { - return core.SysCallContract(contract, data, rw.execArgs.ChainConfig, ibs, header, rw.execArgs.Engine, true /* constCall */) + return core.SysCallContract(contract, data, rw.execArgs.ChainConfig, ibs, header, rw.execArgs.Engine, true /* constCall */, nil) } msg.SetIsFree(rw.execArgs.Engine.IsServiceTransaction(msg.From(), syscall)) } diff --git a/cmd/state/verify/verify_txlookup.go b/cmd/state/verify/verify_txlookup.go index bc0f57cb9b9..be2d380407b 100644 --- a/cmd/state/verify/verify_txlookup.go +++ b/cmd/state/verify/verify_txlookup.go @@ -13,6 +13,7 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" datadir2 "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon/core/rawdb/blockio" "github.com/ledgerwatch/erigon/eth/ethconfig" @@ -22,9 +23,16 @@ import ( ) func blocksIO(db kv.RoDB) (services.FullBlockReader, *blockio.BlockWriter) { + var histV3 bool + if err := db.View(context.Background(), func(tx kv.Tx) error { + histV3, _ = kvcfg.HistoryV3.Enabled(tx) + return nil + }); err != nil { + panic(err) + } dirs := datadir2.New(filepath.Dir(db.(*mdbx.MdbxKV).Path())) br := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, dirs.Snap, 0, log.New()), nil /* BorSnapshots */) - bw := blockio.NewBlockWriter() + bw := blockio.NewBlockWriter(histV3) return br, bw } diff --git a/cmd/txpool/main.go b/cmd/txpool/main.go index 60cca0b676e..2003a4ce082 100644 --- a/cmd/txpool/main.go +++ b/cmd/txpool/main.go @@ -16,6 +16,7 @@ import ( remote "github.com/ledgerwatch/erigon-lib/gointerfaces/remoteproto" proto_sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentryproto" "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/remotedb" "github.com/ledgerwatch/erigon-lib/kv/remotedbserver" "github.com/ledgerwatch/erigon-lib/txpool" @@ -157,6 
+158,7 @@ func doTxpool(ctx context.Context, logger log.Logger) error {
 	cacheConfig := kvcache.DefaultCoherentConfig
 	cacheConfig.MetricsLabel = "txpool"
+	cacheConfig.StateV3 = kvcfg.HistoryV3.FromDB(coreDB) //TODO: cache to txpool db
 	cfg.TracedSenders = make([]string, len(traceSenders))
 	for i, senderHex := range traceSenders {
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index e68c27afba2..caddd73cbe5 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -60,7 +60,6 @@ import (
 	"github.com/ledgerwatch/erigon/p2p/nat"
 	"github.com/ledgerwatch/erigon/p2p/netutil"
 	"github.com/ledgerwatch/erigon/params"
-	borsnaptype "github.com/ledgerwatch/erigon/polygon/bor/snaptype"
 	"github.com/ledgerwatch/erigon/rpc/rpccfg"
 	"github.com/ledgerwatch/erigon/turbo/logging"
 )
@@ -138,7 +137,7 @@ var (
 		Usage: "Lock memory maps for recent ethash mining DAGs",
 	}
 	ExternalConsensusFlag = cli.BoolFlag{
 		Name:  "externalcl",
 		Usage: "Enables the external consensus layer",
 	}
 	// Transaction pool settings
@@ -1579,7 +1578,6 @@ func setBorConfig(ctx *cli.Context, cfg *ethconfig.Config) {
 	cfg.WithoutHeimdall = ctx.Bool(WithoutHeimdallFlag.Name)
 	cfg.WithHeimdallMilestones = ctx.Bool(WithHeimdallMilestones.Name)
 	cfg.WithHeimdallWaypointRecording = ctx.Bool(WithHeimdallWaypoints.Name)
-	borsnaptype.RecordWayPoints(cfg.WithHeimdallWaypointRecording)
 	cfg.PolygonSync = ctx.Bool(PolygonSyncFlag.Name)
 	cfg.PolygonSyncStage = ctx.Bool(PolygonSyncStageFlag.Name)
 }
diff --git a/consensus/aura/aura.go b/consensus/aura/aura.go
index 2d1d84b97b2..de9fa19720e 100644
--- a/consensus/aura/aura.go
+++ b/consensus/aura/aura.go
@@ -36,6 +36,7 @@ import (
 	"github.com/ledgerwatch/erigon/consensus/clique"
 	"github.com/ledgerwatch/erigon/consensus/ethash"
 	"github.com/ledgerwatch/erigon/core/state"
+	"github.com/ledgerwatch/erigon/core/tracing"
 	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/rlp"
 	"github.com/ledgerwatch/erigon/rpc"
@@ -635,7 +636,7 @@ func (c *AuRa) Prepare(chain consensus.ChainHeaderReader, header *types.Header,
 }
 
 func (c *AuRa) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header,
-	state *state.IntraBlockState, syscallCustom consensus.SysCallCustom, logger log.Logger,
+	state *state.IntraBlockState, syscallCustom consensus.SysCallCustom, logger log.Logger, eLogger *tracing.Hooks,
 ) {
 	blockNum := header.Number.Uint64()
@@ -696,14 +697,14 @@ func (c *AuRa) applyRewards(header *types.Header, state *state.IntraBlockState,
 		return err
 	}
 	for _, r := range rewards {
-		state.AddBalance(r.Beneficiary, &r.Amount)
+		state.AddBalance(r.Beneficiary, &r.Amount, tracing.BalanceIncreaseRewardMineBlock)
 	}
 	return nil
 }
 
 // word `signal epoch` == word `pending epoch`
 func (c *AuRa) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState, txs types.Transactions,
-	uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request,
+	uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal,
 	chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger,
 ) (types.Transactions, types.Receipts, error) {
 	if err := c.applyRewards(header, state, syscall); err != nil {
@@ -842,14 +843,14 @@ func allHeadersUntil(chain consensus.ChainHeaderReader, from *types.Header, to l
 //}
 
 // FinalizeAndAssemble implements consensus.Engine
-func (c *AuRa) FinalizeAndAssemble(config *chain.Config, header *types.Header, state *state.IntraBlockState, txs types.Transactions, uncles 
[]*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger) (*types.Block, types.Transactions, types.Receipts, error) { - outTxs, outReceipts, err := c.Finalize(config, header, state, txs, uncles, receipts, withdrawals, requests, chain, syscall, logger) +func (c *AuRa) FinalizeAndAssemble(config *chain.Config, header *types.Header, state *state.IntraBlockState, txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger) (*types.Block, types.Transactions, types.Receipts, error) { + outTxs, outReceipts, err := c.Finalize(config, header, state, txs, uncles, receipts, withdrawals, chain, syscall, logger) if err != nil { return nil, nil, nil, err } // Assemble and return the final block for sealing - return types.NewBlock(header, outTxs, uncles, outReceipts, withdrawals, requests), outTxs, outReceipts, nil + return types.NewBlock(header, outTxs, uncles, outReceipts, withdrawals), outTxs, outReceipts, nil } // Authorize injects a private key into the consensus engine to mint new blocks diff --git a/consensus/aura/aura_test.go b/consensus/aura/aura_test.go index dd9a94fee8b..b64337055d9 100644 --- a/consensus/aura/aura_test.go +++ b/consensus/aura/aura_test.go @@ -25,7 +25,7 @@ import ( func TestEmptyBlock(t *testing.T) { require := require.New(t) genesis := core.GnosisGenesisBlock() - genesisBlock, _, err := core.GenesisToBlock(genesis, "", log.Root()) + genesisBlock, _, err := core.GenesisToBlock(genesis, "", log.Root(), nil) require.NoError(err) genesis.Config.TerminalTotalDifficultyPassed = false @@ -106,18 +106,18 @@ func TestAuRaSkipGasLimit(t *testing.T) { return fakeVal, err } require.NotPanics(func() { - m.Engine.Initialize(chainConfig, &core.FakeChainReader{}, validPreMergeHeader, nil, syscallCustom, nil) + m.Engine.Initialize(chainConfig, &core.FakeChainReader{}, validPreMergeHeader, nil, syscallCustom, nil, nil) }) invalidPreMergeHeader := validPreMergeHeader invalidPreMergeHeader.GasLimit = 12_123456 //a different, wrong gasLimit require.Panics(func() { - m.Engine.Initialize(chainConfig, &core.FakeChainReader{}, invalidPreMergeHeader, nil, syscallCustom, nil) + m.Engine.Initialize(chainConfig, &core.FakeChainReader{}, invalidPreMergeHeader, nil, syscallCustom, nil, nil) }) invalidPostMergeHeader := invalidPreMergeHeader invalidPostMergeHeader.Difficulty = big.NewInt(0) //zero difficulty detected as PoS require.NotPanics(func() { - m.Engine.Initialize(chainConfig, &core.FakeChainReader{}, invalidPostMergeHeader, nil, syscallCustom, nil) + m.Engine.Initialize(chainConfig, &core.FakeChainReader{}, invalidPostMergeHeader, nil, syscallCustom, nil, nil) }) } diff --git a/consensus/chain_header_reader_mock.go b/consensus/chain_header_reader_mock.go index e5312734761..07a2209c854 100644 --- a/consensus/chain_header_reader_mock.go +++ b/consensus/chain_header_reader_mock.go @@ -118,6 +118,44 @@ func (c *MockChainHeaderReaderConfigCall) DoAndReturn(f func() *chain.Config) *M return c } +// CurrentFinalizedHeader mocks base method. +func (m *MockChainHeaderReader) CurrentFinalizedHeader() *types.Header { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CurrentFinalizedHeader") + ret0, _ := ret[0].(*types.Header) + return ret0 +} + +// CurrentFinalizedHeader indicates an expected call of CurrentFinalizedHeader. 
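+// A minimal usage sketch, not part of the generated file (assumes the
+// standard gomock API used throughout this mock; "t" is a *testing.T and
+// the variable names are illustrative only):
+//
+//	ctrl := gomock.NewController(t)
+//	reader := NewMockChainHeaderReader(ctrl)
+//	reader.EXPECT().CurrentFinalizedHeader().Return(&types.Header{})
+//	_ = reader.CurrentFinalizedHeader() // yields the stubbed header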
+func (mr *MockChainHeaderReaderMockRecorder) CurrentFinalizedHeader() *MockChainHeaderReaderCurrentFinalizedHeaderCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentFinalizedHeader", reflect.TypeOf((*MockChainHeaderReader)(nil).CurrentFinalizedHeader)) + return &MockChainHeaderReaderCurrentFinalizedHeaderCall{Call: call} +} + +// MockChainHeaderReaderCurrentFinalizedHeaderCall wrap *gomock.Call +type MockChainHeaderReaderCurrentFinalizedHeaderCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockChainHeaderReaderCurrentFinalizedHeaderCall) Return(arg0 *types.Header) *MockChainHeaderReaderCurrentFinalizedHeaderCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockChainHeaderReaderCurrentFinalizedHeaderCall) Do(f func() *types.Header) *MockChainHeaderReaderCurrentFinalizedHeaderCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockChainHeaderReaderCurrentFinalizedHeaderCall) DoAndReturn(f func() *types.Header) *MockChainHeaderReaderCurrentFinalizedHeaderCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // CurrentHeader mocks base method. func (m *MockChainHeaderReader) CurrentHeader() *types.Header { m.ctrl.T.Helper() @@ -156,6 +194,44 @@ func (c *MockChainHeaderReaderCurrentHeaderCall) DoAndReturn(f func() *types.Hea return c } +// CurrentSafeHeader mocks base method. +func (m *MockChainHeaderReader) CurrentSafeHeader() *types.Header { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CurrentSafeHeader") + ret0, _ := ret[0].(*types.Header) + return ret0 +} + +// CurrentSafeHeader indicates an expected call of CurrentSafeHeader. +func (mr *MockChainHeaderReaderMockRecorder) CurrentSafeHeader() *MockChainHeaderReaderCurrentSafeHeaderCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentSafeHeader", reflect.TypeOf((*MockChainHeaderReader)(nil).CurrentSafeHeader)) + return &MockChainHeaderReaderCurrentSafeHeaderCall{Call: call} +} + +// MockChainHeaderReaderCurrentSafeHeaderCall wrap *gomock.Call +type MockChainHeaderReaderCurrentSafeHeaderCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockChainHeaderReaderCurrentSafeHeaderCall) Return(arg0 *types.Header) *MockChainHeaderReaderCurrentSafeHeaderCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockChainHeaderReaderCurrentSafeHeaderCall) Do(f func() *types.Header) *MockChainHeaderReaderCurrentSafeHeaderCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockChainHeaderReaderCurrentSafeHeaderCall) DoAndReturn(f func() *types.Header) *MockChainHeaderReaderCurrentSafeHeaderCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // FrozenBlocks mocks base method. 
func (m *MockChainHeaderReader) FrozenBlocks() uint64 { m.ctrl.T.Helper() diff --git a/consensus/chain_reader.go b/consensus/chain_reader.go index c7d81953b7c..83b03f7cb32 100644 --- a/consensus/chain_reader.go +++ b/consensus/chain_reader.go @@ -34,6 +34,34 @@ func (cr ChainReaderImpl) CurrentHeader() *types.Header { return h } +func (cr ChainReaderImpl) CurrentFinalizedHeader() *types.Header { + hash := rawdb.ReadForkchoiceFinalized(cr.Db) + if hash == (libcommon.Hash{}) { + return nil + } + + number := rawdb.ReadHeaderNumber(cr.Db, hash) + if number == nil { + return nil + } + + return rawdb.ReadHeader(cr.Db, hash, *number) +} + +func (cr ChainReaderImpl) CurrentSafeHeader() *types.Header { + hash := rawdb.ReadForkchoiceSafe(cr.Db) + if hash == (libcommon.Hash{}) { + return nil + } + + number := rawdb.ReadHeaderNumber(cr.Db, hash) + if number == nil { + return nil + } + + return rawdb.ReadHeader(cr.Db, hash, *number) +} + // GetHeader retrieves a block header from the database by hash and number. func (cr ChainReaderImpl) GetHeader(hash libcommon.Hash, number uint64) *types.Header { h, _ := cr.BlockReader.Header(context.Background(), cr.Db, hash, number) diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index 6885c8218d4..9ddfee535b7 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -44,6 +44,7 @@ import ( "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/crypto" @@ -367,7 +368,7 @@ func (c *Clique) Prepare(chain consensus.ChainHeaderReader, header *types.Header } func (c *Clique) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header, - state *state.IntraBlockState, syscall consensus.SysCallCustom, logger log.Logger) { + state *state.IntraBlockState, syscall consensus.SysCallCustom, logger log.Logger, eLogger *tracing.Hooks) { } func (c *Clique) CalculateRewards(config *chain.Config, header *types.Header, uncles []*types.Header, syscall consensus.SystemCall, @@ -378,7 +379,7 @@ func (c *Clique) CalculateRewards(config *chain.Config, header *types.Header, un // Finalize implements consensus.Engine, ensuring no uncles are set, nor block // rewards given. func (c *Clique) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState, - txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, + txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger, ) (types.Transactions, types.Receipts, error) { // No block rewards in PoA, so the state remains as is and uncles are dropped @@ -389,13 +390,14 @@ func (c *Clique) Finalize(config *chain.Config, header *types.Header, state *sta // FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set, // nor block rewards given, and returns the final block. 
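+// Call-site sketch under the revised signature (variable names are
+// illustrative; note that the former requests []*types.Request argument
+// is gone):
+//
+//	blk, outTxs, outReceipts, err := c.FinalizeAndAssemble(chainConfig,
+//		header, state, txs, nil /* uncles */, receipts, withdrawals,
+//		chain, syscall, call, logger)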
func (c *Clique) FinalizeAndAssemble(chainConfig *chain.Config, header *types.Header, state *state.IntraBlockState, - txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger, + txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, + chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger, ) (*types.Block, types.Transactions, types.Receipts, error) { // No block rewards in PoA, so the state remains as is and uncles are dropped header.UncleHash = types.CalcUncleHash(nil) // Assemble and return the final block for sealing - return types.NewBlock(header, txs, nil, receipts, withdrawals, requests), txs, receipts, nil + return types.NewBlock(header, txs, nil, receipts, withdrawals), txs, receipts, nil } // Authorize injects a private key into the consensus engine to mint new blocks diff --git a/consensus/consensus.go b/consensus/consensus.go index 3e58f732144..c917a4cc5ff 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -26,6 +26,7 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/rpc" @@ -42,6 +43,10 @@ type ChainHeaderReader interface { // CurrentHeader retrieves the current header from the local chain. CurrentHeader() *types.Header + CurrentFinalizedHeader() *types.Header + + CurrentSafeHeader() *types.Header + // GetHeader retrieves a block header from the database by hash and number. GetHeader(hash libcommon.Hash, number uint64) *types.Header @@ -146,7 +151,7 @@ type EngineWriter interface { // Initialize runs any pre-transaction state modifications (e.g. epoch start) Initialize(config *chain.Config, chain ChainHeaderReader, header *types.Header, - state *state.IntraBlockState, syscall SysCallCustom, logger log.Logger) + state *state.IntraBlockState, syscall SysCallCustom, logger log.Logger, eLogger *tracing.Hooks) // Finalize runs any post-transaction state modifications (e.g. block rewards) // but does not assemble the block. @@ -154,7 +159,8 @@ type EngineWriter interface { // Note: The block header and state database might be updated to reflect any // consensus rules that happen at finalization (e.g. block rewards). Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState, - txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, chain ChainReader, syscall SystemCall, logger log.Logger, + txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, + chain ChainReader, syscall SystemCall, logger log.Logger, ) (types.Transactions, types.Receipts, error) // FinalizeAndAssemble runs any post-transaction state modifications (e.g. block @@ -163,7 +169,8 @@ type EngineWriter interface { // Note: The block header and state database might be updated to reflect any // consensus rules that happen at finalization (e.g. block rewards). 
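+// Implementer sketch ("myEngine" is hypothetical; the shape mirrors the
+// ethash and aura implementations elsewhere in this change):
+//
+//	func (e *myEngine) FinalizeAndAssemble(config *chain.Config, header *types.Header, state *state.IntraBlockState,
+//		txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal,
+//		chain ChainReader, syscall SystemCall, call Call, logger log.Logger,
+//	) (*types.Block, types.Transactions, types.Receipts, error) {
+//		outTxs, outReceipts, err := e.Finalize(config, header, state, txs, uncles, receipts, withdrawals, chain, syscall, logger)
+//		if err != nil {
+//			return nil, nil, nil, err
+//		}
+//		return types.NewBlock(header, outTxs, uncles, outReceipts, withdrawals), outTxs, outReceipts, nil
+//	}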
FinalizeAndAssemble(config *chain.Config, header *types.Header, state *state.IntraBlockState, - txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, chain ChainReader, syscall SystemCall, call Call, logger log.Logger, + txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, + chain ChainReader, syscall SystemCall, call Call, logger log.Logger, ) (*types.Block, types.Transactions, types.Receipts, error) // Seal generates a new sealing request for the given input block and pushes diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index 5642478d317..9d91ed76551 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -37,6 +37,7 @@ import ( "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/misc" "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" @@ -553,7 +554,7 @@ func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.H } func (ethash *Ethash) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header, - state *state.IntraBlockState, syscall consensus.SysCallCustom, logger log.Logger) { + state *state.IntraBlockState, syscall consensus.SysCallCustom, logger log.Logger, eLogger *tracing.Hooks) { if config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(header.Number) == 0 { misc.ApplyDAOHardFork(state) } @@ -562,7 +563,7 @@ func (ethash *Ethash) Initialize(config *chain.Config, chain consensus.ChainHead // Finalize implements consensus.Engine, accumulating the block and uncle rewards, // setting the final state on the header func (ethash *Ethash) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState, - txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, + txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger, ) (types.Transactions, types.Receipts, error) { // Accumulate any block and uncle rewards and commit the final state root @@ -573,17 +574,17 @@ func (ethash *Ethash) Finalize(config *chain.Config, header *types.Header, state // FinalizeAndAssemble implements consensus.Engine, accumulating the block and // uncle rewards, setting the final state and assembling the block. 
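+// The reward crediting underneath now tags each balance change with a
+// tracing reason so hooks can classify the credit; condensed from
+// accumulateRewards below:
+//
+//	state.AddBalance(uncle.Coinbase, &uncleRewards[i], tracing.BalanceIncreaseRewardMineUncle)
+//	state.AddBalance(header.Coinbase, &minerReward, tracing.BalanceIncreaseRewardMineBlock)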
func (ethash *Ethash) FinalizeAndAssemble(chainConfig *chain.Config, header *types.Header, state *state.IntraBlockState, - txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, + txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger, ) (*types.Block, types.Transactions, types.Receipts, error) { // Finalize block - outTxs, outR, err := ethash.Finalize(chainConfig, header, state, txs, uncles, r, withdrawals, requests, chain, syscall, logger) + outTxs, outR, err := ethash.Finalize(chainConfig, header, state, txs, uncles, r, withdrawals, chain, syscall, logger) if err != nil { return nil, nil, nil, err } // Header seems complete, assemble into a block and return - return types.NewBlock(header, outTxs, uncles, outR, withdrawals, requests), outTxs, outR, nil + return types.NewBlock(header, outTxs, uncles, outR, withdrawals), outTxs, outR, nil } // SealHash returns the hash of a block prior to it being sealed. @@ -668,8 +669,8 @@ func accumulateRewards(config *chain.Config, state *state.IntraBlockState, heade minerReward, uncleRewards := AccumulateRewards(config, header, uncles) for i, uncle := range uncles { if i < len(uncleRewards) { - state.AddBalance(uncle.Coinbase, &uncleRewards[i]) + state.AddBalance(uncle.Coinbase, &uncleRewards[i], tracing.BalanceIncreaseRewardMineUncle) } } - state.AddBalance(header.Coinbase, &minerReward) + state.AddBalance(header.Coinbase, &minerReward, tracing.BalanceIncreaseRewardMineBlock) } diff --git a/consensus/merge/merge.go b/consensus/merge/merge.go index 8852b3184eb..bdf9f89bdd5 100644 --- a/consensus/merge/merge.go +++ b/consensus/merge/merge.go @@ -14,6 +14,7 @@ import ( "github.com/ledgerwatch/erigon/consensus/aura" "github.com/ledgerwatch/erigon/consensus/misc" "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" @@ -131,11 +132,11 @@ func (s *Merge) CalculateRewards(config *chain.Config, header *types.Header, unc } func (s *Merge) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState, - txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, + txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger, ) (types.Transactions, types.Receipts, error) { if !misc.IsPoSHeader(header) { - return s.eth1Engine.Finalize(config, header, state, txs, uncles, r, withdrawals, requests, chain, syscall, logger) + return s.eth1Engine.Finalize(config, header, state, txs, uncles, r, withdrawals, chain, syscall, logger) } rewards, err := s.CalculateRewards(config, header, uncles, syscall) @@ -143,7 +144,14 @@ func (s *Merge) Finalize(config *chain.Config, header *types.Header, state *stat return nil, nil, err } for _, r := range rewards { - state.AddBalance(r.Beneficiary, &r.Amount) + switch r.Kind { + case consensus.RewardAuthor: + state.AddBalance(r.Beneficiary, &r.Amount, tracing.BalanceIncreaseRewardMineBlock) + case consensus.RewardUncle: + state.AddBalance(r.Beneficiary, &r.Amount, tracing.BalanceIncreaseRewardMineUncle) + default: + state.AddBalance(r.Beneficiary, &r.Amount, 
diff --git a/consensus/merge/merge.go b/consensus/merge/merge.go
index 8852b3184eb..bdf9f89bdd5 100644
--- a/consensus/merge/merge.go
+++ b/consensus/merge/merge.go
@@ -14,6 +14,7 @@ import (
 	"github.com/ledgerwatch/erigon/consensus/aura"
 	"github.com/ledgerwatch/erigon/consensus/misc"
 	"github.com/ledgerwatch/erigon/core/state"
+	"github.com/ledgerwatch/erigon/core/tracing"
 	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/params"
 	"github.com/ledgerwatch/erigon/rpc"
@@ -131,11 +132,11 @@ func (s *Merge) CalculateRewards(config *chain.Config, header *types.Header, unc
 }
 
 func (s *Merge) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState,
-	txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request,
+	txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal,
 	chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger,
 ) (types.Transactions, types.Receipts, error) {
 	if !misc.IsPoSHeader(header) {
-		return s.eth1Engine.Finalize(config, header, state, txs, uncles, r, withdrawals, requests, chain, syscall, logger)
+		return s.eth1Engine.Finalize(config, header, state, txs, uncles, r, withdrawals, chain, syscall, logger)
 	}
 
 	rewards, err := s.CalculateRewards(config, header, uncles, syscall)
@@ -143,7 +144,14 @@ func (s *Merge) Finalize(config *chain.Config, header *types.Header, state *stat
 		return nil, nil, err
 	}
 	for _, r := range rewards {
-		state.AddBalance(r.Beneficiary, &r.Amount)
+		switch r.Kind {
+		case consensus.RewardAuthor:
+			state.AddBalance(r.Beneficiary, &r.Amount, tracing.BalanceIncreaseRewardMineBlock)
+		case consensus.RewardUncle:
+			state.AddBalance(r.Beneficiary, &r.Amount, tracing.BalanceIncreaseRewardMineUncle)
+		default:
+			state.AddBalance(r.Beneficiary, &r.Amount, tracing.BalanceChangeUnspecified)
+		}
 	}
 
 	if withdrawals != nil {
@@ -154,7 +162,7 @@ func (s *Merge) Finalize(config *chain.Config, header *types.Header, state *stat
 		} else {
 			for _, w := range withdrawals {
 				amountInWei := new(uint256.Int).Mul(uint256.NewInt(w.Amount), uint256.NewInt(params.GWei))
-				state.AddBalance(w.Address, amountInWei)
+				state.AddBalance(w.Address, amountInWei, tracing.BalanceIncreaseWithdrawal)
 			}
 		}
 	}
@@ -163,16 +171,17 @@ func (s *Merge) Finalize(config *chain.Config, header *types.Header, state *stat
 }
 
 func (s *Merge) FinalizeAndAssemble(config *chain.Config, header *types.Header, state *state.IntraBlockState,
-	txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger,
+	txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal,
+	chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger,
 ) (*types.Block, types.Transactions, types.Receipts, error) {
 	if !misc.IsPoSHeader(header) {
-		return s.eth1Engine.FinalizeAndAssemble(config, header, state, txs, uncles, receipts, withdrawals, requests, chain, syscall, call, logger)
+		return s.eth1Engine.FinalizeAndAssemble(config, header, state, txs, uncles, receipts, withdrawals, chain, syscall, call, logger)
 	}
-	outTxs, outReceipts, err := s.Finalize(config, header, state, txs, uncles, receipts, withdrawals, requests, chain, syscall, logger)
+	outTxs, outReceipts, err := s.Finalize(config, header, state, txs, uncles, receipts, withdrawals, chain, syscall, logger)
 	if err != nil {
 		return nil, nil, nil, err
 	}
-	return types.NewBlock(header, outTxs, uncles, outReceipts, withdrawals, requests), outTxs, outReceipts, nil
+	return types.NewBlock(header, outTxs, uncles, outReceipts, withdrawals), outTxs, outReceipts, nil
 }
 
 func (s *Merge) SealHash(header *types.Header) (hash libcommon.Hash) {
@@ -271,18 +280,15 @@ func (s *Merge) IsServiceTransaction(sender libcommon.Address, syscall consensus
 }
 
 func (s *Merge) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header,
-	state *state.IntraBlockState, syscall consensus.SysCallCustom, logger log.Logger,
+	state *state.IntraBlockState, syscall consensus.SysCallCustom, logger log.Logger, eLogger *tracing.Hooks,
 ) {
 	if !misc.IsPoSHeader(header) {
-		s.eth1Engine.Initialize(config, chain, header, state, syscall, logger)
+		s.eth1Engine.Initialize(config, chain, header, state, syscall, logger, eLogger)
 	}
 	if chain.Config().IsCancun(header.Time) {
 		misc.ApplyBeaconRootEip4788(header.ParentBeaconBlockRoot, func(addr libcommon.Address, data []byte) ([]byte, error) {
 			return syscall(addr, data, state, header, false /* constCall */)
-		})
-	}
-	if chain.Config().IsPrague(header.Time) {
-		misc.StoreBlockHashesEip2935(header, state, config, chain)
+		}, eLogger)
 	}
 }
diff --git a/consensus/merge/merge_test.go b/consensus/merge/merge_test.go
index aee7810cd2f..09acd3a71c8 100644
--- a/consensus/merge/merge_test.go
+++ b/consensus/merge/merge_test.go
@@ -21,6 +21,14 @@ func (r readerMock) CurrentHeader() *types.Header {
 	return nil
 }
 
+func (cr readerMock) CurrentFinalizedHeader() *types.Header {
+	return nil
+}
+
+func (cr readerMock) CurrentSafeHeader() *types.Header {
+	return nil
+}
+
 func (r readerMock) GetHeader(libcommon.Hash, uint64) *types.Header {
 	return nil
 }
diff --git a/consensus/misc/dao.go b/consensus/misc/dao.go
index 934a94577df..5d20982ac10 100644
--- a/consensus/misc/dao.go
+++ b/consensus/misc/dao.go
@@ -25,6 +25,7 @@ import (
 	"github.com/ledgerwatch/erigon-lib/chain"
 
 	"github.com/ledgerwatch/erigon/core/state"
+	"github.com/ledgerwatch/erigon/core/tracing"
 	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/params"
 )
@@ -76,7 +77,7 @@ func ApplyDAOHardFork(statedb *state.IntraBlockState) {
 	// Move every DAO account and extra-balance account funds into the refund contract
 	for _, addr := range params.DAODrainList() {
-		statedb.AddBalance(params.DAORefundContract, statedb.GetBalance(addr))
-		statedb.SetBalance(addr, new(uint256.Int))
+		statedb.AddBalance(params.DAORefundContract, statedb.GetBalance(addr), tracing.BalanceIncreaseDaoContract)
+		statedb.SetBalance(addr, new(uint256.Int), tracing.BalanceDecreaseDaoAccount)
 	}
 }
diff --git a/consensus/misc/eip2935.go b/consensus/misc/eip2935.go
deleted file mode 100644
index 64d4bef1586..00000000000
--- a/consensus/misc/eip2935.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package misc
-
-import (
-	"github.com/holiman/uint256"
-
-	"github.com/ledgerwatch/erigon-lib/chain"
-	libcommon "github.com/ledgerwatch/erigon-lib/common"
-
-	"github.com/ledgerwatch/erigon/consensus"
-	"github.com/ledgerwatch/erigon/core/state"
-	"github.com/ledgerwatch/erigon/core/types"
-	"github.com/ledgerwatch/erigon/params"
-)
-
-func StoreBlockHashesEip2935(header *types.Header, state *state.IntraBlockState, config *chain.Config, headerReader consensus.ChainHeaderReader) {
-	headerNum := header.Number.Uint64()
-	if headerNum == 0 { // Activation of fork at Genesis
-		return
-	}
-	storeHash(headerNum-1, header.ParentHash, state)
-	// If this is the fork block, add the parent's direct `HISTORY_SERVE_WINDOW - 1` ancestors as well
-	parent := headerReader.GetHeader(header.ParentHash, headerNum-1)
-	if parent.Time < config.PragueTime.Uint64() {
-		p := headerNum - 1
-		window := params.BlockHashHistoryServeWindow - 1
-		if p < window {
-			window = p
-		}
-		for i := window; i > 0; i-- {
-			p = p - 1
-			storeHash(p, parent.ParentHash, state)
-			parent = headerReader.GetHeader(parent.ParentHash, p)
-		}
-	}
-}
-
-func storeHash(num uint64, hash libcommon.Hash, state *state.IntraBlockState) {
-	slotNum := num % params.BlockHashHistoryServeWindow
-	storageSlot := libcommon.BytesToHash(uint256.NewInt(slotNum).Bytes())
-	parentHashInt := uint256.NewInt(0).SetBytes32(hash.Bytes())
-	state.SetState(params.HistoryStorageAddress, &storageSlot, *parentHashInt)
-}
diff --git a/consensus/misc/eip4788.go b/consensus/misc/eip4788.go
index 26293004b28..be32991d29a 100644
--- a/consensus/misc/eip4788.go
+++ b/consensus/misc/eip4788.go
@@ -5,10 +5,19 @@ import (
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
 
 	"github.com/ledgerwatch/erigon/consensus"
+	"github.com/ledgerwatch/erigon/core/tracing"
 	"github.com/ledgerwatch/erigon/params"
 )
 
-func ApplyBeaconRootEip4788(parentBeaconBlockRoot *libcommon.Hash, syscall consensus.SystemCall) {
+func ApplyBeaconRootEip4788(parentBeaconBlockRoot *libcommon.Hash, syscall consensus.SystemCall, eLogger *tracing.Hooks) {
+	if eLogger != nil && eLogger.OnSystemCallStart != nil {
+		eLogger.OnSystemCallStart()
+	}
+
+	if eLogger != nil && eLogger.OnSystemCallEnd != nil {
+		defer eLogger.OnSystemCallEnd()
+	}
+
 	_, err := syscall(params.BeaconRootsAddress, parentBeaconBlockRoot.Bytes())
 	if err != nil {
 		log.Warn("Failed to call beacon roots contract", "err", err)
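The EIP-4788 change above brackets a system-contract call with OnSystemCallStart and a deferred OnSystemCallEnd. The same guard-then-defer shape works for any engine-level system call; a sketch under the assumption that both hooks are the zero-argument func fields used above (runSystemCall and its call argument are hypothetical):

package main

import "github.com/ledgerwatch/erigon/core/tracing"

// runSystemCall (hypothetical) wraps an arbitrary system-contract call with
// the hooks used by ApplyBeaconRootEip4788. The nil checks matter: hooks are
// optional func fields, and the tracer itself may be nil.
func runSystemCall(hooks *tracing.Hooks, call func() error) error {
	if hooks != nil && hooks.OnSystemCallStart != nil {
		hooks.OnSystemCallStart()
	}
	if hooks != nil && hooks.OnSystemCallEnd != nil {
		defer hooks.OnSystemCallEnd()
	}
	return call() // OnSystemCallEnd still fires on error, thanks to defer
}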
diff --git a/core/blockchain.go b/core/blockchain.go
index 0120dbd7cbb..16fa59d1b13 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -35,6 +35,7 @@ import (
 	"github.com/ledgerwatch/erigon/common/u256"
 	"github.com/ledgerwatch/erigon/consensus"
 	"github.com/ledgerwatch/erigon/core/state"
+	"github.com/ledgerwatch/erigon/core/tracing"
 	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/core/vm"
 	"github.com/ledgerwatch/erigon/core/vm/evmtypes"
@@ -83,13 +84,14 @@ func ExecuteBlockEphemerally(
 	blockHashFunc func(n uint64) libcommon.Hash,
 	engine consensus.Engine, block *types.Block,
 	stateReader state.StateReader, stateWriter state.WriterWithChangeSets,
-	chainReader consensus.ChainReader, getTracer func(txIndex int, txHash libcommon.Hash) (vm.EVMLogger, error),
+	chainReader consensus.ChainReader, getTracer func(txIndex int, txHash libcommon.Hash) (*tracing.Hooks, error),
 	logger log.Logger,
-) (*EphemeralExecResult, error) {
+) (res *EphemeralExecResult, executeBlockErr error) {
 	defer blockExecutionTimer.ObserveDuration(time.Now())
 	block.Uncles()
 	ibs := state.New(stateReader)
+	ibs.SetLogger(vmConfig.Tracer)
 	header := block.Header()
 
 	usedGas := new(uint64)
@@ -97,34 +99,54 @@ func ExecuteBlockEphemerally(
 	gp := new(GasPool)
 	gp.AddGas(block.GasLimit()).AddBlobGas(chainConfig.GetMaxBlobGasPerBlock())
 
-	if err := InitializeBlockExecution(engine, chainReader, block.Header(), chainConfig, ibs, logger); err != nil {
+	var (
+		rejectedTxs []*RejectedTx
+		includedTxs types.Transactions
+		receipts    types.Receipts
+	)
+
+	if vmConfig.Tracer != nil && vmConfig.Tracer.OnBlockStart != nil {
+		td := chainReader.GetTd(block.ParentHash(), block.NumberU64()-1)
+		vmConfig.Tracer.OnBlockStart(tracing.BlockEvent{
+			Block:     block,
+			TD:        td,
+			Finalized: chainReader.CurrentFinalizedHeader(),
+			Safe:      chainReader.CurrentSafeHeader(),
+		})
+	}
+	if vmConfig.Tracer != nil && vmConfig.Tracer.OnBlockEnd != nil {
+		defer func() {
+			vmConfig.Tracer.OnBlockEnd(executeBlockErr)
+		}()
+	}
+
+	if err := InitializeBlockExecution(engine, chainReader, block.Header(), chainConfig, ibs, logger, vmConfig.Tracer); err != nil {
 		return nil, err
 	}
 
-	var rejectedTxs []*RejectedTx
-	includedTxs := make(types.Transactions, 0, block.Transactions().Len())
-	receipts := make(types.Receipts, 0, block.Transactions().Len())
+	includedTxs = make(types.Transactions, 0, block.Transactions().Len())
+	receipts = make(types.Receipts, 0, block.Transactions().Len())
 	noop := state.NewNoopWriter()
-	var allLogs types.Logs
 	for i, tx := range block.Transactions() {
 		ibs.SetTxContext(tx.Hash(), block.Hash(), i)
-		writeTrace := false
+		// writeTrace := false
 		if vmConfig.Debug && vmConfig.Tracer == nil {
 			tracer, err := getTracer(i, tx.Hash())
 			if err != nil {
 				return nil, fmt.Errorf("could not obtain tracer: %w", err)
 			}
 			vmConfig.Tracer = tracer
-			writeTrace = true
+			// writeTrace = true
 		}
 		receipt, _, err := ApplyTransaction(chainConfig, blockHashFunc, engine, nil, gp, ibs, noop, header, tx, usedGas, usedBlobGas, *vmConfig)
-		if writeTrace {
-			if ftracer, ok := vmConfig.Tracer.(vm.FlushableTracer); ok {
-				ftracer.Flush(tx)
-			}
-
-			vmConfig.Tracer = nil
-		}
+		// TODO: check how to implement flushable tracer
+		// if writeTrace {
+		// 	if ftracer, ok := vmConfig.Tracer.(vm.FlushableTracer); ok {
+		// 		ftracer.Flush(tx)
+		// 	}
+
+		// 	vmConfig.Tracer = nil
+		// }
 		if err != nil {
 			if !vmConfig.StatelessExec {
 				return nil, fmt.Errorf("could not apply tx %d from block %d [%v]: %w", i, block.NumberU64(), tx.Hash().Hex(), err)
@@ -136,7 +158,6 @@ func ExecuteBlockEphemerally(
 			receipts = append(receipts, receipt)
 		}
 	}
-		allLogs = append(allLogs, receipt.Logs...)
 	}
 
 	receiptSha := types.DeriveSha(receipts)
@@ -165,7 +186,7 @@ func ExecuteBlockEphemerally(
 	}
 	if !vmConfig.ReadOnly {
 		txs := block.Transactions()
-		if _, _, _, err := FinalizeBlockExecution(engine, stateReader, block.Header(), txs, block.Uncles(), stateWriter, chainConfig, ibs, receipts, block.Withdrawals(), block.Requests(), chainReader, false, logger); err != nil {
+		if _, _, _, err := FinalizeBlockExecution(engine, stateReader, block.Header(), txs, block.Uncles(), stateWriter, chainConfig, ibs, receipts, block.Withdrawals(), chainReader, false, logger); err != nil {
 			return nil, err
 		}
 	}
@@ -203,19 +224,6 @@ func ExecuteBlockEphemerally(
 		execRs.StateSyncReceipt = stateSyncReceipt
 	}
 
-	if chainConfig.IsPrague(block.Time()) {
-		requests, err := types.ParseDepositLogs(allLogs, chainConfig.DepositContract)
-		if err != nil {
-			return nil, fmt.Errorf("error: could not parse requests logs: %v", err)
-		}
-
-		rh := types.DeriveSha(requests)
-		if *block.Header().RequestsRoot != rh && !vmConfig.NoReceipts {
-			// TODO(racytech): do we have to check it here?
-			return nil, fmt.Errorf("error: invalid requests root hash, expected: %v, got :%v", *block.Header().RequestsRoot, rh)
-		}
-	}
-
 	return execRs, nil
 }
 
@@ -254,7 +262,7 @@ func rlpHash(x interface{}) (h libcommon.Hash) {
 	return h
 }
 
-func SysCallContract(contract libcommon.Address, data []byte, chainConfig *chain.Config, ibs *state.IntraBlockState, header *types.Header, engine consensus.EngineReader, constCall bool) (result []byte, err error) {
+func SysCallContract(contract libcommon.Address, data []byte, chainConfig *chain.Config, ibs *state.IntraBlockState, header *types.Header, engine consensus.EngineReader, constCall bool, bcLogger *tracing.Hooks) (result []byte, err error) {
 	msg := types.NewMessage(
 		state.SystemAddress,
 		&contract,
@@ -266,7 +274,7 @@ func SysCallContract(contract libcommon.Address, data []byte, chainConfig *chain
 		true, // isFree
 		nil,  // maxFeePerBlobGas
 	)
-	vmConfig := vm.Config{NoReceipts: true, RestoreState: constCall}
+	vmConfig := vm.Config{NoReceipts: true, RestoreState: constCall, Tracer: bcLogger}
 	// Create a new context to be used in the EVM environment
 	isBor := chainConfig.Bor != nil
 	var txContext evmtypes.TxContext
@@ -330,17 +338,17 @@ func FinalizeBlockExecution(
 	header *types.Header, txs types.Transactions, uncles []*types.Header,
 	stateWriter state.StateWriter, cc *chain.Config,
 	ibs *state.IntraBlockState, receipts types.Receipts,
-	withdrawals []*types.Withdrawal, requests []*types.Request, chainReader consensus.ChainReader,
+	withdrawals []*types.Withdrawal, chainReader consensus.ChainReader,
 	isMining bool, logger log.Logger,
 ) (newBlock *types.Block, newTxs types.Transactions, newReceipt types.Receipts, err error) {
 	syscall := func(contract libcommon.Address, data []byte) ([]byte, error) {
-		return SysCallContract(contract, data, cc, ibs, header, engine, false /* constCall */)
+		return SysCallContract(contract, data, cc, ibs, header, engine, false /* constCall */, nil)
 	}
 	if isMining {
-		newBlock, newTxs, newReceipt, err = engine.FinalizeAndAssemble(cc, header, ibs, txs, uncles, receipts, withdrawals, requests, chainReader, syscall, nil, logger)
+		newBlock, newTxs, newReceipt, err = engine.FinalizeAndAssemble(cc, header, ibs, txs, uncles, receipts, withdrawals, chainReader, syscall, nil, logger)
 	} else {
-		_, _, err = engine.Finalize(cc, header, ibs, txs, uncles, receipts, withdrawals, requests, chainReader, syscall, logger)
+		_, _, err = engine.Finalize(cc, header, ibs, txs, uncles, receipts, withdrawals, chainReader, syscall, logger)
 	}
 	if err != nil {
 		return nil, nil, nil, err
@@ -359,11 +367,11 @@ func FinalizeBlockExecution(
 }
 
 func InitializeBlockExecution(engine consensus.Engine, chain consensus.ChainHeaderReader, header *types.Header,
-	cc *chain.Config, ibs *state.IntraBlockState, logger log.Logger,
+	cc *chain.Config, ibs *state.IntraBlockState, logger log.Logger, bcLogger *tracing.Hooks,
 ) error {
 	engine.Initialize(cc, chain, header, ibs, func(contract libcommon.Address, data []byte, ibState *state.IntraBlockState, header *types.Header, constCall bool) ([]byte, error) {
-		return SysCallContract(contract, data, cc, ibState, header, engine, constCall)
-	}, logger)
+		return SysCallContract(contract, data, cc, ibState, header, engine, constCall, bcLogger)
+	}, logger, bcLogger)
 	noop := state.NewNoopWriter()
 	ibs.FinalizeTx(cc.Rules(header.Number.Uint64(), header.Time), noop)
 	return nil
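The ExecuteBlockEphemerally rewrite above pairs OnBlockStart with a deferred OnBlockEnd over a named return value, so the end hook always observes the error the function finally returns. A minimal sketch of that pattern in isolation (process and doWork are hypothetical names):

package main

import "github.com/ledgerwatch/erigon/core/tracing"

func doWork() error { return nil } // stand-in for the real block processing

// process shows the pairing used above: the deferred closure reads the named
// return value, so OnBlockEnd fires with whatever error is finally returned,
// including early returns.
func process(hooks *tracing.Hooks) (err error) {
	if hooks != nil && hooks.OnBlockEnd != nil {
		defer func() { hooks.OnBlockEnd(err) }()
	}
	if err = doWork(); err != nil {
		return err // the deferred hook still sees this error
	}
	return nil
}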
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 934906e6623..12e3702ee37 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -367,7 +367,7 @@ func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.E
 		}
 	}
 	if b.engine != nil {
-		err := InitializeBlockExecution(b.engine, nil, b.header, config, ibs, logger)
+		err := InitializeBlockExecution(b.engine, nil, b.header, config, ibs, logger, nil)
 		if err != nil {
 			return nil, nil, fmt.Errorf("call to InitializeBlockExecution: %w", err)
 		}
@@ -379,7 +379,7 @@ func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.E
 	txNumIncrement()
 	if b.engine != nil {
 		// Finalize and seal the block
-		if _, _, _, err := b.engine.FinalizeAndAssemble(config, b.header, ibs, b.txs, b.uncles, b.receipts, nil, nil, nil, nil, nil, logger); err != nil {
+		if _, _, _, err := b.engine.FinalizeAndAssemble(config, b.header, ibs, b.txs, b.uncles, b.receipts, nil, nil, nil, nil, logger); err != nil {
 			return nil, nil, fmt.Errorf("call to FinaliseAndAssemble: %w", err)
 		}
 		// Write state changes to db
@@ -407,7 +407,7 @@ func GenerateChain(config *chain.Config, parent *types.Block, engine consensus.E
 		}
 		_ = err
 		// Recreating block to make sure Root makes it into the header
-		block := types.NewBlock(b.header, b.txs, b.uncles, b.receipts, nil /* withdrawals */, nil /*requests*/)
+		block := types.NewBlock(b.header, b.txs, b.uncles, b.receipts, nil /* withdrawals */)
 		return block, b.receipts, nil
 	}
 	return nil, nil, fmt.Errorf("no engine to generate blocks")
@@ -477,7 +477,7 @@ func CalcHashRootForTests(tx kv.RwTx, header *types.Header, histV4, trace bool)
 	h := libcommon.NewHasher()
 	defer libcommon.ReturnHasherToPool(h)
 
-	it, err := tx.(state2.HasAggTx).AggTx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.AccountsDomain, nil, nil, -1)
+	it, err := tx.(state2.HasAggCtx).AggCtx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.AccountsDomain, nil, nil, -1)
 	if err != nil {
 		return libcommon.Hash{}, err
 	}
@@ -502,7 +502,7 @@ func CalcHashRootForTests(tx kv.RwTx, header *types.Header, histV4, trace bool)
 		}
 	}
 
-	it, err = tx.(state2.HasAggTx).AggTx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.StorageDomain, nil, nil, -1)
+	it, err = tx.(state2.HasAggCtx).AggCtx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.StorageDomain, nil, nil, -1)
 	if err != nil {
 		return libcommon.Hash{}, err
 	}
@@ -692,6 +692,8 @@ func (cr *FakeChainReader) Config() *chain.Config {
 }
 
 func (cr *FakeChainReader) CurrentHeader() *types.Header { return cr.current.Header() }
+func (cr *FakeChainReader) CurrentFinalizedHeader() *types.Header { return cr.current.Header() }
+func (cr *FakeChainReader) CurrentSafeHeader() *types.Header { return cr.current.Header() }
 func (cr *FakeChainReader) GetHeaderByNumber(number uint64) *types.Header { return nil }
 func (cr *FakeChainReader) GetHeaderByHash(hash libcommon.Hash) *types.Header { return nil }
 func (cr *FakeChainReader) GetHeader(hash libcommon.Hash, number uint64) *types.Header { return nil }
diff --git a/core/evm.go b/core/evm.go
index 3421f8a9a45..f4a27fedb88 100644
--- a/core/evm.go
+++ b/core/evm.go
@@ -27,6 +27,7 @@ import (
 	"github.com/ledgerwatch/erigon/consensus"
 	"github.com/ledgerwatch/erigon/consensus/merge"
+	"github.com/ledgerwatch/erigon/core/tracing"
 	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/core/vm/evmtypes"
 )
@@ -134,9 +135,9 @@ func CanTransfer(db evmtypes.IntraBlockState, addr libcommon.Address, amount *ui
 // Transfer subtracts amount from sender and adds amount to recipient using the given Db
 func Transfer(db evmtypes.IntraBlockState, sender, recipient libcommon.Address, amount *uint256.Int, bailout bool) {
 	if !bailout {
-		db.SubBalance(sender, amount)
+		db.SubBalance(sender, amount, tracing.BalanceChangeTransfer)
 	}
-	db.AddBalance(recipient, amount)
+	db.AddBalance(recipient, amount, tracing.BalanceChangeTransfer)
 }
 
 // BorTransfer transfer in Bor
@@ -146,9 +147,9 @@ func BorTransfer(db evmtypes.IntraBlockState, sender, recipient libcommon.Addres
 	input2 := db.GetBalance(recipient).Clone()
 
 	if !bailout {
-		db.SubBalance(sender, amount)
+		db.SubBalance(sender, amount, tracing.BalanceChangeTransfer)
 	}
-	db.AddBalance(recipient, amount)
+	db.AddBalance(recipient, amount, tracing.BalanceChangeTransfer)
 
 	// get outputs after
 	output1 := db.GetBalance(sender).Clone()
diff --git a/core/genesis_test.go b/core/genesis_test.go
index d45eb444d46..130c597010e 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -28,7 +28,7 @@ import (
 func TestGenesisBlockHashes(t *testing.T) {
 	t.Parallel()
 	logger := log.New()
-	db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
+	_, db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
 	check := func(network string) {
 		genesis := core.GenesisBlockByChainName(network)
 		tx, err := db.BeginRw(context.Background())
@@ -36,7 +36,7 @@ func TestGenesisBlockHashes(t *testing.T) {
 			t.Fatal(err)
 		}
 		defer tx.Rollback()
-		_, block, err := core.WriteGenesisBlock(tx, genesis, nil, "", logger)
+		_, block, err := core.WriteGenesisBlock(tx, genesis, nil, "", logger, nil)
 		require.NoError(t, err)
 		expect := params.GenesisHashByChainName(network)
 		require.NotNil(t, expect, network)
@@ -52,12 +52,12 @@ func TestGenesisBlockRoots(t *testing.T) {
 	require := require.New(t)
 	var err error
 
-	block, _, _ := core.GenesisToBlock(core.MainnetGenesisBlock(), "", log.Root())
+	block, _, _ := core.GenesisToBlock(core.MainnetGenesisBlock(), "", log.Root(), nil)
 	if block.Hash() != params.MainnetGenesisHash {
 		t.Errorf("wrong mainnet genesis hash, got %v, want %v", block.Hash(), params.MainnetGenesisHash)
 	}
 
-	block, _, err = core.GenesisToBlock(core.GnosisGenesisBlock(), "", log.Root())
+	block, _, err = core.GenesisToBlock(core.GnosisGenesisBlock(), "", log.Root(), nil)
 	require.NoError(err)
 	if block.Root() != params.GnosisGenesisStateRoot {
 		t.Errorf("wrong Gnosis Chain genesis state root, got %v, want %v", block.Root(), params.GnosisGenesisStateRoot)
@@ -66,7 +66,7 @@ func TestGenesisBlockRoots(t *testing.T) {
 		t.Errorf("wrong Gnosis Chain genesis hash, got %v, want %v", block.Hash(), params.GnosisGenesisHash)
 	}
 
-	block, _, err = core.GenesisToBlock(core.ChiadoGenesisBlock(), "", log.Root())
+	block, _, err = core.GenesisToBlock(core.ChiadoGenesisBlock(), "", log.Root(), nil)
 	require.NoError(err)
 	if block.Root() != params.ChiadoGenesisStateRoot {
 		t.Errorf("wrong Chiado genesis state root, got %v, want %v", block.Root(), params.ChiadoGenesisStateRoot)
@@ -75,7 +75,7 @@ func TestGenesisBlockRoots(t *testing.T) {
 		t.Errorf("wrong Chiado genesis hash, got %v, want %v", block.Hash(), params.ChiadoGenesisHash)
 	}
 
-	block, _, err = core.GenesisToBlock(core.TestGenesisBlock(), "", log.Root())
+	block, _, err = core.GenesisToBlock(core.TestGenesisBlock(), "", log.Root(), nil)
 	require.NoError(err)
 	if block.Root() != params.TestGenesisStateRoot {
 		t.Errorf("wrong Chiado genesis state root, got %v, want %v", block.Root(), params.TestGenesisStateRoot)
@@ -88,19 +88,19 @@ func TestGenesisBlockRoots(t *testing.T) {
 func TestCommitGenesisIdempotency(t *testing.T) {
 	t.Parallel()
 	logger := log.New()
-	db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
+	_, db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir()))
 	tx, err := db.BeginRw(context.Background())
 	require.NoError(t, err)
 	defer tx.Rollback()
 
 	genesis := core.GenesisBlockByChainName(networkname.MainnetChainName)
-	_, _, err = core.WriteGenesisBlock(tx, genesis, nil, "", logger)
+	_, _, err = core.WriteGenesisBlock(tx, genesis, nil, "", logger, nil)
 	require.NoError(t, err)
 	seq, err := tx.ReadSequence(kv.EthTx)
 	require.NoError(t, err)
 	require.Equal(t, uint64(2), seq)
 
-	_, _, err = core.WriteGenesisBlock(tx, genesis, nil, "", logger)
+	_, _, err = core.WriteGenesisBlock(tx, genesis, nil, "", logger, nil)
 	require.NoError(t, err)
 	seq, err = tx.ReadSequence(kv.EthTx)
 	require.NoError(t, err)
@@ -133,7 +133,7 @@ func TestAllocConstructor(t *testing.T) {
 	defer tx.Rollback()
 
 	//TODO: support historyV3
-	reader, err := rpchelper.CreateHistoryStateReader(tx, 1, 0, genSpec.Config.ChainName)
+	reader, err := rpchelper.CreateHistoryStateReader(tx, 1, 0, m.HistoryV3, genSpec.Config.ChainName)
 	require.NoError(err)
 	state := state.New(reader)
 	balance := state.GetBalance(address)
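In the Transfer change above, both legs of a value transfer report the same reason, tracing.BalanceChangeTransfer, so a tracer that wants net flows has to pair the SubBalance and AddBalance callbacks itself. A sketch of such pairing, assuming only the OnBalanceChange shape used elsewhere in this patch (flowTracker is hypothetical):

package main

import (
	"math/big"

	"github.com/holiman/uint256"
	libcommon "github.com/ledgerwatch/erigon-lib/common"

	"github.com/ledgerwatch/erigon/core/tracing"
)

// flowTracker (hypothetical) nets out transfer legs per address by
// accumulating signed deltas from each OnBalanceChange callback.
type flowTracker struct {
	deltas map[libcommon.Address]*big.Int
}

func (f *flowTracker) hooks() *tracing.Hooks {
	return &tracing.Hooks{
		OnBalanceChange: func(addr libcommon.Address, prev, cur *uint256.Int, reason tracing.BalanceChangeReason) {
			if reason != tracing.BalanceChangeTransfer {
				return // ignore rewards, withdrawals, etc.
			}
			diff := new(big.Int).Sub(cur.ToBig(), prev.ToBig())
			if d, ok := f.deltas[addr]; ok {
				d.Add(d, diff)
			} else {
				f.deltas[addr] = diff
			}
		},
	}
}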
diff --git a/core/genesis_write.go b/core/genesis_write.go
index ae5c095753e..aa0b0b2dd42 100644
--- a/core/genesis_write.go
+++ b/core/genesis_write.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"crypto/ecdsa"
 	"embed"
+	"encoding/binary"
 	"encoding/json"
 	"fmt"
 	"math/big"
@@ -35,6 +36,7 @@ import (
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/common/hexutil"
 	"github.com/ledgerwatch/erigon-lib/kv"
+	"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
 	"github.com/ledgerwatch/erigon-lib/kv/mdbx"
 	"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
 	"github.com/ledgerwatch/erigon/common"
@@ -42,6 +44,7 @@ import (
 	"github.com/ledgerwatch/erigon/consensus/merge"
 	"github.com/ledgerwatch/erigon/core/rawdb"
 	"github.com/ledgerwatch/erigon/core/state"
+	"github.com/ledgerwatch/erigon/core/tracing"
 	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/crypto"
 	"github.com/ledgerwatch/erigon/params"
@@ -64,17 +67,17 @@ var allocs embed.FS
 // error is a *params.ConfigCompatError and the new, unwritten config is returned.
 //
 // The returned chain configuration is never nil.
-func CommitGenesisBlock(db kv.RwDB, genesis *types.Genesis, tmpDir string, logger log.Logger) (*chain.Config, *types.Block, error) {
-	return CommitGenesisBlockWithOverride(db, genesis, nil, tmpDir, logger)
+func CommitGenesisBlock(db kv.RwDB, genesis *types.Genesis, tmpDir string, logger log.Logger, bcLogger *tracing.Hooks) (*chain.Config, *types.Block, error) {
+	return CommitGenesisBlockWithOverride(db, genesis, nil, tmpDir, logger, bcLogger)
 }
 
-func CommitGenesisBlockWithOverride(db kv.RwDB, genesis *types.Genesis, overridePragueTime *big.Int, tmpDir string, logger log.Logger) (*chain.Config, *types.Block, error) {
+func CommitGenesisBlockWithOverride(db kv.RwDB, genesis *types.Genesis, overridePragueTime *big.Int, tmpDir string, logger log.Logger, bcLogger *tracing.Hooks) (*chain.Config, *types.Block, error) {
 	tx, err := db.BeginRw(context.Background())
 	if err != nil {
 		return nil, nil, err
 	}
 	defer tx.Rollback()
-	c, b, err := WriteGenesisBlock(tx, genesis, overridePragueTime, tmpDir, logger)
+	c, b, err := WriteGenesisBlock(tx, genesis, overridePragueTime, tmpDir, logger, bcLogger)
 	if err != nil {
 		return c, b, err
 	}
@@ -85,7 +88,7 @@ func CommitGenesisBlockWithOverride(db kv.RwDB, genesis *types.Genesis, override
 	return c, b, nil
 }
 
-func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overridePragueTime *big.Int, tmpDir string, logger log.Logger) (*chain.Config, *types.Block, error) {
+func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overridePragueTime *big.Int, tmpDir string, logger log.Logger, bcLogger *tracing.Hooks) (*chain.Config, *types.Block, error) {
 	var storedBlock *types.Block
 	if genesis != nil && genesis.Config == nil {
 		return params.AllProtocolChanges, nil, types.ErrGenesisNoConfig
@@ -110,7 +113,7 @@ func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overridePragueTime *b
 		custom = false
 	}
 	applyOverrides(genesis.Config)
-	block, _, err1 := write(tx, genesis, tmpDir, logger)
+	block, _, err1 := write(tx, genesis, tmpDir, logger, bcLogger)
 	if err1 != nil {
 		return genesis.Config, nil, err1
 	}
@@ -122,7 +125,7 @@ func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overridePragueTime *b
 	// Check whether the genesis block is already written.
 	if genesis != nil {
-		block, _, err1 := GenesisToBlock(genesis, tmpDir, logger)
+		block, _, err1 := GenesisToBlock(genesis, tmpDir, logger, nil)
 		if err1 != nil {
 			return genesis.Config, nil, err1
 		}
@@ -179,14 +182,32 @@ func WriteGenesisBlock(tx kv.RwTx, genesis *types.Genesis, overridePragueTime *b
 	return newCfg, storedBlock, nil
 }
 
-func WriteGenesisState(g *types.Genesis, tx kv.RwTx, tmpDir string, logger log.Logger) (*types.Block, *state.IntraBlockState, error) {
-	block, statedb, err := GenesisToBlock(g, tmpDir, logger)
+func WriteGenesisState(g *types.Genesis, tx kv.RwTx, tmpDir string, logger log.Logger, bcLogger *tracing.Hooks) (*types.Block, *state.IntraBlockState, error) {
+	block, statedb, err := GenesisToBlock(g, tmpDir, logger, bcLogger)
 	if err != nil {
 		return nil, nil, err
 	}
+	histV3, err := kvcfg.HistoryV3.Enabled(tx)
+	if err != nil {
+		panic(err)
+	}
 	var stateWriter state.StateWriter
-	stateWriter = state.NewNoopWriter()
+	if histV3 {
+		stateWriter = state.NewNoopWriter()
+	} else {
+		for addr, account := range g.Alloc {
+			if len(account.Code) > 0 || len(account.Storage) > 0 {
+				// Special case for weird tests - inaccessible storage
+				var b [8]byte
+				binary.BigEndian.PutUint64(b[:], state.FirstContractIncarnation)
+				if err := tx.Put(kv.IncarnationMap, addr[:], b[:]); err != nil {
+					return nil, nil, err
+				}
+			}
+		}
+		stateWriter = state.NewPlainStateWriter(tx, tx, 0)
+	}
 
 	if block.Number().Sign() != 0 {
 		return nil, statedb, fmt.Errorf("can't commit genesis block with number > 0")
@@ -195,16 +216,26 @@ func WriteGenesisState(g *types.Genesis, tx kv.RwTx, tmpDir string, logger log.L
 		return nil, statedb, fmt.Errorf("cannot write state: %w", err)
 	}
 
+	if !histV3 {
+		if csw, ok := stateWriter.(state.WriterWithChangeSets); ok {
+			if err := csw.WriteChangeSets(); err != nil {
+				return nil, statedb, fmt.Errorf("cannot write change sets: %w", err)
+			}
+			if err := csw.WriteHistory(); err != nil {
+				return nil, statedb, fmt.Errorf("cannot write history: %w", err)
+			}
+		}
+	}
 	return block, statedb, nil
 }
 
-func MustCommitGenesis(g *types.Genesis, db kv.RwDB, tmpDir string, logger log.Logger) *types.Block {
+func MustCommitGenesis(g *types.Genesis, db kv.RwDB, tmpDir string, logger log.Logger, bcLogger *tracing.Hooks) *types.Block {
 	tx, err := db.BeginRw(context.Background())
 	if err != nil {
 		panic(err)
 	}
 	defer tx.Rollback()
-	block, _, err := write(tx, g, tmpDir, logger)
+	block, _, err := write(tx, g, tmpDir, logger, bcLogger)
 	if err != nil {
 		panic(err)
 	}
@@ -217,8 +248,8 @@ func MustCommitGenesis(g *types.Genesis, db kv.RwDB, tmpDir string, logger log.L
 
 // Write writes the block and state of a genesis specification to the database.
 // The block is committed as the canonical head block.
-func write(tx kv.RwTx, g *types.Genesis, tmpDir string, logger log.Logger) (*types.Block, *state.IntraBlockState, error) {
-	block, statedb, err2 := WriteGenesisState(g, tx, tmpDir, logger)
+func write(tx kv.RwTx, g *types.Genesis, tmpDir string, logger log.Logger, bcLogger *tracing.Hooks) (*types.Block, *state.IntraBlockState, error) {
+	block, statedb, err2 := WriteGenesisState(g, tx, tmpDir, logger, bcLogger)
 	if err2 != nil {
 		return block, statedb, err2
 	}
@@ -255,6 +286,10 @@ func write(tx kv.RwTx, g *types.Genesis, tmpDir string, logger log.Logger) (*typ
 		return nil, nil, err
 	}
 
+	if bcLogger != nil && bcLogger.OnGenesisBlock != nil {
+		bcLogger.OnGenesisBlock(block, g.Alloc)
+	}
+
 	// We support ethash/merge for issuance (for now)
 	if g.Config.Consensus != chain.EtHashConsensus {
 		return block, statedb, nil
@@ -280,7 +315,7 @@ func write(tx kv.RwTx, g *types.Genesis, tmpDir string, logger log.Logger) (*typ
 // GenesisBlockForTesting creates and writes a block in which addr has the given wei balance.
 func GenesisBlockForTesting(db kv.RwDB, addr libcommon.Address, balance *big.Int, tmpDir string, logger log.Logger) *types.Block {
 	g := types.Genesis{Alloc: types.GenesisAlloc{addr: {Balance: balance}}, Config: params.TestChainConfig}
-	block := MustCommitGenesis(&g, db, tmpDir, logger)
+	block := MustCommitGenesis(&g, db, tmpDir, logger, nil)
 	return block
 }
 
@@ -289,14 +324,14 @@ type GenAccount struct {
 	Balance *big.Int
 }
 
-func GenesisWithAccounts(db kv.RwDB, accs []GenAccount, tmpDir string, logger log.Logger) *types.Block {
+func GenesisWithAccounts(db kv.RwDB, accs []GenAccount, tmpDir string, logger log.Logger, bcLogger *tracing.Hooks) *types.Block {
 	g := types.Genesis{Config: params.TestChainConfig}
 	allocs := make(map[libcommon.Address]types.GenesisAccount)
 	for _, acc := range accs {
 		allocs[acc.Addr] = types.GenesisAccount{Balance: acc.Balance}
 	}
 	g.Alloc = allocs
-	block := MustCommitGenesis(&g, db, tmpDir, logger)
+	block := MustCommitGenesis(&g, db, tmpDir, logger, bcLogger)
 	return block
 }
 
@@ -461,7 +496,7 @@ func DeveloperGenesisBlock(period uint64, faucet libcommon.Address) *types.Genes
 
 // ToBlock creates the genesis block and writes state of a genesis specification
 // to the given database (or discards it if nil).
-func GenesisToBlock(g *types.Genesis, tmpDir string, logger log.Logger) (*types.Block, *state.IntraBlockState, error) {
+func GenesisToBlock(g *types.Genesis, tmpDir string, logger log.Logger, bcLogger *tracing.Hooks) (*types.Block, *state.IntraBlockState, error) {
 	_ = g.Alloc //nil-check
 
 	head := &types.Header{
@@ -518,11 +553,6 @@ func GenesisToBlock(g *types.Genesis, tmpDir string, logger log.Logger) (*types.
 		}
 	}
 
-	var requests []*types.Request // TODO(racytech): revisit this after merge, make sure everythin is correct
-	if g.Config != nil && g.Config.IsPrague(g.Timestamp) {
-		requests = []*types.Request{}
-	}
-
 	var root libcommon.Hash
 	var statedb *state.IntraBlockState
 	wg := sync.WaitGroup{}
@@ -566,7 +596,9 @@ func GenesisToBlock(g *types.Genesis, tmpDir string, logger log.Logger) (*types.
 		if overflow {
 			panic("overflow at genesis allocs")
 		}
-		statedb.AddBalance(addr, balance)
+		// This is not actually logged via tracer because OnGenesisBlock
+		// already captures the allocations.
+		statedb.AddBalance(addr, balance, tracing.BalanceIncreaseGenesisBalance)
 		statedb.SetCode(addr, account.Code)
 		statedb.SetNonce(addr, account.Nonce)
 		for key, value := range account.Storage {
@@ -599,7 +631,7 @@ func GenesisToBlock(g *types.Genesis, tmpDir string, logger log.Logger) (*types.
 	head.Root = root
-	return types.NewBlock(head, nil, nil, nil, withdrawals, requests), statedb, nil
+	return types.NewBlock(head, nil, nil, nil, withdrawals), statedb, nil
 }
 
 func sortedAllocKeys(m types.GenesisAlloc) []string {
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index c2bbfa318a6..67d77f0472e 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -610,7 +610,6 @@ func ReadBody(db kv.Getter, hash common.Hash, number uint64) (*types.Body, uint6
 	body := new(types.Body)
 	body.Uncles = bodyForStorage.Uncles
 	body.Withdrawals = bodyForStorage.Withdrawals
-	body.Requests = bodyForStorage.Requests
 
 	if bodyForStorage.TxAmount < 2 {
 		panic(fmt.Sprintf("block body hash too few txs amount: %d, %d", number, bodyForStorage.TxAmount))
@@ -655,7 +654,6 @@ func WriteRawBody(db kv.RwTx, hash common.Hash, number uint64, body *types.RawBo
 		TxAmount:    uint32(len(body.Transactions)) + 2, /*system txs*/
 		Uncles:      body.Uncles,
 		Withdrawals: body.Withdrawals,
-		Requests:    body.Requests,
 	}
 	if err = WriteBodyForStorage(db, hash, number, &data); err != nil {
 		return false, fmt.Errorf("WriteBodyForStorage: %w", err)
@@ -679,7 +677,6 @@ func WriteBody(db kv.RwTx, hash common.Hash, number uint64, body *types.Body) (e
 		TxAmount:    uint32(len(body.Transactions)) + 2,
 		Uncles:      body.Uncles,
 		Withdrawals: body.Withdrawals,
-		Requests:    body.Requests,
 	}
 	if err = WriteBodyForStorage(db, hash, number, &data); err != nil {
 		return fmt.Errorf("failed to write body: %w", err)
@@ -982,7 +979,7 @@ func ReadBlock(tx kv.Getter, hash common.Hash, number uint64) *types.Block {
 	if body == nil {
 		return nil
 	}
-	return types.NewBlockFromStorage(hash, header, body.Transactions, body.Uncles, body.Withdrawals, body.Requests)
+	return types.NewBlockFromStorage(hash, header, body.Transactions, body.Uncles, body.Withdrawals)
 }
 
 // HasBlock - is more efficient than ReadBlock because doesn't read transactions.
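write() above fires OnGenesisBlock once with the full allocation, which is why the per-account AddBalance inside GenesisToBlock is annotated as effectively untraced. A consumer sketch, assuming the hook receives the block plus the genesis alloc exactly as called above (genesisAudit is hypothetical):

package main

import (
	"fmt"

	"github.com/ledgerwatch/erigon/core/tracing"
	"github.com/ledgerwatch/erigon/core/types"
)

// genesisAudit (hypothetical) records the genesis allocation in one callback,
// instead of listening for thousands of BalanceIncreaseGenesisBalance events.
func genesisAudit() *tracing.Hooks {
	return &tracing.Hooks{
		OnGenesisBlock: func(b *types.Block, alloc types.GenesisAlloc) {
			fmt.Printf("genesis %x funds %d accounts\n", b.Hash(), len(alloc))
		},
	}
}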
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
index 9449f8ef641..9b6e9a84e77 100644
--- a/core/rawdb/accessors_chain_test.go
+++ b/core/rawdb/accessors_chain_test.go
@@ -25,7 +25,6 @@ import (
 	"testing"
 
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
-	"github.com/ledgerwatch/erigon-lib/common/hexutility"
 	"github.com/ledgerwatch/erigon-lib/kv/memdb"
 	"github.com/ledgerwatch/erigon/core/rawdb"
 	"github.com/ledgerwatch/erigon/turbo/stages/mock"
@@ -574,32 +573,6 @@ func TestBlockWithdrawalsStorage(t *testing.T) {
 	withdrawals = append(withdrawals, &w)
 	withdrawals = append(withdrawals, &w2)
 
-	pk := [48]byte{}
-	copy(pk[:], libcommon.Hex2Bytes("3d1291c96ad36914068b56d93974c1b1d5afcb3fcd37b2ac4b144afd3f6fec5b"))
-	sig := [96]byte{}
-	copy(sig[:], libcommon.Hex2Bytes("20a0a807c717055ecb60dc9d5071fbd336f7f238d61a288173de20f33f79ebf4"))
-	r1 := types.Deposit{
-		Pubkey:                pk,
-		WithdrawalCredentials: libcommon.Hash(hexutility.Hex2Bytes("15095f80cde9763665d2eee3f8dfffc4a4405544c6fece33130e6e98809c4b98")),
-		Amount:                12324,
-		Signature:             sig,
-		Index:                 0,
-	}
-	pk2 := [48]byte{}
-	copy(pk2[:], libcommon.Hex2Bytes("d40ffb510bfc52b058d5e934026ce3eddaf0a4b1703920f03b32b97de2196a93"))
-	sig2 := [96]byte{}
-	copy(sig2[:], libcommon.Hex2Bytes("dc40cf2c33c6fb17e11e3ffe455063f1bf2280a3b08563f8b33aa359a16a383c"))
-	r2 := types.Deposit{
-		Pubkey:                pk2,
-		WithdrawalCredentials: libcommon.Hash(hexutility.Hex2Bytes("d73d9332eb1229e58aa7e33e9a5079d9474f68f747544551461bf3ff9f7ccd64")),
-		Amount:                12324,
-		Signature:             sig2,
-		Index:                 0,
-	}
-	deposits := make(types.Deposits, 0)
-	deposits = append(deposits, &r1)
-	deposits = append(deposits, &r2)
-	reqs := deposits.ToRequests()
 	// Create a test block to move around the database and make sure it's really new
 	block := types.NewBlockWithHeader(&types.Header{
 		Number:      big.NewInt(1),
@@ -619,7 +592,8 @@ func TestBlockWithdrawalsStorage(t *testing.T) {
 	}
 
 	// Write withdrawals to block
-	wBlock := types.NewBlockFromStorage(block.Hash(), block.Header(), block.Transactions(), block.Uncles(), withdrawals, reqs)
+	wBlock := types.NewBlockFromStorage(block.Hash(), block.Header(), block.Transactions(), block.Uncles(), withdrawals)
+
 	if err := rawdb.WriteHeader(tx, wBlock.HeaderNoCopy()); err != nil {
 		t.Fatalf("Could not write body: %v", err)
 	}
@@ -673,28 +647,6 @@ func TestBlockWithdrawalsStorage(t *testing.T) {
 	require.Equal(libcommon.Address{0: 0xff}, rw2.Address)
 	require.Equal(uint64(1001), rw2.Amount)
 
-	readRequests := entry.Requests
-	require.True(len(entry.Requests) == 2)
-	rd1 := readRequests[0]
-	rd2 := readRequests[1]
-	require.True(rd1.Type() == types.DepositRequestType)
-	require.True(rd2.Type() == types.DepositRequestType)
-
-	readDeposits := (types.Requests)(readRequests).Deposits()
-	d1 := readDeposits[0]
-	d2 := readDeposits[1]
-	require.Equal(d1.Pubkey, r1.Pubkey)
-	require.Equal(d1.Amount, r1.Amount)
-	require.Equal(d1.Signature, r1.Signature)
-	require.Equal(d1.WithdrawalCredentials, r1.WithdrawalCredentials)
-	require.Equal(d1.Index, r1.Index)
-
-	require.Equal(d2.Pubkey, r2.Pubkey)
-	require.Equal(d2.Amount, r2.Amount)
-	require.Equal(d2.Signature, r2.Signature)
-	require.Equal(d2.WithdrawalCredentials, r2.WithdrawalCredentials)
-	require.Equal(d2.Index, r2.Index)
-
 	// Delete the block and verify the execution
 	if err := rawdb.TruncateBlocks(context.Background(), tx, block.NumberU64()); err != nil {
 		t.Fatal(err)
diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go
index 2845724b61e..6901a6c5eb3 100644
--- a/core/rawdb/accessors_indexes_test.go
+++ b/core/rawdb/accessors_indexes_test.go
@@ -63,7 +63,7 @@ func TestLookupStorage(t *testing.T) {
 			tx3 := types.NewTransaction(3, libcommon.BytesToAddress([]byte{0x33}), uint256.NewInt(333), 3333, uint256.NewInt(33333), []byte{0x33, 0x33, 0x33})
 			txs := []types.Transaction{tx1, tx2, tx3}
 
-			block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, nil, nil /*requests*/)
+			block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, nil)
 
 			// Check that no transactions entries are in a pristine database
 			for i, txn := range txs {
diff --git a/core/rawdb/blockio/block_writer.go b/core/rawdb/blockio/block_writer.go
index 21151f0299c..4de3d4820ab 100644
--- a/core/rawdb/blockio/block_writer.go
+++ b/core/rawdb/blockio/block_writer.go
@@ -25,10 +25,15 @@ import (
 
 // BlockReader can read blocks from db and snapshots
 type BlockWriter struct {
+	historyV3 bool
+
+	// txsV3 adds an auto-increment BlockID and
+	// allows storing non-canonical Txs/Senders.
+	txsV3 bool
 }
 
-func NewBlockWriter() *BlockWriter {
-	return &BlockWriter{}
+func NewBlockWriter(historyV3 bool) *BlockWriter {
+	return &BlockWriter{historyV3: historyV3, txsV3: true}
 }
 
 func (w *BlockWriter) FillHeaderNumberIndex(logPrefix string, tx kv.RwTx, tmpDir string, from, to uint64, ctx context.Context, logger log.Logger) error {
@@ -54,19 +59,23 @@ func (w *BlockWriter) FillHeaderNumberIndex(logPrefix string, tx kv.RwTx, tmpDir
 }
 
 func (w *BlockWriter) MakeBodiesCanonical(tx kv.RwTx, from uint64) error {
-	if err := rawdb.AppendCanonicalTxNums(tx, from); err != nil {
-		var e1 rawdbv3.ErrTxNumsAppendWithGap
-		if ok := errors.As(err, &e1); ok {
-			// try again starting from latest available block
-			return rawdb.AppendCanonicalTxNums(tx, e1.LastBlock()+1)
+	if w.historyV3 {
+		if err := rawdb.AppendCanonicalTxNums(tx, from); err != nil {
+			var e1 rawdbv3.ErrTxNumsAppendWithGap
+			if ok := errors.As(err, &e1); ok {
+				// try again starting from latest available block
+				return rawdb.AppendCanonicalTxNums(tx, e1.LastBlock()+1)
+			}
+			return err
 		}
-		return err
 	}
 	return nil
 }
 
 func (w *BlockWriter) MakeBodiesNonCanonical(tx kv.RwTx, from uint64) error {
-	if err := rawdbv3.TxNums.Truncate(tx, from); err != nil {
-		return err
+	if w.historyV3 {
+		if err := rawdbv3.TxNums.Truncate(tx, from); err != nil {
+			return err
+		}
 	}
 	return nil
 }
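With the constructor flag above, MakeBodiesCanonical and MakeBodiesNonCanonical become no-ops unless the writer was built for HistoryV3, so call sites need no schema branch. A minimal usage sketch (the tx and from values are placeholders):

package main

import (
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon/core/rawdb/blockio"
)

// makeCanonical shows the pattern: the HistoryV3 branch lives inside
// BlockWriter, so the same call works for both schemas.
func makeCanonical(tx kv.RwTx, from uint64, historyV3 bool) error {
	w := blockio.NewBlockWriter(historyV3)
	return w.MakeBodiesCanonical(tx, from)
}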
diff --git a/core/rawdb/rawdbreset/reset_stages.go b/core/rawdb/rawdbreset/reset_stages.go
index 1bd985b2c90..5820dab2b64 100644
--- a/core/rawdb/rawdbreset/reset_stages.go
+++ b/core/rawdb/rawdbreset/reset_stages.go
@@ -8,8 +8,10 @@ import (
 	"github.com/ledgerwatch/erigon-lib/common/datadir"
 	"github.com/ledgerwatch/erigon-lib/kv"
 	"github.com/ledgerwatch/erigon-lib/kv/backup"
+	"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
 	"github.com/ledgerwatch/erigon-lib/kv/temporal"
 	"github.com/ledgerwatch/erigon-lib/state"
+	"github.com/ledgerwatch/erigon/core"
 	"github.com/ledgerwatch/erigon/core/rawdb"
 	"github.com/ledgerwatch/erigon/core/rawdb/blockio"
 	"github.com/ledgerwatch/erigon/eth/stagedsync"
@@ -119,18 +121,25 @@ func WarmupExec(ctx context.Context, db kv.RwDB) (err error) {
 	for _, tbl := range stateBuckets {
 		backup.WarmupTable(ctx, db, tbl, log.LvlInfo, backup.ReadAheadThreads)
 	}
-	for _, tbl := range stateHistoryV3Buckets {
-		backup.WarmupTable(ctx, db, tbl, log.LvlInfo, backup.ReadAheadThreads)
+	historyV3 := kvcfg.HistoryV3.FromDB(db)
+	if historyV3 { // hist v2 is too big; with that much RAM, just use `cat mdbx.dat > /dev/null` to warm up
+		for _, tbl := range stateHistoryV3Buckets {
+			backup.WarmupTable(ctx, db, tbl, log.LvlInfo, backup.ReadAheadThreads)
+		}
 	}
 	return
 }
 
 func ResetExec(ctx context.Context, db kv.RwDB, chain string, tmpDir string, logger log.Logger) (err error) {
+	historyV3 := kvcfg.HistoryV3.FromDB(db)
+
 	cleanupList := make([]string, 0)
-	cleanupList = append(cleanupList, stateBuckets...)
-	cleanupList = append(cleanupList, stateHistoryBuckets...)
-	cleanupList = append(cleanupList, stateHistoryV3Buckets...)
-	cleanupList = append(cleanupList, stateV3Buckets...)
+	if historyV3 {
+		cleanupList = append(cleanupList, stateBuckets...)
+		cleanupList = append(cleanupList, stateHistoryBuckets...)
+		cleanupList = append(cleanupList, stateHistoryV3Buckets...)
+		cleanupList = append(cleanupList, stateV3Buckets...)
+	}
 
 	return db.Update(ctx, func(tx kv.RwTx) error {
 		if err := clearStageProgress(tx, stages.Execution, stages.HashState, stages.IntermediateHashes); err != nil {
@@ -140,22 +149,30 @@ func ResetExec(ctx context.Context, db kv.RwDB, chain string, tmpDir string, log
 		if err := backup.ClearTables(ctx, db, tx, cleanupList...); err != nil {
 			return nil
 		}
-		v3db := db.(*temporal.DB)
-		agg := v3db.Agg()
-		aggTx := agg.BeginFilesRo()
-		defer aggTx.Close()
-		doms, err := state.NewSharedDomains(tx, logger)
-		if err != nil {
-			return err
-		}
-		defer doms.Close()
+		if !historyV3 {
+			_ = stages.SaveStageProgress(tx, stages.Execution, 0)
+			genesis := core.GenesisBlockByChainName(chain)
+			if _, _, err := core.WriteGenesisState(genesis, tx, tmpDir, logger, nil); err != nil {
+				return err
+			}
+		} else {
+			v3db := db.(*temporal.DB)
+			agg := v3db.Agg()
+			aggTx := agg.BeginFilesRo()
+			defer aggTx.Close()
+			doms, err := state.NewSharedDomains(tx, logger)
+			if err != nil {
+				return err
+			}
+			defer doms.Close()
 
-		_ = stages.SaveStageProgress(tx, stages.Execution, doms.BlockNum())
-		mxs := agg.EndTxNumMinimax() / agg.StepSize()
-		if mxs > 0 {
-			mxs--
+			_ = stages.SaveStageProgress(tx, stages.Execution, doms.BlockNum())
+			mxs := agg.EndTxNumMinimax() / agg.StepSize()
+			if mxs > 0 {
+				mxs--
+			}
+			log.Info("[reset] exec", "toBlock", doms.BlockNum(), "toTxNum", doms.TxNum(), "maxStepInFiles", mxs)
 		}
-		log.Info("[reset] exec", "toBlock", doms.BlockNum(), "toTxNum", doms.TxNum(), "maxStepInFiles", mxs)
 		return nil
 	})
diff --git a/core/snaptype/block_types.go b/core/snaptype/block_types.go
index 0c193d9a1b9..f38a8c25d9e 100644
--- a/core/snaptype/block_types.go
+++ b/core/snaptype/block_types.go
@@ -105,7 +105,7 @@ var (
 		[]snaptype.Index{Indexes.BodyHash},
 		snaptype.IndexBuilderFunc(
 			func(ctx context.Context, info snaptype.FileInfo, salt uint32, _ *chain.Config, tmpDir string, p *background.Progress, lvl log.Lvl, logger log.Logger) (err error) {
-				num := make([]byte, binary.MaxVarintLen64)
+				num := make([]byte, 8)
 
 				if err := snaptype.BuildIndex(ctx, info, salt, info.From, tmpDir, log.LvlDebug, p, func(idx *recsplit.RecSplit, i, offset uint64, _ []byte) error {
 					if p != nil {
diff --git a/core/state/database_test.go b/core/state/database_test.go
index 101797b7616..d04894c66b3 100644
--- a/core/state/database_test.go
+++ b/core/state/database_test.go
@@ -861,13 +861,13 @@ func TestReproduceCrash(t *testing.T) {
 		t.Errorf("error finalising 1st tx: %v", err)
 	}
 	// Start the 3rd transaction
-	intraBlockState.AddBalance(contract, uint256.NewInt(1000000000))
+	intraBlockState.AddBalance(contract, uint256.NewInt(1000000000), 0x0)
 	intraBlockState.SetState(contract, &storageKey2, *value2)
 	if err := intraBlockState.FinalizeTx(&chain.Rules{}, tsw); err != nil {
 		t.Errorf("error finalising 1st tx: %v", err)
 	}
 	// Start the 4th transaction - clearing both storage cells
-	intraBlockState.SubBalance(contract, uint256.NewInt(1000000000))
+	intraBlockState.SubBalance(contract, uint256.NewInt(1000000000), 0x0)
 	intraBlockState.SetState(contract, &storageKey1, *value0)
 	intraBlockState.SetState(contract, &storageKey2, *value0)
 	if err := intraBlockState.FinalizeTx(&chain.Rules{}, tsw); err != nil {
@@ -1248,7 +1248,7 @@ func TestChangeAccountCodeBetweenBlocks(t *testing.T) {
 	oldCode := []byte{0x01, 0x02, 0x03, 0x04}
 
 	intraBlockState.SetCode(contract, oldCode)
-	intraBlockState.AddBalance(contract, uint256.NewInt(1000000000))
+	intraBlockState.AddBalance(contract, uint256.NewInt(1000000000), 0x0)
 	if err := intraBlockState.FinalizeTx(&chain.Rules{}, tsw); err != nil {
 		t.Errorf("error finalising 1st tx: %v", err)
 	}
@@ -1287,7 +1287,7 @@ func TestCacheCodeSizeSeparately(t *testing.T) {
 	code := []byte{0x01, 0x02, 0x03, 0x04}
 
 	intraBlockState.SetCode(contract, code)
-	intraBlockState.AddBalance(contract, uint256.NewInt(1000000000))
+	intraBlockState.AddBalance(contract, uint256.NewInt(1000000000), 0x0)
 	if err := intraBlockState.FinalizeTx(&chain.Rules{}, w); err != nil {
 		t.Errorf("error finalising 1st tx: %v", err)
 	}
@@ -1321,7 +1321,7 @@ func TestCacheCodeSizeInTrie(t *testing.T) {
 	code := []byte{0x01, 0x02, 0x03, 0x04}
 
 	intraBlockState.SetCode(contract, code)
-	intraBlockState.AddBalance(contract, uint256.NewInt(1000000000))
+	intraBlockState.AddBalance(contract, uint256.NewInt(1000000000), 0x0)
 	if err := intraBlockState.FinalizeTx(&chain.Rules{}, w); err != nil {
 		t.Errorf("error finalising 1st tx: %v", err)
 	}
diff --git a/core/state/intra_block_state.go b/core/state/intra_block_state.go
index 6352750461a..58bd621ca40 100644
--- a/core/state/intra_block_state.go
+++ b/core/state/intra_block_state.go
@@ -26,6 +26,7 @@ import (
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
 	types2 "github.com/ledgerwatch/erigon-lib/types"
 
 	"github.com/ledgerwatch/erigon/common/u256"
+	"github.com/ledgerwatch/erigon/core/tracing"
 	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/core/types/accounts"
 	"github.com/ledgerwatch/erigon/core/vm/evmtypes"
@@ -90,6 +91,7 @@ type IntraBlockState struct {
 	validRevisions []revision
 	nextRevisionID int
 	trace          bool
+	logger         *tracing.Hooks
 	balanceInc     map[libcommon.Address]*BalanceIncrease // Map of balance increases (without first reading the account)
 }
 
@@ -109,6 +111,11 @@ func New(stateReader StateReader) *IntraBlockState {
 	}
 }
 
+// SetLogger sets the logger for account update hooks.
+func (sdb *IntraBlockState) SetLogger(l *tracing.Hooks) {
+	sdb.logger = l
+}
+
 func (sdb *IntraBlockState) SetTrace(trace bool) {
 	sdb.trace = trace
 }
@@ -163,6 +170,9 @@ func (sdb *IntraBlockState) AddLog(log2 *types.Log) {
 	log2.BlockHash = sdb.bhash
 	log2.TxIndex = uint(sdb.txIndex)
 	log2.Index = sdb.logSize
+	if sdb.logger != nil && sdb.logger.OnLog != nil {
+		sdb.logger.OnLog(log2)
+	}
 	sdb.logs[sdb.thash] = append(sdb.logs[sdb.thash], log2)
 	sdb.logSize++
 }
@@ -316,52 +326,55 @@ func (sdb *IntraBlockState) HasSelfdestructed(addr libcommon.Address) bool {
 
 // AddBalance adds amount to the account associated with addr.
 // DESCRIBED: docs/programmers_guide/guide.md#address---identifier-of-an-account
-func (sdb *IntraBlockState) AddBalance(addr libcommon.Address, amount *uint256.Int) {
+func (sdb *IntraBlockState) AddBalance(addr libcommon.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) {
 	if sdb.trace {
 		fmt.Printf("AddBalance %x, %d\n", addr, amount)
 	}
-	// If this account has not been read, add to the balance increment map
-	_, needAccount := sdb.stateObjects[addr]
-	if !needAccount && addr == ripemd && amount.IsZero() {
-		needAccount = true
-	}
-	if !needAccount {
-		sdb.journal.append(balanceIncrease{
-			account:  &addr,
-			increase: *amount,
-		})
-		bi, ok := sdb.balanceInc[addr]
-		if !ok {
-			bi = &BalanceIncrease{}
-			sdb.balanceInc[addr] = bi
+
+	if sdb.logger == nil {
+		// If this account has not been read, add to the balance increment map
+		_, needAccount := sdb.stateObjects[addr]
+		if !needAccount && addr == ripemd && amount.IsZero() {
+			needAccount = true
+		}
+		if !needAccount {
+			sdb.journal.append(balanceIncrease{
+				account:  &addr,
+				increase: *amount,
+			})
+			bi, ok := sdb.balanceInc[addr]
+			if !ok {
+				bi = &BalanceIncrease{}
+				sdb.balanceInc[addr] = bi
+			}
+			bi.increase.Add(&bi.increase, amount)
+			bi.count++
+			return
 		}
-		bi.increase.Add(&bi.increase, amount)
-		bi.count++
-		return
 	}
 
 	stateObject := sdb.GetOrNewStateObject(addr)
-	stateObject.AddBalance(amount)
+	stateObject.AddBalance(amount, reason)
 }
 
 // SubBalance subtracts amount from the account associated with addr.
 // DESCRIBED: docs/programmers_guide/guide.md#address---identifier-of-an-account
-func (sdb *IntraBlockState) SubBalance(addr libcommon.Address, amount *uint256.Int) {
+func (sdb *IntraBlockState) SubBalance(addr libcommon.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) {
 	if sdb.trace {
 		fmt.Printf("SubBalance %x, %d\n", addr, amount)
 	}
 
 	stateObject := sdb.GetOrNewStateObject(addr)
 	if stateObject != nil {
-		stateObject.SubBalance(amount)
+		stateObject.SubBalance(amount, reason)
 	}
 }
 
 // DESCRIBED: docs/programmers_guide/guide.md#address---identifier-of-an-account
-func (sdb *IntraBlockState) SetBalance(addr libcommon.Address, amount *uint256.Int) {
+func (sdb *IntraBlockState) SetBalance(addr libcommon.Address, amount *uint256.Int, reason tracing.BalanceChangeReason) {
 	stateObject := sdb.GetOrNewStateObject(addr)
 	if stateObject != nil {
-		stateObject.SetBalance(amount)
+		stateObject.SetBalance(amount, reason)
 	}
 }
 
@@ -425,11 +438,18 @@ func (sdb *IntraBlockState) Selfdestruct(addr libcommon.Address) bool {
 	if stateObject == nil || stateObject.deleted {
 		return false
 	}
+
+	prevBalance := *stateObject.Balance()
 	sdb.journal.append(selfdestructChange{
 		account:     &addr,
 		prev:        stateObject.selfdestructed,
-		prevbalance: *stateObject.Balance(),
+		prevbalance: prevBalance,
 	})
+
+	if sdb.logger != nil && sdb.logger.OnBalanceChange != nil && !prevBalance.IsZero() {
+		sdb.logger.OnBalanceChange(addr, &prevBalance, uint256.NewInt(0), tracing.BalanceDecreaseSelfdestruct)
+	}
+
 	stateObject.markSelfdestructed()
 	stateObject.createdContract = false
 	stateObject.data.Balance.Clear()
@@ -545,6 +565,7 @@ func (sdb *IntraBlockState) createObject(addr libcommon.Address, previous *state
 	} else {
 		sdb.journal.append(resetObjectChange{account: &addr, prev: previous})
 	}
+
 	newobj.newlyCreated = true
 	sdb.setStateObject(addr, newobj)
 	return newobj
@@ -620,9 +641,14 @@ func (sdb *IntraBlockState) GetRefund() uint64 {
 	return sdb.refund
 }
 
-func updateAccount(EIP161Enabled bool, isAura bool, stateWriter StateWriter, addr libcommon.Address, stateObject *stateObject, isDirty bool) error {
+func updateAccount(EIP161Enabled bool, isAura bool, stateWriter StateWriter, addr libcommon.Address, stateObject *stateObject, isDirty bool, logger *tracing.Hooks) error {
 	emptyRemoval := EIP161Enabled && stateObject.empty() && (!isAura || addr != SystemAddress)
 	if stateObject.selfdestructed || (isDirty && emptyRemoval) {
+		// If ether was sent to account post-selfdestruct it is burnt.
+		if logger != nil && logger.OnBalanceChange != nil && !stateObject.Balance().IsZero() && stateObject.selfdestructed {
+			logger.OnBalanceChange(stateObject.address, stateObject.Balance(), uint256.NewInt(0), tracing.BalanceDecreaseSelfdestructBurn)
+		}
+
 		if err := stateWriter.DeleteAccount(addr, &stateObject.original); err != nil {
 			return err
 		}
@@ -694,7 +720,7 @@ func (sdb *IntraBlockState) FinalizeTx(chainRules *chain.Rules, stateWriter Stat
 		}
 
 		//fmt.Printf("FinalizeTx: %x, balance=%d %T\n", addr, so.data.Balance.Uint64(), stateWriter)
-		if err := updateAccount(chainRules.IsSpuriousDragon, chainRules.IsAura, stateWriter, addr, so, true); err != nil {
+		if err := updateAccount(chainRules.IsSpuriousDragon, chainRules.IsAura, stateWriter, addr, so, true, sdb.logger); err != nil {
 			return err
 		}
 		so.newlyCreated = false
@@ -750,7 +776,7 @@ func (sdb *IntraBlockState) MakeWriteSet(chainRules *chain.Rules, stateWriter St
 	}
 	for addr, stateObject := range sdb.stateObjects {
 		_, isDirty := sdb.stateObjectsDirty[addr]
-		if err := updateAccount(chainRules.IsSpuriousDragon, chainRules.IsAura, stateWriter, addr, stateObject, isDirty); err != nil {
+		if err := updateAccount(chainRules.IsSpuriousDragon, chainRules.IsAura, stateWriter, addr, stateObject, isDirty, sdb.logger); err != nil {
 			return err
 		}
 	}
diff --git a/core/state/plain_readonly.go b/core/state/plain_readonly.go
index b002ae61197..6f77efce1bc 100644
--- a/core/state/plain_readonly.go
+++ b/core/state/plain_readonly.go
@@ -29,6 +29,7 @@ import (
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/common/length"
 	"github.com/ledgerwatch/erigon-lib/kv"
+	"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
 	"github.com/ledgerwatch/log/v3"
 
 	"github.com/ledgerwatch/erigon/core/state/historyv2read"
@@ -57,6 +58,10 @@ type PlainState struct {
 }
 
 func NewPlainState(tx kv.Tx, blockNr uint64, systemContractLookup map[libcommon.Address][]libcommon.CodeRecord) *PlainState {
+	histV3, _ := kvcfg.HistoryV3.Enabled(tx)
+	if histV3 {
+		panic("Please use HistoryStateReaderV3 with HistoryV3")
+	}
 	ps := &PlainState{
 		tx:      tx,
 		blockNr: blockNr,
diff --git a/core/state/rw_v3.go b/core/state/rw_v3.go
index d793a53efdf..71ec3b7a4e3 100644
--- a/core/state/rw_v3.go
+++ b/core/state/rw_v3.go
@@ -5,7 +5,6 @@ import (
 	"encoding/binary"
 	"fmt"
 	"sync"
-	"time"
 
 	"github.com/holiman/uint256"
 	"github.com/ledgerwatch/log/v3"
@@ -249,21 +248,12 @@ func (rs *StateV3) ApplyLogsAndTraces4(txTask *TxTask, domains *libstate.SharedD
 	return nil
 }
 
-var (
-	mxState3UnwindRunning = metrics.GetOrCreateGauge("state3_unwind_running")
-	mxState3Unwind        = metrics.GetOrCreateSummary("state3_unwind")
-)
-
 func (rs *StateV3) Unwind(ctx context.Context, tx kv.RwTx, blockUnwindTo, txUnwindTo uint64, accumulator *shards.Accumulator) error {
-	unwindToLimit := tx.(libstate.HasAggTx).AggTx().(*libstate.AggregatorRoTx).CanUnwindDomainsToTxNum()
+	unwindToLimit := tx.(libstate.HasAggCtx).AggCtx().(*libstate.AggregatorRoTx).CanUnwindDomainsToTxNum()
 	if txUnwindTo < unwindToLimit {
 		return fmt.Errorf("can't unwind to txNum=%d, limit is %d", txUnwindTo, unwindToLimit)
 	}
-	mxState3UnwindRunning.Inc()
-	defer mxState3UnwindRunning.Dec()
-	st := time.Now()
-	defer mxState3Unwind.ObserveDuration(st)
 
 	var currentInc uint64
 	handle := func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error {
@@ -306,7 +296,6 @@ func (rs *StateV3) Unwind(ctx context.Context, tx kv.RwTx, blockUnwindTo, txUnwi
 
 	ttx := tx.(kv.TemporalTx)
 
-	// todo these updates could be collected during rs.domains.Unwind (as passed collect function eg)
 	{
 		iter, err := ttx.HistoryRange(kv.AccountsHistory, int(txUnwindTo), -1, order.Asc, -1)
 		if err != nil {
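Note that AddBalance above takes its balance-increment fast path only when no tracer is attached: deferred increments never materialize a state object, so OnBalanceChange would have no previous balance to report. A sketch of wiring a tracer in via the SetLogger method added above (newTracedState is a hypothetical helper):

package main

import (
	"github.com/ledgerwatch/erigon/core/state"
	"github.com/ledgerwatch/erigon/core/tracing"
)

// newTracedState shows why SetLogger must be called before execution starts:
// with a logger present, AddBalance always materializes the state object, so
// every OnBalanceChange sees a concrete previous balance instead of a
// deferred BalanceIncrease entry.
func newTracedState(reader state.StateReader, hooks *tracing.Hooks) *state.IntraBlockState {
	ibs := state.New(reader)
	ibs.SetLogger(hooks) // nil hooks keep the fast path enabled
	return ibs
}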
-func (so *stateObject) SubBalance(amount *uint256.Int) { +func (so *stateObject) SubBalance(amount *uint256.Int, reason tracing.BalanceChangeReason) { if amount.IsZero() { return } - so.SetBalance(new(uint256.Int).Sub(so.Balance(), amount)) + so.SetBalance(new(uint256.Int).Sub(so.Balance(), amount), reason) } -func (so *stateObject) SetBalance(amount *uint256.Int) { +func (so *stateObject) SetBalance(amount *uint256.Int, reason tracing.BalanceChangeReason) { so.db.journal.append(balanceChange{ account: &so.address, prev: so.data.Balance, }) + if so.db.logger != nil && so.db.logger.OnBalanceChange != nil { + so.db.logger.OnBalanceChange(so.address, so.Balance(), amount, reason) + } so.setBalance(amount) } @@ -345,6 +352,9 @@ func (so *stateObject) SetCode(codeHash libcommon.Hash, code []byte) { prevhash: so.data.CodeHash, prevcode: prevcode, }) + if so.db.logger != nil && so.db.logger.OnCodeChange != nil { + so.db.logger.OnCodeChange(so.address, so.data.CodeHash, prevcode, codeHash, code) + } so.setCode(codeHash, code) } @@ -359,6 +369,9 @@ func (so *stateObject) SetNonce(nonce uint64) { account: &so.address, prev: so.data.Nonce, }) + if so.db.logger != nil && so.db.logger.OnNonceChange != nil { + so.db.logger.OnNonceChange(so.address, so.data.Nonce, nonce) + } so.setNonce(nonce) }
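
The wiring above gives stateObject's setters an optional *tracing.Hooks (the db.logger field) that is notified on every balance/nonce/code mutation. As an illustration of the consumer side, a hook set that simply prints each mutation could look like the sketch below (not part of the diff; the import paths are the ones used elsewhere in this change, and attaching the hook set to an IntraBlockState is internal wiring this excerpt does not show):

package main

import (
	"fmt"

	"github.com/holiman/uint256"
	libcommon "github.com/ledgerwatch/erigon-lib/common"
	"github.com/ledgerwatch/erigon/core/tracing"
)

// exampleStateHooks returns a hook set that prints every state mutation
// surfaced by the SetBalance/SetNonce/SetState calls above. Signatures
// match the hook types declared in core/tracing/hooks.go in this diff.
func exampleStateHooks() *tracing.Hooks {
	return &tracing.Hooks{
		OnBalanceChange: func(addr libcommon.Address, prev, new *uint256.Int, reason tracing.BalanceChangeReason) {
			fmt.Printf("balance %x: %v -> %v (reason=%d)\n", addr, prev, new, reason)
		},
		OnNonceChange: func(addr libcommon.Address, prev, new uint64) {
			fmt.Printf("nonce %x: %d -> %d\n", addr, prev, new)
		},
		OnStorageChange: func(addr libcommon.Address, slot *libcommon.Hash, prev, new uint256.Int) {
			fmt.Printf("storage %x[%x]: %v -> %v\n", addr, *slot, &prev, &new)
		},
	}
}
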
diff --git a/core/state/state_test.go b/core/state/state_test.go index 4ae22af0a42..4be4b62b90a 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -25,9 +25,11 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/memdb" checker "gopkg.in/check.v1" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/crypto" ) @@ -47,11 +49,11 @@ var _ = checker.Suite(&StateSuite{}) func (s *StateSuite) TestDump(c *checker.C) { // generate a few entries obj1 := s.state.GetOrNewStateObject(toAddr([]byte{0x01})) - obj1.AddBalance(uint256.NewInt(22)) + obj1.AddBalance(uint256.NewInt(22), tracing.BalanceChangeUnspecified) obj2 := s.state.GetOrNewStateObject(toAddr([]byte{0x01, 0x02})) obj2.SetCode(crypto.Keccak256Hash([]byte{3, 3, 3, 3, 3, 3, 3}), []byte{3, 3, 3, 3, 3, 3, 3}) obj3 := s.state.GetOrNewStateObject(toAddr([]byte{0x02})) - obj3.SetBalance(uint256.NewInt(44)) + obj3.SetBalance(uint256.NewInt(44), tracing.BalanceChangeUnspecified) // write some of them to the trie err := s.w.UpdateAccountData(obj1.address, &obj1.data, new(accounts.Account)) @@ -72,7 +74,10 @@ func (s *StateSuite) TestDump(c *checker.C) { } defer tx.Rollback() - historyV3 := false //TODO: https://github.com/ledgerwatch/erigon/issues/10323 + historyV3, err := kvcfg.HistoryV3.Enabled(tx) + if err != nil { + panic(err) + } got := string(NewDumper(tx, 1, historyV3).DefaultDump()) want := `{ "root": "71edff0130dd2385947095001c73d9e28d862fc286fca2b922ca6f6f3cddfdd2", @@ -156,7 +161,7 @@ func (s *StateSuite) TestTouchDelete(c *checker.C) { s.state.Reset() snapshot := s.state.Snapshot() - s.state.AddBalance(common.Address{}, new(uint256.Int)) + s.state.AddBalance(common.Address{}, new(uint256.Int), tracing.BalanceChangeUnspecified) if len(s.state.journal.dirties) != 1 { c.Fatal("expected one dirty state object") @@ -222,7 +227,7 @@ func TestSnapshot2(t *testing.T) { // db, trie are already non-empty values so0 := state.getStateObject(stateobjaddr0) - so0.SetBalance(uint256.NewInt(42)) + so0.SetBalance(uint256.NewInt(42), tracing.BalanceChangeUnspecified) so0.SetNonce(43) so0.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e'}), []byte{'c', 'a', 'f', 'e'}) so0.selfdestructed = false @@ -242,7 +247,7 @@ func TestSnapshot2(t *testing.T) { // and one with deleted == true so1 := state.getStateObject(stateobjaddr1) - so1.SetBalance(uint256.NewInt(52)) + so1.SetBalance(uint256.NewInt(52), tracing.BalanceChangeUnspecified) so1.SetNonce(53) so1.SetCode(crypto.Keccak256Hash([]byte{'c', 'a', 'f', 'e', '2'}), []byte{'c', 'a', 'f', 'e', '2'}) so1.selfdestructed = true @@ -328,12 +333,12 @@ func TestDump(t *testing.T) { // generate a few entries obj1 := state.GetOrNewStateObject(toAddr([]byte{0x01})) - obj1.AddBalance(uint256.NewInt(22)) + obj1.AddBalance(uint256.NewInt(22), tracing.BalanceChangeUnspecified) obj2 := state.GetOrNewStateObject(toAddr([]byte{0x01, 0x02})) obj2.SetCode(crypto.Keccak256Hash([]byte{3, 3, 3, 3, 3, 3, 3}), []byte{3, 3, 3, 3, 3, 3, 3}) obj2.setIncarnation(1) obj3 := state.GetOrNewStateObject(toAddr([]byte{0x02})) - obj3.SetBalance(uint256.NewInt(44)) + obj3.SetBalance(uint256.NewInt(44), tracing.BalanceChangeUnspecified) // write some of them to the trie err := w.UpdateAccountData(obj1.address, &obj1.data, new(accounts.Account)) @@ -365,7 +370,10 @@ func TestDump(t *testing.T) { } // check that dump contains the state objects that are in trie - historyV3 := false + historyV3, err := kvcfg.HistoryV3.Enabled(tx) + if err != nil { + panic(err) + } got := string(NewDumper(tx, 2, historyV3).DefaultDump()) want := `{ "root": "0000000000000000000000000000000000000000000000000000000000000000", diff --git a/core/state/txtask.go b/core/state/txtask.go index c78f84684c3..616f2094b36 100644 --- a/core/state/txtask.go +++ b/core/state/txtask.go @@ -62,8 +62,6 @@ type TxTask struct { // Need investigate if we can pass here - only limited amount of receipts // And remove this field if possible - because it will make problems for parallel-execution BlockReceipts types.Receipts - - Requests types.Requests } func (t *TxTask) Reset() { diff --git a/core/state_processor.go b/core/state_processor.go index c5b81a49786..a868adbf4e6 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -35,6 +35,11 @@ import ( func applyTransaction(config *chain.Config, engine consensus.EngineReader, gp *GasPool, ibs *state.IntraBlockState, stateWriter state.StateWriter, header *types.Header, tx types.Transaction, usedGas, usedBlobGas *uint64, evm *vm.EVM, cfg vm.Config) (*types.Receipt, []byte, error) { + var ( + receipt *types.Receipt + err error + ) + rules := evm.ChainRules() msg, err := tx.AsMessage(*types.MakeSigner(config, header.Number.Uint64(), header.Time), header.BaseFee, rules) if err != nil { @@ -42,10 +47,21 @@ } msg.SetCheckNonce(!cfg.StatelessExec) + if evm.Config().Tracer != nil { + if evm.Config().Tracer.OnTxStart != nil { + evm.Config().Tracer.OnTxStart(evm.GetVMContext(), tx, msg.From()) + } + if evm.Config().Tracer.OnTxEnd != nil { + defer func() { + evm.Config().Tracer.OnTxEnd(receipt, err) + }() + } + } + if msg.FeeCap().IsZero() && engine != nil { // Only zero-gas transactions may be service ones syscall := func(contract libcommon.Address, data []byte) ([]byte, error) { - return SysCallContract(contract, data, config, ibs, header, engine, true /* constCall */) + return SysCallContract(contract, data, config, ibs, header, engine, true /* constCall */, evm.Config().Tracer) } msg.SetIsFree(engine.IsServiceTransaction(msg.From(), syscall)) } @@
-58,7 +74,8 @@ func applyTransaction(config *chain.Config, engine consensus.EngineReader, gp *G // Update the evm with the new transaction context. evm.Reset(txContext, ibs) - result, err := ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */) + var result *ExecutionResult + result, err = ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */) if err != nil { return nil, nil, err } @@ -73,7 +90,6 @@ func applyTransaction(config *chain.Config, engine consensus.EngineReader, gp *G // Set the receipt logs and create the bloom filter. // based on the eip phase, we're passing whether the root touch-delete accounts. - var receipt *types.Receipt if !cfg.NoReceipts { // by the tx. receipt = &types.Receipt{Type: tx.Type(), CumulativeGasUsed: *usedGas} diff --git a/core/state_transition.go b/core/state_transition.go index 7e1e29993dd..4ffcb63516f 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -28,6 +28,7 @@ import ( cmath "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/common/u256" "github.com/ledgerwatch/erigon/consensus/misc" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/core/vm/evmtypes" "github.com/ledgerwatch/erigon/crypto" @@ -255,12 +256,18 @@ func (st *StateTransition) buyGas(gasBailout bool) error { return err } } + + if st.evm.Config().Tracer != nil && st.evm.Config().Tracer.OnGasChange != nil { + st.evm.Config().Tracer.OnGasChange(0, st.msg.Gas(), tracing.GasChangeTxInitialBalance) + } + st.gasRemaining += st.msg.Gas() st.initialGas = st.msg.Gas() if subBalance { - st.state.SubBalance(st.msg.From(), gasVal) - st.state.SubBalance(st.msg.From(), blobGasVal) + // CS TODO: cross check + st.state.SubBalance(st.msg.From(), gasVal, tracing.BalanceDecreaseGasBuy) + st.state.SubBalance(st.msg.From(), blobGasVal, tracing.BalanceDecreaseGasBuy) } return nil } @@ -368,12 +375,6 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (*Executi if err := st.preCheck(gasBailout); err != nil { return nil, err } - if st.evm.Config().Debug { - st.evm.Config().Tracer.CaptureTxStart(st.initialGas) - defer func() { - st.evm.Config().Tracer.CaptureTxEnd(st.gasRemaining) - }() - } msg := st.msg sender := vm.AccountRef(msg.From()) @@ -390,6 +391,10 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (*Executi if st.gasRemaining < gas { return nil, fmt.Errorf("%w: have %d, want %d", ErrIntrinsicGas, st.gasRemaining, gas) } + + if t := st.evm.Config().Tracer; t != nil && t.OnGasChange != nil { + t.OnGasChange(st.gasRemaining, st.gasRemaining-gas, tracing.GasChangeTxIntrinsicGas) + } st.gasRemaining -= gas var bailout bool @@ -444,12 +449,12 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (*Executi } amount := new(uint256.Int).SetUint64(st.gasUsed()) amount.Mul(amount, effectiveTip) // gasUsed * effectiveTip = how much goes to the block producer (miner, validator) - st.state.AddBalance(coinbase, amount) + st.state.AddBalance(coinbase, amount, tracing.BalanceIncreaseRewardTransactionFee) if !msg.IsFree() && rules.IsLondon { burntContractAddress := st.evm.ChainConfig().GetBurntContract(st.evm.Context.BlockNumber) if burntContractAddress != nil { burnAmount := new(uint256.Int).Mul(new(uint256.Int).SetUint64(st.gasUsed()), st.evm.Context.BaseFee) - st.state.AddBalance(*burntContractAddress, burnAmount) + st.state.AddBalance(*burntContractAddress, burnAmount, tracing.BalanceChangeUnspecified) } } if 
st.isBor { @@ -484,11 +489,19 @@ func (st *StateTransition) refundGas(refundQuotient uint64) { if refund > st.state.GetRefund() { refund = st.state.GetRefund() } + + if st.evm.Config().Tracer != nil && st.evm.Config().Tracer.OnGasChange != nil && refund > 0 { + st.evm.Config().Tracer.OnGasChange(st.gasRemaining, st.gasRemaining+refund, tracing.GasChangeTxRefunds) + } st.gasRemaining += refund // Return ETH for remaining gas, exchanged at the original rate. remaining := new(uint256.Int).Mul(new(uint256.Int).SetUint64(st.gasRemaining), st.gasPrice) - st.state.AddBalance(st.msg.From(), remaining) + st.state.AddBalance(st.msg.From(), remaining, tracing.BalanceIncreaseGasReturn) + + if st.evm.Config().Tracer != nil && st.evm.Config().Tracer.OnGasChange != nil && st.gasRemaining > 0 { + st.evm.Config().Tracer.OnGasChange(st.gasRemaining, 0, tracing.GasChangeTxLeftOverReturned) + } // Also return remaining gas to the block gas counter so it is // available for the next transaction. diff --git a/core/test/domains_restart_test.go b/core/test/domains_restart_test.go index ac383b8685f..70021f94232 100644 --- a/core/test/domains_restart_test.go +++ b/core/test/domains_restart_test.go @@ -22,6 +22,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon-lib/kv/temporal" @@ -59,6 +60,12 @@ func testDbAndAggregatorv3(t *testing.T, fpath string, aggStep uint64) (kv.RwDB, agg.DisableFsync() require.NoError(t, err) + // v3 setup + err = db.Update(context.Background(), func(tx kv.RwTx) error { + return kvcfg.HistoryV3.ForceWrite(tx, true) + }) + require.NoError(t, err) + tdb, err := temporal.New(db, agg) require.NoError(t, err) db = tdb diff --git a/core/tracing/hooks.go b/core/tracing/hooks.go new file mode 100644 index 00000000000..4d57e8981f1 --- /dev/null +++ b/core/tracing/hooks.go @@ -0,0 +1,293 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package tracing + +import ( + "math/big" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + + "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon/core/types" + + "github.com/holiman/uint256" +) + +// OpContext provides the context in which the opcode is being +// executed, including the memory, stack and various contract-level information. +type OpContext interface { + MemoryData() []byte + StackData() []uint256.Int + Caller() libcommon.Address + Address() libcommon.Address + CallValue() *uint256.Int + CallInput() []byte + Code() []byte + CodeHash() libcommon.Hash +}
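
OpContext above is the read-only view handed to per-opcode hooks. A consumer-side sketch (illustrative only, not part of the diff; the OnOpcode/OnFault hook types it relies on are declared further down in this file):

package main

import (
	"fmt"

	"github.com/ledgerwatch/erigon/core/tracing"
)

// exampleOpcodeHooks counts executed opcodes and reports faults, reading
// contract-level information through the OpContext defined above.
func exampleOpcodeHooks() *tracing.Hooks {
	counts := make(map[byte]uint64)
	return &tracing.Hooks{
		OnOpcode: func(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) {
			counts[op]++
		},
		OnFault: func(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, depth int, err error) {
			fmt.Printf("fault at pc=%d op=0x%02x in %x: %v\n", pc, op, scope.Address(), err)
		},
	}
}
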
+ +// IntraBlockState gives tracers access to the whole state. +type IntraBlockState interface { + GetBalance(libcommon.Address) *uint256.Int + GetNonce(libcommon.Address) uint64 + GetCode(libcommon.Address) []byte + GetState(addr libcommon.Address, key *libcommon.Hash, value *uint256.Int) + Exist(libcommon.Address) bool + GetRefund() uint64 +} + +// VMContext provides the context for the EVM execution. +type VMContext struct { + Coinbase libcommon.Address + BlockNumber uint64 + Time uint64 + Random *libcommon.Hash + // Effective tx gas price + GasPrice *uint256.Int + ChainConfig *chain.Config + IntraBlockState IntraBlockState + + TxHash libcommon.Hash +} + +// BlockEvent is emitted upon tracing an incoming block. +// It contains the block as well as consensus related information. +type BlockEvent struct { + Block *types.Block + TD *big.Int + Finalized *types.Header + Safe *types.Header +} + +type ( + /* + - VM events - + */ + + // TxStartHook is called before the execution of a transaction starts. + // Call simulations don't come with a valid signature; the `from` field + // should be used as the address of the caller. + TxStartHook = func(vm *VMContext, tx types.Transaction, from libcommon.Address) + + // TxEndHook is called after the execution of a transaction ends. + TxEndHook = func(receipt *types.Receipt, err error) + + // EnterHook is invoked when the processing of a message starts. + EnterHook = func(depth int, typ byte, from libcommon.Address, to libcommon.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) + + // ExitHook is invoked when the processing of a message ends. + // `reverted` is true when there was an error during the execution. + // Exceptionally, before the homestead hardfork a contract creation that + // ran out of gas when attempting to persist the code to database did not + // count as a call failure and did not cause a revert of the call. This will + // be indicated by `reverted == false` and `err == ErrCodeStoreOutOfGas`. + ExitHook = func(depth int, output []byte, gasUsed uint64, err error, reverted bool) + + // OpcodeHook is invoked just prior to the execution of an opcode. + OpcodeHook = func(pc uint64, op byte, gas, cost uint64, scope OpContext, rData []byte, depth int, err error) + + // FaultHook is invoked when an error occurs during the execution of an opcode. + FaultHook = func(pc uint64, op byte, gas, cost uint64, scope OpContext, depth int, err error) + + // GasChangeHook is invoked when the gas changes. + GasChangeHook = func(old, new uint64, reason GasChangeReason) + + /* + - Chain events - + */ + + // BlockchainInitHook is called when the blockchain is initialized. + BlockchainInitHook = func(chainConfig *chain.Config) + + // BlockStartHook is called before executing `block`. + // `td` is the total difficulty prior to `block`. + BlockStartHook = func(event BlockEvent) + + // BlockEndHook is called after executing a block. + BlockEndHook = func(err error) + + // GenesisBlockHook is called when the genesis block is being processed. + GenesisBlockHook = func(genesis *types.Block, alloc types.GenesisAlloc) + + // OnSystemCallStartHook is called when a system call is about to be executed. Today, + // this hook is invoked when the EIP-4788 system call is about to be executed to set the + // beacon block root. + // + // After this hook, the EVM call tracing will happen as usual, so you will receive `OnEnter`/`OnExit` calls + // as well as state hooks between this hook and the `OnSystemCallEndHook`.
+ // + // Note that a system call happens outside normal transaction execution, so the `OnTxStart/OnTxEnd` hooks + // will not be invoked. + OnSystemCallStartHook = func() + + // OnSystemCallEndHook is called when a system call has finished executing. Today, + // this hook is invoked after the EIP-4788 system call has executed to set the + // beacon block root. + OnSystemCallEndHook = func() + + /* + - State events - + */ + + // BalanceChangeHook is called when the balance of an account changes. + BalanceChangeHook = func(addr libcommon.Address, prev, new *uint256.Int, reason BalanceChangeReason) + + // NonceChangeHook is called when the nonce of an account changes. + NonceChangeHook = func(addr libcommon.Address, prev, new uint64) + + // CodeChangeHook is called when the code of an account changes. + CodeChangeHook = func(addr libcommon.Address, prevCodeHash libcommon.Hash, prevCode []byte, codeHash libcommon.Hash, code []byte) + + // StorageChangeHook is called when the storage of an account changes. + StorageChangeHook = func(addr libcommon.Address, slot *libcommon.Hash, prev, new uint256.Int) + + // LogHook is called when a log is emitted. + LogHook = func(log *types.Log) +) + +type Hooks struct { + // VM events + OnTxStart TxStartHook + OnTxEnd TxEndHook + OnEnter EnterHook + OnExit ExitHook + OnOpcode OpcodeHook + OnFault FaultHook + OnGasChange GasChangeHook + // Chain events + OnBlockchainInit BlockchainInitHook + OnBlockStart BlockStartHook + OnBlockEnd BlockEndHook + OnGenesisBlock GenesisBlockHook + OnSystemCallStart OnSystemCallStartHook + OnSystemCallEnd OnSystemCallEndHook + // State events + OnBalanceChange BalanceChangeHook + OnNonceChange NonceChangeHook + OnCodeChange CodeChangeHook + OnStorageChange StorageChangeHook + OnLog LogHook +} + +// BalanceChangeReason is used to indicate the reason for a balance change, useful +// for tracing and reporting. +type BalanceChangeReason byte + +const ( + BalanceChangeUnspecified BalanceChangeReason = 0 + + // Issuance + // BalanceIncreaseRewardMineUncle is a reward for mining an uncle block. + BalanceIncreaseRewardMineUncle BalanceChangeReason = 1 + // BalanceIncreaseRewardMineBlock is a reward for mining a block. + BalanceIncreaseRewardMineBlock BalanceChangeReason = 2 + // BalanceIncreaseWithdrawal is ether withdrawn from the beacon chain. + BalanceIncreaseWithdrawal BalanceChangeReason = 3 + // BalanceIncreaseGenesisBalance is ether allocated at the genesis block. + BalanceIncreaseGenesisBalance BalanceChangeReason = 4 + + // Transaction fees + // BalanceIncreaseRewardTransactionFee is the transaction tip increasing block builder's balance. + BalanceIncreaseRewardTransactionFee BalanceChangeReason = 5 + // BalanceDecreaseGasBuy is spent to purchase gas for executing a transaction. + // Part of this gas will be burnt as per EIP-1559 rules. + BalanceDecreaseGasBuy BalanceChangeReason = 6 + // BalanceIncreaseGasReturn is ether returned for unused gas at the end of execution. + BalanceIncreaseGasReturn BalanceChangeReason = 7 + + // DAO fork + // BalanceIncreaseDaoContract is ether sent to the DAO refund contract. + BalanceIncreaseDaoContract BalanceChangeReason = 8 + // BalanceDecreaseDaoAccount is ether taken from a DAO account to be moved to the refund contract. + BalanceDecreaseDaoAccount BalanceChangeReason = 9 + + // BalanceChangeTransfer is ether transferred via a call. + // It is a decrease for the sender and an increase for the recipient.
+ BalanceChangeTransfer BalanceChangeReason = 10 + // BalanceChangeTouchAccount is a transfer of zero value. It is only there to + // touch-create an account. + BalanceChangeTouchAccount BalanceChangeReason = 11 + + // BalanceIncreaseSelfdestruct is added to the recipient as indicated by a selfdestructing account. + BalanceIncreaseSelfdestruct BalanceChangeReason = 12 + // BalanceDecreaseSelfdestruct is deducted from a contract due to self-destruct. + BalanceDecreaseSelfdestruct BalanceChangeReason = 13 + // BalanceDecreaseSelfdestructBurn is ether that is sent to an already self-destructed + // account within the same tx (captured at end of tx). + // Note it doesn't account for a self-destruct which appoints itself as recipient. + BalanceDecreaseSelfdestructBurn BalanceChangeReason = 14 +) + +// GasChangeReason is used to indicate the reason for a gas change, useful +// for tracing and reporting. +// +// There are essentially two types of gas changes: those that can be emitted once per transaction +// and those that can be emitted on a call basis, so possibly multiple times per transaction. +// +// They can be recognized easily by their name: those that start with `GasChangeTx` are emitted +// once per transaction, while those that start with `GasChangeCall` are emitted on a call basis. +type GasChangeReason byte + +const ( + GasChangeUnspecified GasChangeReason = 0 + + // GasChangeTxInitialBalance is the initial balance for the call which will be equal to the gasLimit of the call. There is only + // one such gas change per transaction. + GasChangeTxInitialBalance GasChangeReason = 1 + // GasChangeTxIntrinsicGas is the amount of gas that will be charged for the intrinsic cost of the transaction; there is + // always exactly one of those per transaction. + GasChangeTxIntrinsicGas GasChangeReason = 2 + // GasChangeTxRefunds is the sum of all refunds which happened during the tx execution (e.g. a storage slot being cleared); + // this generates an increase in gas. There is at most one such gas change per transaction. + GasChangeTxRefunds GasChangeReason = 3 + // GasChangeTxLeftOverReturned is the amount of gas left over at the end of transaction's execution that will be returned + // to the chain. This change will always be a negative change as we "drain" left over gas towards 0. If there was no gas + // left at the end of execution, no such event will be emitted. The value of the returned gas in Wei is credited back to the caller. + // There is at most one such gas change per transaction. + GasChangeTxLeftOverReturned GasChangeReason = 4 + + // GasChangeCallInitialBalance is the initial balance for the call which will be equal to the gasLimit of the call. There is only + // one such gas change per call. + GasChangeCallInitialBalance GasChangeReason = 5 + // GasChangeCallLeftOverReturned is the amount of gas left over that will be returned to the caller; this change will always + // be a negative change as we "drain" left over gas towards 0. If there was no gas left at the end of execution, no such event + // will be emitted. + GasChangeCallLeftOverReturned GasChangeReason = 6 + // GasChangeCallLeftOverRefunded is the amount of gas that will be refunded to the call after the child call it + // executed has completed. This value is always positive as we are giving the left over gas of the child call back to the caller. + // If there was no gas left to be refunded, no such event will be emitted.
+ GasChangeCallLeftOverRefunded GasChangeReason = 7 + // GasChangeCallContractCreation is the amount of gas that will be burned for a CREATE. + GasChangeCallContractCreation GasChangeReason = 8 + // GasChangeCallContractCreation2 is the amount of gas that will be burned for a CREATE2. + GasChangeCallContractCreation2 GasChangeReason = 9 + // GasChangeCallCodeStorage is the amount of gas that will be charged for code storage. + GasChangeCallCodeStorage GasChangeReason = 10 + // GasChangeCallOpCode is the amount of gas that will be charged for an opcode executed by the EVM; the exact opcode that was + // performed can be checked via `OnOpcode` handling. + GasChangeCallOpCode GasChangeReason = 11 + // GasChangeCallPrecompiledContract is the amount of gas that will be charged for a precompiled contract execution. + GasChangeCallPrecompiledContract GasChangeReason = 12 + // GasChangeCallStorageColdAccess is the amount of gas that will be charged for a cold storage access as controlled by EIP2929 rules. + GasChangeCallStorageColdAccess GasChangeReason = 13 + // GasChangeCallFailedExecution is the burning of the remaining gas when the execution failed without a revert. + GasChangeCallFailedExecution GasChangeReason = 14 + + // GasChangeIgnored is a special value that can be used to indicate that the gas change should be ignored as + // it will be "manually" tracked by a direct emit of the gas change event. + GasChangeIgnored GasChangeReason = 0xFF +) diff --git a/core/types/access_list_tx.go b/core/types/access_list_tx.go index f795ba0da0f..c0b659fec12 100644 --- a/core/types/access_list_tx.go +++ b/core/types/access_list_tx.go @@ -504,13 +504,6 @@ func (tx *AccessListTx) GetChainID() *uint256.Int { return tx.ChainID } -func (tx *AccessListTx) cashedSender() (sender libcommon.Address, ok bool) { - s := tx.from.Load() - if s == nil { - return sender, false - } - return s.(libcommon.Address), true -} func (tx *AccessListTx) Sender(signer Signer) (libcommon.Address, error) { if sc := tx.from.Load(); sc != nil { return sc.(libcommon.Address), nil diff --git a/core/types/blob_tx.go b/core/types/blob_tx.go index 7355c0acb72..0953d97d451 100644 --- a/core/types/blob_tx.go +++ b/core/types/blob_tx.go @@ -80,14 +80,6 @@ func (stx *BlobTx) AsMessage(s Signer, baseFee *big.Int, rules *chain.Rules) (Me return msg, err } -func (stx *BlobTx) cashedSender() (sender libcommon.Address, ok bool) { - s := stx.from.Load() - if s == nil { - return sender, false - } - return s.(libcommon.Address), true -} - func (stx *BlobTx) Sender(signer Signer) (libcommon.Address, error) { if sc := stx.from.Load(); sc != nil { return sc.(libcommon.Address), nil @@ -377,7 +369,7 @@ func decodeBlobVersionedHashes(hashes *[]libcommon.Hash, s *rlp.Stream) error { copy((_hash)[:], b) *hashes = append(*hashes, _hash) } else { - return fmt.Errorf("wrong size for blobVersionedHashes: %d", len(b)) + return fmt.Errorf("wrong size for blobVersionedHashes: %d, %x", len(b), b) } } diff --git a/core/types/blob_tx_wrapper.go b/core/types/blob_tx_wrapper.go index d7cd8781cf8..2565da90770 100644 --- a/core/types/blob_tx_wrapper.go +++ b/core/types/blob_tx_wrapper.go @@ -331,8 +331,6 @@ func (txw *BlobTxWrapper) RawSignatureValues() (*uint256.Int, *uint256.Int, *uin return txw.Tx.RawSignatureValues() } -func (txw *BlobTxWrapper) cashedSender() (libcommon.Address, bool) { return txw.Tx.cashedSender() } - func (txw *BlobTxWrapper) Sender(s Signer) (libcommon.Address, error) { return txw.Tx.Sender(s) } func (txw *BlobTxWrapper) GetSender() (libcommon.Address,
bool) { return txw.Tx.GetSender() } diff --git a/core/types/block.go b/core/types/block.go index fb0282e67d2..f35ef67901f 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -105,8 +105,6 @@ type Header struct { ParentBeaconBlockRoot *libcommon.Hash `json:"parentBeaconBlockRoot"` // EIP-4788 - RequestsRoot *libcommon.Hash `json:"requestsRoot"` // EIP-7685 - // The verkle proof is ignored in legacy headers Verkle bool VerkleProof []byte @@ -163,10 +161,6 @@ func (h *Header) EncodingSize() int { encodingSize += 33 } - if h.RequestsRoot != nil { - encodingSize += 33 - } - if h.Verkle { // Encoding of Verkle Proof encodingSize += rlp2.StringLen(h.VerkleProof) @@ -316,16 +310,6 @@ func (h *Header) EncodeRLP(w io.Writer) error { } } - if h.RequestsRoot != nil { - b[0] = 128 + 32 - if _, err := w.Write(b[:1]); err != nil { - return err - } - if _, err := w.Write(h.RequestsRoot.Bytes()); err != nil { - return err - } - } - if h.Verkle { if err := rlp.EncodeString(h.VerkleProof, w, b[:]); err != nil { return err @@ -514,23 +498,6 @@ func (h *Header) DecodeRLP(s *rlp.Stream) error { h.ParentBeaconBlockRoot = new(libcommon.Hash) h.ParentBeaconBlockRoot.SetBytes(b) - // RequestsRoot - if b, err = s.Bytes(); err != nil { - if errors.Is(err, rlp.EOL) { - h.RequestsRoot = nil - if err := s.ListEnd(); err != nil { - return fmt.Errorf("close header struct (no RequestsRoot): %w", err) - } - return nil - } - return fmt.Errorf("read RequestsRoot: %w", err) - } - if len(b) != 32 { - return fmt.Errorf("wrong size for RequestsRoot: %d", len(b)) - } - h.RequestsRoot = new(libcommon.Hash) - h.RequestsRoot.SetBytes(b) - if h.Verkle { if h.VerkleProof, err = s.Bytes(); err != nil { return fmt.Errorf("read VerkleProof: %w", err) @@ -590,9 +557,6 @@ func (h *Header) Size() common.StorageSize { if h.ParentBeaconBlockRoot != nil { s += common.StorageSize(32) } - if h.RequestsRoot != nil { - s += common.StorageSize(32) - } return s } @@ -627,7 +591,6 @@ type Body struct { Transactions []Transaction Uncles []*Header Withdrawals []*Withdrawal - Requests []*Request } // RawBody is semi-parsed variant of Body, where transactions are still unparsed RLP strings @@ -637,7 +600,6 @@ type RawBody struct { Transactions [][]byte Uncles []*Header Withdrawals []*Withdrawal - Requests []*Request } type BodyForStorage struct { @@ -645,7 +607,6 @@ type BodyForStorage struct { TxAmount uint32 Uncles []*Header Withdrawals []*Withdrawal - Requests []*Request } // Alternative representation of the Block. 
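
Returning to the tracing API above: the GasChangeReason values introduced in core/tracing/hooks.go pair with the OnGasChange emissions added to core/state_transition.go earlier in this diff (initial balance, intrinsic gas, refunds, left over gas). A minimal consumer sketch (illustrative only, not part of the diff):

package main

import (
	"github.com/ledgerwatch/erigon/core/tracing"
)

// exampleGasLedger accumulates the net gas delta per GasChangeReason,
// mirroring the OnGasChange calls emitted during a state transition.
func exampleGasLedger() (*tracing.Hooks, map[tracing.GasChangeReason]int64) {
	perReason := make(map[tracing.GasChangeReason]int64)
	hooks := &tracing.Hooks{
		OnGasChange: func(old, new uint64, reason tracing.GasChangeReason) {
			if reason == tracing.GasChangeIgnored {
				return // emitter tracks these manually, per the constant's doc comment
			}
			perReason[reason] += int64(new) - int64(old)
		},
	}
	return hooks, perReason
}
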
@@ -677,7 +638,6 @@ type Block struct { uncles []*Header transactions Transactions withdrawals []*Withdrawal - requests []*Request // caches hash atomic.Value @@ -706,11 +666,11 @@ func (b *Body) SendersFromTxs() []libcommon.Address { } func (rb RawBody) EncodingSize() int { - payloadSize, _, _, _, _ := rb.payloadSize() + payloadSize, _, _, _ := rb.payloadSize() return payloadSize } -func (rb RawBody) payloadSize() (payloadSize, txsLen, unclesLen, withdrawalsLen, requestsLen int) { +func (rb RawBody) payloadSize() (payloadSize, txsLen, unclesLen, withdrawalsLen int) { // size of Transactions for _, tx := range rb.Transactions { txsLen += len(tx) @@ -727,17 +687,11 @@ func (rb RawBody) payloadSize() (payloadSize, txsLen, unclesLen, withdrawalsLen, payloadSize += rlp2.ListPrefixLen(withdrawalsLen) + withdrawalsLen } - // size of requests - if rb.Requests != nil { - requestsLen += encodingSizeGeneric(rb.Requests) - payloadSize += rlp2.ListPrefixLen(requestsLen) + requestsLen - } - - return payloadSize, txsLen, unclesLen, withdrawalsLen, requestsLen + return payloadSize, txsLen, unclesLen, withdrawalsLen } func (rb RawBody) EncodeRLP(w io.Writer) error { - payloadSize, txsLen, unclesLen, withdrawalsLen, requestsLen := rb.payloadSize() + payloadSize, txsLen, unclesLen, withdrawalsLen := rb.payloadSize() var b [33]byte // prefix if err := EncodeStructSizePrefix(payloadSize, w, b[:]); err != nil { @@ -762,12 +716,6 @@ func (rb RawBody) EncodeRLP(w io.Writer) error { return err } } - // encode Requests - if rb.Requests != nil { - if err := encodeRLPGeneric(rb.Requests, requestsLen, w, b[:]); err != nil { - return err - } - } return nil } @@ -803,16 +751,11 @@ func (rb *RawBody) DecodeRLP(s *rlp.Stream) error { if err := decodeWithdrawals(&rb.Withdrawals, s); err != nil { return err } - // decode Requests - rb.Requests = []*Request{} - if err := decodeRequests(&rb.Requests, s); err != nil { - return err - } return s.ListEnd() } -func (bfs BodyForStorage) payloadSize() (payloadSize, unclesLen, withdrawalsLen, requestsLen int) { +func (bfs BodyForStorage) payloadSize() (payloadSize, unclesLen, withdrawalsLen int) { baseTxIdLen := 1 + rlp.IntLenExcludingHead(bfs.BaseTxId) txAmountLen := 1 + rlp.IntLenExcludingHead(uint64(bfs.TxAmount)) @@ -829,17 +772,11 @@ func (bfs BodyForStorage) payloadSize() (payloadSize, unclesLen, withdrawalsLen, payloadSize += rlp2.ListPrefixLen(withdrawalsLen) + withdrawalsLen } - // size of Requests - if bfs.Requests != nil { - requestsLen += encodingSizeGeneric(bfs.Requests) - payloadSize += rlp2.ListPrefixLen(requestsLen) + requestsLen - } - - return payloadSize, unclesLen, withdrawalsLen, requestsLen + return payloadSize, unclesLen, withdrawalsLen } func (bfs BodyForStorage) EncodeRLP(w io.Writer) error { - payloadSize, unclesLen, withdrawalsLen, requestsLen := bfs.payloadSize() + payloadSize, unclesLen, withdrawalsLen := bfs.payloadSize() var b [33]byte // prefix @@ -868,12 +805,6 @@ func (bfs BodyForStorage) EncodeRLP(w io.Writer) error { return err } } - // encode Requests - if bfs.Requests != nil { - if err := encodeRLPGeneric(bfs.Requests, requestsLen, w, b[:]); err != nil { - return err - } - } return nil } @@ -900,20 +831,16 @@ func (bfs *BodyForStorage) DecodeRLP(s *rlp.Stream) error { if err := decodeWithdrawals(&bfs.Withdrawals, s); err != nil { return err } - // decode Requests - bfs.Requests = []*Request{} - if err := decodeRequests(&bfs.Requests, s); err != nil { - return err - } + return s.ListEnd() } func (bb Body) EncodingSize() int { - payloadSize, _, _, 
_, _ := bb.payloadSize() + payloadSize, _, _, _ := bb.payloadSize() return payloadSize } -func (bb Body) payloadSize() (payloadSize int, txsLen, unclesLen, withdrawalsLen, requestsLen int) { +func (bb Body) payloadSize() (payloadSize int, txsLen, unclesLen, withdrawalsLen int) { // size of Transactions txsLen += encodingSizeGeneric(bb.Transactions) payloadSize += rlp2.ListPrefixLen(txsLen) + txsLen @@ -928,17 +855,11 @@ func (bb Body) payloadSize() (payloadSize int, txsLen, unclesLen, withdrawalsLen payloadSize += rlp2.ListPrefixLen(withdrawalsLen) + withdrawalsLen } - // size of Requests - if bb.Requests != nil { - requestsLen += encodingSizeGeneric(bb.Requests) - payloadSize += rlp2.ListPrefixLen(requestsLen) + requestsLen - } - - return payloadSize, txsLen, unclesLen, withdrawalsLen, requestsLen + return payloadSize, txsLen, unclesLen, withdrawalsLen } func (bb Body) EncodeRLP(w io.Writer) error { - payloadSize, txsLen, unclesLen, withdrawalsLen, requestsLen := bb.payloadSize() + payloadSize, txsLen, unclesLen, withdrawalsLen := bb.payloadSize() var b [33]byte // prefix if err := EncodeStructSizePrefix(payloadSize, w, b[:]); err != nil { @@ -958,12 +879,6 @@ func (bb Body) EncodeRLP(w io.Writer) error { return err } } - // encode Requests - if bb.Requests != nil { - if err := encodeRLPGeneric(bb.Requests, requestsLen, w, b[:]); err != nil { - return err - } - } return nil } @@ -985,10 +900,6 @@ func (bb *Body) DecodeRLP(s *rlp.Stream) error { if err := decodeWithdrawals(&bb.Withdrawals, s); err != nil { return err } - // decode Requests - if err := decodeRequests(&bb.Requests, s); err != nil { - return err - } return s.ListEnd() } @@ -999,7 +910,7 @@ func (bb *Body) DecodeRLP(s *rlp.Stream) error { // The values of TxHash, UncleHash, ReceiptHash, Bloom, and WithdrawalHash // in the header are ignored and set to the values derived from // the given txs, uncles, receipts, and withdrawals. -func NewBlock(header *Header, txs []Transaction, uncles []*Header, receipts []*Receipt, withdrawals []*Withdrawal, requests []*Request) *Block { +func NewBlock(header *Header, txs []Transaction, uncles []*Header, receipts []*Receipt, withdrawals []*Withdrawal) *Block { b := &Block{header: CopyHeader(header)} // TODO: panic if len(txs) != len(receipts) @@ -1046,28 +957,13 @@ func NewBlock(header *Header, txs []Transaction, uncles []*Header, receipts []*R b.header.ParentBeaconBlockRoot = header.ParentBeaconBlockRoot - if requests == nil { - b.header.RequestsRoot = nil - } else if len(requests) == 0 { - b.header.RequestsRoot = &EmptyRootHash // TODO(racytech): is this correct? 
- b.requests = make(Requests, len(requests)) - } else { - h := DeriveSha(Requests(requests)) - b.header.RequestsRoot = &h - b.requests = make(Requests, len(requests)) - for i, r := range requests { - rCopy := *r - b.requests[i] = &rCopy - } - } - return b } // NewBlockFromStorage like NewBlock but used to create Block object when read it from DB // in this case no reason to copy parts, or re-calculate headers fields - they are all stored in DB -func NewBlockFromStorage(hash libcommon.Hash, header *Header, txs []Transaction, uncles []*Header, withdrawals []*Withdrawal, requests []*Request) *Block { - b := &Block{header: header, transactions: txs, uncles: uncles, withdrawals: withdrawals, requests: requests} +func NewBlockFromStorage(hash libcommon.Hash, header *Header, txs []Transaction, uncles []*Header, withdrawals []*Withdrawal) *Block { + b := &Block{header: header, transactions: txs, uncles: uncles, withdrawals: withdrawals} b.hash.Store(hash) return b } @@ -1087,7 +983,6 @@ func NewBlockFromNetwork(header *Header, body *Body) *Block { transactions: body.Transactions, uncles: body.Uncles, withdrawals: body.Withdrawals, - requests: body.Requests, } } @@ -1129,10 +1024,6 @@ func CopyHeader(h *Header) *Header { cpy.ParentBeaconBlockRoot = new(libcommon.Hash) cpy.ParentBeaconBlockRoot.SetBytes(h.ParentBeaconBlockRoot.Bytes()) } - if h.RequestsRoot != nil { - cpy.RequestsRoot = new(libcommon.Hash) - cpy.RequestsRoot.SetBytes(h.RequestsRoot.Bytes()) - } return &cpy } @@ -1164,16 +1055,11 @@ func (bb *Block) DecodeRLP(s *rlp.Stream) error { if err := decodeWithdrawals(&bb.withdrawals, s); err != nil { return err } - // decode Requests - bb.requests = []*Request{} - if err := decodeRequests(&bb.requests, s); err != nil { - return err - } return s.ListEnd() } -func (bb Block) payloadSize() (payloadSize int, txsLen, unclesLen, withdrawalsLen, requestsLen int) { +func (bb Block) payloadSize() (payloadSize int, txsLen, unclesLen, withdrawalsLen int) { // size of Header headerLen := bb.header.EncodingSize() payloadSize += rlp2.ListPrefixLen(headerLen) + headerLen @@ -1192,23 +1078,17 @@ func (bb Block) payloadSize() (payloadSize int, txsLen, unclesLen, withdrawalsLe payloadSize += rlp2.ListPrefixLen(withdrawalsLen) + withdrawalsLen } - // size of Requests - if bb.requests != nil { - requestsLen += encodingSizeGeneric(bb.requests) - payloadSize += rlp2.ListPrefixLen(requestsLen) + requestsLen - } - - return payloadSize, txsLen, unclesLen, withdrawalsLen, requestsLen + return payloadSize, txsLen, unclesLen, withdrawalsLen } func (bb Block) EncodingSize() int { - payloadSize, _, _, _, _ := bb.payloadSize() + payloadSize, _, _, _ := bb.payloadSize() return payloadSize } // EncodeRLP serializes b into the Ethereum RLP block format. 
func (bb Block) EncodeRLP(w io.Writer) error { - payloadSize, txsLen, unclesLen, withdrawalsLen, requestsLen := bb.payloadSize() + payloadSize, txsLen, unclesLen, withdrawalsLen := bb.payloadSize() var b [33]byte // prefix if err := EncodeStructSizePrefix(payloadSize, w, b[:]); err != nil { @@ -1232,12 +1112,6 @@ func (bb Block) EncodeRLP(w io.Writer) error { return err } } - // encode Requests - if bb.requests != nil { - if err := encodeRLPGeneric(bb.requests, requestsLen, w, b[:]); err != nil { - return err - } - } return nil } @@ -1280,8 +1154,6 @@ func (b *Block) BaseFee() *big.Int { func (b *Block) WithdrawalsHash() *libcommon.Hash { return b.header.WithdrawalsHash } func (b *Block) Withdrawals() Withdrawals { return b.withdrawals } func (b *Block) ParentBeaconBlockRoot() *libcommon.Hash { return b.header.ParentBeaconBlockRoot } -func (b *Block) RequestsRoot() *libcommon.Hash { return b.header.RequestsRoot } -func (b *Block) Requests() Requests { return b.requests } // Header returns a deep-copy of the entire block header using CopyHeader() func (b *Block) Header() *Header { return CopyHeader(b.header) } @@ -1289,7 +1161,7 @@ func (b *Block) HeaderNoCopy() *Header { return b.header } // Body returns the non-header content of the block. func (b *Block) Body() *Body { - bd := &Body{Transactions: b.transactions, Uncles: b.uncles, Withdrawals: b.withdrawals, Requests: b.requests} + bd := &Body{Transactions: b.transactions, Uncles: b.uncles, Withdrawals: b.withdrawals} bd.SendersFromTxs() return bd } @@ -1574,25 +1446,6 @@ func decodeWithdrawals(appendList *[]*Withdrawal, s *rlp.Stream) error { return checkErrListEnd(s, err) } -func decodeRequests(appendList *[]*Request, s *rlp.Stream) error { - var err error - if _, err = s.List(); err != nil { - if errors.Is(err, rlp.EOL) { - *appendList = nil - return nil - } - return fmt.Errorf("read requests: %v", err) - } - for err == nil { - var r Request - if err = r.DecodeRLP(s); err != nil { - break - } - *appendList = append(*appendList, &r) - } - return checkErrListEnd(s, err) -} - func checkErrListEnd(s *rlp.Stream, err error) error { if !errors.Is(err, rlp.EOL) { return err diff --git a/core/types/block_test.go b/core/types/block_test.go index 7d7ac4a4da3..9db421134ac 100644 --- a/core/types/block_test.go +++ b/core/types/block_test.go @@ -358,7 +358,7 @@ func makeBenchBlock() *Block { Extra: []byte("benchmark uncle"), } } - return NewBlock(header, txs, uncles, receipts, nil /* withdrawals */, nil /*requests*/) + return NewBlock(header, txs, uncles, receipts, nil /* withdrawals */) } func TestCanEncodeAndDecodeRawBody(t *testing.T) { @@ -506,7 +506,7 @@ func TestWithdrawalsEncoding(t *testing.T) { Amount: 5_000_000_000, } - block := NewBlock(&header, nil, nil, nil, withdrawals, nil /*requests*/) + block := NewBlock(&header, nil, nil, nil, withdrawals) _ = block.Size() encoded, err := rlp.EncodeToBytes(block) @@ -518,7 +518,7 @@ func TestWithdrawalsEncoding(t *testing.T) { assert.Equal(t, block, &decoded) // Now test with empty withdrawals - block2 := NewBlock(&header, nil, nil, nil, []*Withdrawal{}, nil /*requests*/) + block2 := NewBlock(&header, nil, nil, nil, []*Withdrawal{}) _ = block2.Size() encoded2, err := rlp.EncodeToBytes(block2) diff --git a/core/types/deposit.go b/core/types/deposit.go deleted file mode 100644 index 00b25a66080..00000000000 --- a/core/types/deposit.go +++ /dev/null @@ -1,111 +0,0 @@ -package types - -import ( - "bytes" - "encoding/binary" - "fmt" - - libcommon "github.com/ledgerwatch/erigon-lib/common" - 
"github.com/ledgerwatch/erigon/accounts/abi" - "github.com/ledgerwatch/erigon/rlp" -) - -const ( - pLen = 48 // pubkey size - wLen = 32 // withdrawalCredentials size - sLen = 96 // signature size -) - -var ( - // DepositABI is an ABI instance of beacon chain deposit events. - DepositABI = abi.ABI{Events: map[string]abi.Event{"DepositEvent": depositEvent}} - bytesT, _ = abi.NewType("bytes", "", nil) - depositEvent = abi.NewEvent("DepositEvent", "DepositEvent", false, abi.Arguments{ - {Name: "pubkey", Type: bytesT, Indexed: false}, - {Name: "withdrawal_credentials", Type: bytesT, Indexed: false}, - {Name: "amount", Type: bytesT, Indexed: false}, - {Name: "signature", Type: bytesT, Indexed: false}, - {Name: "index", Type: bytesT, Indexed: false}}, - ) -) - -type Deposit struct { - Pubkey [pLen]byte `json:"pubkey"` // public key of validator - WithdrawalCredentials libcommon.Hash `json:"withdrawalCredentials"` // beneficiary of the validator - Amount uint64 `json:"amount"` // deposit size in Gwei - Signature [sLen]byte `json:"signature"` // signature over deposit msg - Index uint64 `json:"index"` // deposit count value -} - -func (d *Deposit) requestType() byte { return DepositRequestType } -func (d *Deposit) encodeRLP(w *bytes.Buffer) error { return rlp.Encode(w, d) } -func (d *Deposit) decodeRLP(data []byte) error { return rlp.DecodeBytes(data, d) } -func (d *Deposit) copy() RequestData { - return &Deposit{ - Pubkey: d.Pubkey, - WithdrawalCredentials: d.WithdrawalCredentials, - Amount: d.Amount, - Signature: d.Signature, - Index: d.Index, - } -} - -func (d *Deposit) encodingSize() (encodingSize int) { - encodingSize++ - encodingSize += rlp.IntLenExcludingHead(d.Amount) - encodingSize++ - encodingSize += rlp.IntLenExcludingHead(d.Index) - - encodingSize += 180 // 1 + 48 + 1 + 32 + 1 + 1 + 96 (0x80 + pLen, 0x80 + wLen, 0xb8 + 2 + sLen) - return encodingSize -} - -// field type overrides for abi upacking -type depositUnpacking struct { - Pubkey []byte - WithdrawalCredentials []byte - Amount []byte - Signature []byte - Index []byte -} - -// unpackIntoDeposit unpacks a serialized DepositEvent. -func unpackIntoDeposit(data []byte) (*Deposit, error) { - var du depositUnpacking - if err := DepositABI.UnpackIntoInterface(&du, "DepositEvent", data); err != nil { - return nil, err - } - var d Deposit - copy(d.Pubkey[:], du.Pubkey) - copy(d.WithdrawalCredentials[:], du.WithdrawalCredentials) - d.Amount = binary.LittleEndian.Uint64(du.Amount) - copy(d.Signature[:], du.Signature) - d.Index = binary.LittleEndian.Uint64(du.Index) - - return &d, nil -} - -// ParseDepositLogs extracts the EIP-6110 deposit values from logs emitted by -// BeaconDepositContract. 
-func ParseDepositLogs(logs []*Log, depositContractAddress *libcommon.Address) (Requests, error) { - var deposits Requests - for _, log := range logs { - if log.Address == *depositContractAddress { - d, err := unpackIntoDeposit(log.Data) - if err != nil { - return nil, fmt.Errorf("unable to parse deposit data: %v", err) - } - deposits = append(deposits, NewRequest(d)) - } - } - return deposits, nil -} - -type Deposits []*Deposit - -func (ds Deposits) ToRequests() (reqs Requests) { - for _, d := range ds { - reqs = append(reqs, NewRequest(d)) - } - return -} diff --git a/core/types/dynamic_fee_tx.go b/core/types/dynamic_fee_tx.go index f41fb0402ad..11e4ec8b45b 100644 --- a/core/types/dynamic_fee_tx.go +++ b/core/types/dynamic_fee_tx.go @@ -426,13 +426,6 @@ func (tx *DynamicFeeTransaction) GetChainID() *uint256.Int { return tx.ChainID } -func (tx *DynamicFeeTransaction) cashedSender() (sender libcommon.Address, ok bool) { - s := tx.from.Load() - if s == nil { - return sender, false - } - return s.(libcommon.Address), true -} func (tx *DynamicFeeTransaction) Sender(signer Signer) (libcommon.Address, error) { if sc := tx.from.Load(); sc != nil { return sc.(libcommon.Address), nil diff --git a/core/types/encdec_test.go b/core/types/encdec_test.go index 082bc8245cf..97951782588 100644 --- a/core/types/encdec_test.go +++ b/core/types/encdec_test.go @@ -69,23 +69,6 @@ func (tr *TRand) RandWithdrawal() *Withdrawal { } } -func (tr *TRand) RandDeposit() *Deposit { - return &Deposit{ - Pubkey: [48]byte(tr.RandBytes(48)), - WithdrawalCredentials: tr.RandHash(), - Amount: *tr.RandUint64(), - Signature: [96]byte(tr.RandBytes(96)), - Index: *tr.RandUint64(), - } -} - -func (tr *TRand) RandRequest() *Request { - d := tr.RandDeposit() - var r Request - r.inner = d.copy() - return &r -} - func (tr *TRand) RandHeader() *Header { wHash := tr.RandHash() pHash := tr.RandHash() @@ -227,21 +210,11 @@ func (tr *TRand) RandWithdrawals(size int) []*Withdrawal { } return withdrawals } - -func (tr *TRand) RandRequests(size int) []*Request { - requests := make([]*Request, size) - for i := 0; i < size; i++ { - requests[i] = tr.RandRequest() - } - return requests -} - func (tr *TRand) RandRawBody() *RawBody { return &RawBody{ Transactions: tr.RandRawTransactions(tr.RandIntInRange(1, 6)), Uncles: tr.RandHeaders(tr.RandIntInRange(1, 6)), Withdrawals: tr.RandWithdrawals(tr.RandIntInRange(1, 6)), - Requests: tr.RandRequests(tr.RandIntInRange(1, 6)), } } @@ -268,7 +241,6 @@ func (tr *TRand) RandBody() *Body { Transactions: tr.RandTransactions(tr.RandIntInRange(1, 6)), Uncles: tr.RandHeaders(tr.RandIntInRange(1, 6)), Withdrawals: tr.RandWithdrawals(tr.RandIntInRange(1, 6)), - Requests: tr.RandRequests(tr.RandIntInRange(1, 6)), } } @@ -282,13 +254,13 @@ func isEqualBytes(a, b []byte) bool { return true } -func check(t *testing.T, f string, want, got interface{}) { - if !reflect.DeepEqual(want, got) { - t.Errorf("%s mismatch: want %v, got %v", f, want, got) +func check(t *testing.T, f string, got, want interface{}) { + if !reflect.DeepEqual(got, want) { + t.Errorf("%s mismatch: got %v, want %v", f, got, want) } } -func checkHeaders(t *testing.T, a, b *Header) { +func compareHeaders(t *testing.T, a, b *Header) { check(t, "Header.ParentHash", a.ParentHash, b.ParentHash) check(t, "Header.UncleHash", a.UncleHash, b.UncleHash) check(t, "Header.Coinbase", a.Coinbase, b.Coinbase) @@ -311,7 +283,7 @@ func checkHeaders(t *testing.T, a, b *Header) { check(t, "Header.ParentBeaconBlockRoot", a.ParentBeaconBlockRoot, b.ParentBeaconBlockRoot) 
} -func checkWithdrawals(t *testing.T, a, b *Withdrawal) { +func compareWithdrawals(t *testing.T, a, b *Withdrawal) { check(t, "Withdrawal.Index", a.Index, b.Index) check(t, "Withdrawal.Validator", a.Validator, b.Validator) check(t, "Withdrawal.Address", a.Address, b.Address) @@ -339,99 +311,85 @@ func compareTransactions(t *testing.T, a, b Transaction) { check(t, "Tx.S", s1, s2) } -func compareDeposits(t *testing.T, a, b *Deposit) { - check(t, "Deposit.Pubkey", a.Pubkey, b.Pubkey) - check(t, "Deposit.WithdrawalCredentials", a.WithdrawalCredentials, b.WithdrawalCredentials) - check(t, "Deposit.Amount", a.Amount, b.Amount) - check(t, "Deposit.Signature", a.Signature, b.Signature) - check(t, "Deposit.Index", a.Index, b.Index) -} +// func compareDeposits(t *testing.T, a, b *Deposit) { +// check(t, "Deposit.Pubkey", a.Pubkey, b.Pubkey) +// check(t, "Deposit.WithdrawalCredentials", a.WithdrawalCredentials, b.WithdrawalCredentials) +// check(t, "Deposit.Amount", a.Amount, b.Amount) +// check(t, "Deposit.Signature", a.Signature, b.Signature) +// check(t, "Deposit.Index", a.Index, b.Index) +// } + +func compareRawBodies(t *testing.T, a, b *RawBody) error { -func checkRequests(t *testing.T, a, b *Request) { - if a.Type() != b.Type() { - t.Errorf("request type mismatch: request-a: %v, request-b: %v", a.Type(), b.Type()) + atLen, btLen := len(a.Transactions), len(b.Transactions) + if atLen != btLen { + return fmt.Errorf("transactions len mismatch: expected: %v, got: %v", atLen, btLen) } - switch a.Type() { - case DepositRequestType: - c := a.inner.(*Deposit) - d := b.inner.(*Deposit) - compareDeposits(t, c, d) - default: - t.Errorf("unknown request type: %v", a.Type()) + for i := 0; i < atLen; i++ { + if !isEqualBytes(a.Transactions[i], b.Transactions[i]) { + return fmt.Errorf("byte transactions are not equal") + } } -} -func compareHeaders(t *testing.T, a, b []*Header) error { - auLen, buLen := len(a), len(b) + auLen, buLen := len(a.Uncles), len(b.Uncles) if auLen != buLen { return fmt.Errorf("uncles len mismatch: expected: %v, got: %v", auLen, buLen) } for i := 0; i < auLen; i++ { - checkHeaders(t, a[i], b[i]) + compareHeaders(t, a.Uncles[i], b.Uncles[i]) } - return nil -} -func compareWithdrawals(t *testing.T, a, b []*Withdrawal) error { - awLen, bwLen := len(a), len(b) + awLen, bwLen := len(a.Withdrawals), len(b.Withdrawals) if awLen != bwLen { - return fmt.Errorf("withdrawals len mismatch: expected: %v, got: %v", awLen, bwLen) + return fmt.Errorf("withdrawals len mismatch: expected: %v, got: %v", awLen, bwLen) } for i := 0; i < awLen; i++ { - checkWithdrawals(t, a[i], b[i]) - } - return nil -} - -func compareRequests(t *testing.T, a, b []*Request) error { - arLen, brLen := len(a), len(b) - if arLen != brLen { - return fmt.Errorf("requests len mismatch: expected: %v, got: %v", arLen, brLen) + compareWithdrawals(t, a.Withdrawals[i], b.Withdrawals[i]) } - for i := 0; i < arLen; i++ { - checkRequests(t, a[i], b[i]) - } return nil } -func compareRawBodies(t *testing.T, a, b *RawBody) error { +func compareBodies(t *testing.T, a, b *Body) error { atLen, btLen := len(a.Transactions), len(b.Transactions) if atLen != btLen { - return fmt.Errorf("transactions len mismatch: expected: %v, got: %v", atLen, btLen) + return fmt.Errorf("txns len mismatch: expected: %v, got: %v", atLen, btLen) } for i := 0; i < atLen; i++ { - if !isEqualBytes(a.Transactions[i], b.Transactions[i]) { - return fmt.Errorf("byte transactions are not equal") - } + compareTransactions(t, a.Transactions[i], b.Transactions[i]) } -
compareHeaders(t, a.Uncles, b.Uncles) - compareWithdrawals(t, a.Withdrawals, b.Withdrawals) - compareRequests(t, a.Requests, b.Requests) - - return nil -} + auLen, buLen := len(a.Uncles), len(b.Uncles) + if auLen != buLen { + return fmt.Errorf("uncles len mismatch: expected: %v, got: %v", auLen, buLen) + } -func compareBodies(t *testing.T, a, b *Body) error { + for i := 0; i < auLen; i++ { + compareHeaders(t, a.Uncles[i], b.Uncles[i]) + } - atLen, btLen := len(a.Transactions), len(b.Transactions) - if atLen != btLen { - return fmt.Errorf("txns len mismatch: expected: %v, got: %v", atLen, btLen) + awLen, bwLen := len(a.Withdrawals), len(b.Withdrawals) + if awLen != bwLen { + return fmt.Errorf("withdrawals len mismatch: expected: %v, got: %v", awLen, bwLen) } - for i := 0; i < atLen; i++ { - compareTransactions(t, a.Transactions[i], b.Transactions[i]) + for i := 0; i < awLen; i++ { + compareWithdrawals(t, a.Withdrawals[i], b.Withdrawals[i]) } - compareHeaders(t, a.Uncles, b.Uncles) - compareWithdrawals(t, a.Withdrawals, b.Withdrawals) - compareRequests(t, a.Requests, b.Requests) + // adLen, bdLen := len(a.deposits), len(b.deposits) + // if adLen != bdLen { + // return fmt.Errorf("deposits len mismatch: expected: %v, got: %v", adLen, bdLen) + // } + + // for i := 0; i < adLen; i++ { + // compareDeposits(t, a.deposits[i], b.deposits[i]) + // } return nil } @@ -478,27 +436,7 @@ func TestBodyEncodeDecodeRLP(t *testing.T) { } if err := compareBodies(t, enc, dec); err != nil { - t.Errorf("error: compareBodies: %v", err) + t.Errorf("error: compareBodies: %v", err) } } } -func TestDepositEncodeDecode(t *testing.T) { - tr := NewTRand() - var buf bytes.Buffer - for i := 0; i < RUNS; i++ { - enc := tr.RandRequest() - buf.Reset() - if err := enc.EncodeRLP(&buf); err != nil { - t.Errorf("error: deposit.EncodeRLP(): %v", err) - } - s := rlp.NewStream(bytes.NewReader(buf.Bytes()), 0) - dec := &Request{} - if err := dec.DecodeRLP(s); err != nil { - t.Errorf("error: Deposit.DecodeRLP(): %v", err) - } - a := enc.inner.(*Deposit) - b := dec.inner.(*Deposit) - compareDeposits(t, a, b) - } } diff --git a/core/types/legacy_tx.go b/core/types/legacy_tx.go index efe0d7ed583..21139d9aa22 100644 --- a/core/types/legacy_tx.go +++ b/core/types/legacy_tx.go @@ -289,11 +289,10 @@ func (tx *LegacyTx) EncodeRLP(w io.Writer) error { return nil } -func (tx *LegacyTx) DecodeRLP(s *rlp.Stream) error { - _, err := s.List() - if err != nil { - return fmt.Errorf("legacy tx must be a list: %w", err) - } +// DecodeRLP decodes LegacyTx but with the list token already consumed and encodingSize being presented +func (tx *LegacyTx) DecodeRLP(s *rlp.Stream, encodingSize uint64) error { + var err error + s.NewList(encodingSize) if tx.Nonce, err = s.Uint(); err != nil { return fmt.Errorf("read Nonce: %w", err) } @@ -431,13 +430,6 @@ func (tx *LegacyTx) GetChainID() *uint256.Int { return DeriveChainId(&tx.V) } -func (tx *LegacyTx) cashedSender() (sender libcommon.Address, ok bool) { - s := tx.from.Load() - if s == nil { - return sender, false - } - return s.(libcommon.Address), true -} func (tx *LegacyTx) Sender(signer Signer) (libcommon.Address, error) { if sc := tx.from.Load(); sc != nil { return sc.(libcommon.Address), nil diff --git a/core/types/receipt.go b/core/types/receipt.go index e8378b966c5..8741cbc6eae 100644 --- a/core/types/receipt.go +++ b/core/types/receipt.go @@ -32,6 +32,14 @@ import ( // go:generate gencodec -type Receipt -field-override receiptMarshaling -out gen_receipt_json.go +// disabling codecgen generation since it does not work for go1.22
+// to get it working we need to update github.com/ugorji/go/codec to v1.2.12 which has the fix: +// - https://github.com/ugorji/go/commit/8286c2dc986535d23e3fad8d3e816b9dd1e5aea6 +// however updating the lib has caused us issues in the past, and we don't have good unit test coverage for updating atm +// we also use this for storing Receipts and Logs in the DB - we won't be doing that in Erigon 3 +// do not regen, more context: https://github.com/ledgerwatch/erigon/pull/10105#pullrequestreview-2027423601 +// go:generate codecgen -o receipt_codecgen_gen.go -r "^Receipts$|^Receipt$|^Logs$|^Log$" -st "codec" -j=false -nx=true -ta=true -oe=false -d 2 receipt.go log.go + var ( receiptStatusFailedRLP = []byte{} receiptStatusSuccessfulRLP = []byte{0x01} @@ -52,23 +60,21 @@ type Receipt struct { Type uint8 `json:"type,omitempty"` PostState []byte `json:"root" codec:"1"` Status uint64 `json:"status" codec:"2"` - CumulativeGasUsed uint64 `json:"cumulativeGasUsed" gencodec:"required"` - Bloom Bloom `json:"logsBloom" gencodec:"required"` - Logs Logs `json:"logs" gencodec:"required"` + CumulativeGasUsed uint64 `json:"cumulativeGasUsed" gencodec:"required" codec:"3"` + Bloom Bloom `json:"logsBloom" gencodec:"required" codec:"-"` + Logs Logs `json:"logs" gencodec:"required" codec:"-"` // Implementation fields: These fields are added by geth when processing a transaction. // They are stored in the chain database. - TxHash libcommon.Hash `json:"transactionHash" gencodec:"required"` - ContractAddress libcommon.Address `json:"contractAddress"` - GasUsed uint64 `json:"gasUsed" gencodec:"required"` + TxHash libcommon.Hash `json:"transactionHash" gencodec:"required" codec:"-"` + ContractAddress libcommon.Address `json:"contractAddress" codec:"-"` + GasUsed uint64 `json:"gasUsed" gencodec:"required" codec:"-"` // Inclusion information: These fields provide information about the inclusion of the // transaction corresponding to this receipt. - BlockHash libcommon.Hash `json:"blockHash,omitempty"` - BlockNumber *big.Int `json:"blockNumber,omitempty"` - TransactionIndex uint `json:"transactionIndex"` - - firstLogIndex uint32 `json:"-"` // field which used to store in db and re-calc + BlockHash libcommon.Hash `json:"blockHash,omitempty" codec:"-"` + BlockNumber *big.Int `json:"blockNumber,omitempty" codec:"-"` + TransactionIndex uint `json:"transactionIndex" codec:"-"` } type receiptMarshaling struct { @@ -93,7 +99,28 @@ type receiptRLP struct { type storedReceiptRLP struct { PostStateOrStatus []byte CumulativeGasUsed uint64 - FirstLogIndex uint32 // Logs have their own incremental Index within block. To allow calc it without re-executing whole block - can store it in Receipt + Logs []*LogForStorage +} + +// v4StoredReceiptRLP is the storage encoding of a receipt used in database version 4. +type v4StoredReceiptRLP struct { + PostStateOrStatus []byte + CumulativeGasUsed uint64 + TxHash libcommon.Hash + ContractAddress libcommon.Address + Logs []*LogForStorage + GasUsed uint64 +} + +// v3StoredReceiptRLP is the original storage encoding of a receipt including some unnecessary fields. +type v3StoredReceiptRLP struct { + PostStateOrStatus []byte + CumulativeGasUsed uint64 + //Bloom Bloom + //TxHash libcommon.Hash + ContractAddress libcommon.Address + Logs []*LogForStorage + GasUsed uint64 } // NewReceipt creates a barebone transaction receipt, copying the init fields.
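
To make concrete what the storedReceiptRLP layout above persists (status, cumulative gas and logs; Bloom and the implementation fields are re-derived rather than stored), here is a hypothetical round-trip test, a sketch that is not part of this diff:

package types

import (
	"testing"

	"github.com/ledgerwatch/erigon/rlp"
)

// TestStoredReceiptRoundTrip encodes a ReceiptForStorage and decodes it
// back, checking that the fields kept by storedReceiptRLP survive.
func TestStoredReceiptRoundTrip(t *testing.T) {
	r := &ReceiptForStorage{
		Status:            ReceiptStatusSuccessful,
		CumulativeGasUsed: 21000,
		Logs:              Logs{{Data: []byte{0x01}}},
	}
	blob, err := rlp.EncodeToBytes(r)
	if err != nil {
		t.Fatal(err)
	}
	var dec ReceiptForStorage
	if err := rlp.DecodeBytes(blob, &dec); err != nil {
		t.Fatal(err)
	}
	if dec.CumulativeGasUsed != r.CumulativeGasUsed || len(dec.Logs) != 1 {
		t.Fatalf("lost fields in round-trip: %+v", dec)
	}
}
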
@@ -301,45 +328,99 @@ func (r *Receipt) Copy() *Receipt { type ReceiptsForStorage []*ReceiptForStorage -// ReceiptForStorage is a wrapper around a Receipt with RLP serialization -// that omits the Bloom field and deserialization that re-computes it. +// ReceiptForStorage is a wrapper around a Receipt that flattens and parses the +// entire content of a receipt, as opposed to only the consensus fields originally. type ReceiptForStorage Receipt // EncodeRLP implements rlp.Encoder, and flattens all content fields of a receipt // into an RLP stream. func (r *ReceiptForStorage) EncodeRLP(w io.Writer) error { - var firstLogIndex uint32 - if len(r.Logs) > 0 { - firstLogIndex = uint32(r.Logs[0].Index) - } - return rlp.Encode(w, &storedReceiptRLP{ + enc := &storedReceiptRLP{ PostStateOrStatus: (*Receipt)(r).statusEncoding(), CumulativeGasUsed: r.CumulativeGasUsed, - FirstLogIndex: firstLogIndex, - }) + Logs: make([]*LogForStorage, len(r.Logs)), + } + for i, log := range r.Logs { + enc.Logs[i] = (*LogForStorage)(log) + } + return rlp.Encode(w, enc) } // DecodeRLP implements rlp.Decoder, and loads both consensus and implementation // fields of a receipt from an RLP stream. func (r *ReceiptForStorage) DecodeRLP(s *rlp.Stream) error { + // Retrieve the entire receipt blob as we need to try multiple decoders + blob, err := s.Raw() + if err != nil { + return err + } + // Try decoding from the newest format for future proofness, then the older one + // for old nodes that just upgraded. V4 was an intermediate unreleased format so + // we do need to decode it, but it's not common (try last). + if err := decodeStoredReceiptRLP(r, blob); err == nil { + return nil + } + if err := decodeV3StoredReceiptRLP(r, blob); err == nil { + return nil + } + return decodeV4StoredReceiptRLP(r, blob) +} + +func decodeStoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { var stored storedReceiptRLP - if err := s.Decode(&stored); err != nil { + if err := rlp.DecodeBytes(blob, &stored); err != nil { return err } if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { return err } r.CumulativeGasUsed = stored.CumulativeGasUsed - r.firstLogIndex = stored.FirstLogIndex + r.Logs = make([]*Log, len(stored.Logs)) + for i, log := range stored.Logs { + r.Logs[i] = (*Log)(log) + } + //r.Bloom = CreateBloom(Receipts{(*Receipt)(r)}) + + return nil +} - //r.Logs = make([]*Log, len(stored.Logs)) - //for i, log := range stored.Logs { - // r.Logs[i] = (*Log)(log) - //} +func decodeV4StoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { + var stored v4StoredReceiptRLP + if err := rlp.DecodeBytes(blob, &stored); err != nil { + return err + } + if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { + return err + } + r.CumulativeGasUsed = stored.CumulativeGasUsed + r.TxHash = stored.TxHash + r.ContractAddress = stored.ContractAddress + r.GasUsed = stored.GasUsed + r.Logs = make([]*Log, len(stored.Logs)) + for i, log := range stored.Logs { + r.Logs[i] = (*Log)(log) + } //r.Bloom = CreateBloom(Receipts{(*Receipt)(r)}) return nil +} +func decodeV3StoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { + var stored v3StoredReceiptRLP + if err := rlp.DecodeBytes(blob, &stored); err != nil { + return err + } + if err := (*Receipt)(r).setStatus(stored.PostStateOrStatus); err != nil { + return err + } + r.CumulativeGasUsed = stored.CumulativeGasUsed + r.ContractAddress = stored.ContractAddress + r.GasUsed = stored.GasUsed + r.Logs = make([]*Log, len(stored.Logs)) + for i, log := range stored.Logs { + 
r.Logs[i] = (*Log)(log) + } + return nil } // Receipts implements DerivableList for receipts. @@ -427,49 +508,3 @@ func (r Receipts) DeriveFields(hash libcommon.Hash, number uint64, txs Transacti } return nil } - -// DeriveFields fills the receipts with their computed fields based on consensus -// data and contextual infos like containing block and transactions. -func (rl Receipts) DeriveFieldsV3ForSingleReceipt(i int, blockHash libcommon.Hash, blockNum uint64, txn Transaction) (*Receipt, error) { - r := rl[i] - logIndex := r.firstLogIndex // logIdx is unique within the block and starts from 0 - - sender, ok := txn.cashedSender() - if !ok { - return nil, fmt.Errorf("tx must have cached sender") - } - - blockNumber := new(big.Int).SetUint64(blockNum) - // The transaction type and hash can be retrieved from the transaction itself - r.Type = txn.Type() - r.TxHash = txn.Hash() - - // block location fields - r.BlockHash = blockHash - r.BlockNumber = blockNumber - r.TransactionIndex = uint(i) - - // The contract address can be derived from the transaction itself - if txn.GetTo() == nil { - // If one wants to deploy a contract, one needs to send a transaction that does not have `To` field - // and then the address of the contract one is creating this way will depend on the `tx.From` - // and the nonce of the creating account (which is `tx.From`). - r.ContractAddress = crypto.CreateAddress(sender, txn.GetNonce()) - } - // The used gas can be calculated based on previous r - if i == 0 { - r.GasUsed = r.CumulativeGasUsed - } else { - r.GasUsed = r.CumulativeGasUsed - rl[i-1].CumulativeGasUsed - } - // The derived log fields can simply be set from the block and transaction - for j := 0; j < len(r.Logs); j++ { - r.Logs[j].BlockNumber = blockNum - r.Logs[j].BlockHash = blockHash - r.Logs[j].TxHash = r.TxHash - r.Logs[j].TxIndex = uint(i) - r.Logs[j].Index = uint(logIndex) - logIndex++ - } - return r, nil -} diff --git a/core/types/receipt_codecgen_gen.go b/core/types/receipt_codecgen_gen.go new file mode 100644 index 00000000000..e2bc7db9db1 --- /dev/null +++ b/core/types/receipt_codecgen_gen.go @@ -0,0 +1,769 @@ +//go:build go1.6 +// +build go1.6 + +// Code generated by codecgen - DO NOT EDIT. + +package types + +import ( + "errors" + libcommon "github.com/ledgerwatch/erigon-lib/common" + codec1978 "github.com/ugorji/go/codec" + pkg2_big "math/big" + "runtime" + "strconv" +) + +const ( + // ----- content types ---- + codecSelferCcUTF82 = 1 + codecSelferCcRAW2 = 255 + // ----- value types used ---- + codecSelferValueTypeArray2 = 10 + codecSelferValueTypeMap2 = 9 + codecSelferValueTypeString2 = 6 + codecSelferValueTypeInt2 = 2 + codecSelferValueTypeUint2 = 3 + codecSelferValueTypeFloat2 = 4 + codecSelferValueTypeNil2 = 1 + codecSelferBitsize2 = uint8(32 << (^uint(0) >> 63)) + codecSelferDecContainerLenNil2 = -2147483648 +) + +var ( + errCodecSelferOnlyMapOrArrayEncodeToStruct2 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer2 struct{} + +func codecSelfer2False() bool { return false } +func codecSelfer2True() bool { return true } + +func init() { + if codec1978.GenVersion != 19 { + _, file, _, _ := runtime.Caller(0) + ver := strconv.FormatInt(int64(codec1978.GenVersion), 10) + panic(errors.New("codecgen version mismatch: current: 19, need " + ver + ". 
Re-generate file: " + file)) + } + if false { // reference the types, but skip this branch at build/run time + var _ libcommon.Address + var _ pkg2_big.Int + } +} + +func (x *Receipt) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + if !z.EncBinary() && z.IsJSONHandle() { + z.EncJSONMarshal(*x) + } else { + yy2arr2 := z.EncBasicHandle().StructToArray + _ = yy2arr2 + const yyr2 bool = false // struct tag has 'toArray' + z.EncWriteArrayStart(4) + z.EncWriteArrayElem() + r.EncodeUint(uint64(x.Type)) + z.EncWriteArrayElem() + if x.PostState == nil { + r.EncodeNil() + } else { + r.EncodeStringBytesRaw([]byte(x.PostState)) + } // end block: if x.PostState slice == nil + z.EncWriteArrayElem() + r.EncodeUint(uint64(x.Status)) + z.EncWriteArrayElem() + r.EncodeUint(uint64(x.CumulativeGasUsed)) + z.EncWriteArrayEnd() + } + } +} + +func (x *Receipt) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(x) + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeNil2 { + *(x) = Receipt{} + } else if yyct2 == codecSelferValueTypeMap2 { + yyl2 := z.DecReadMapStart() + if yyl2 == 0 { + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + z.DecReadMapEnd() + } else if yyct2 == codecSelferValueTypeArray2 { + yyl2 := z.DecReadArrayStart() + if yyl2 != 0 { + x.codecDecodeSelfFromArray(yyl2, d) + } + z.DecReadArrayEnd() + } else { + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct2) + } + } +} + +func (x *Receipt) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if z.DecCheckBreak() { + break + } + } + z.DecReadMapElemKey() + yys3 := z.StringView(r.DecodeStringAsBytes()) + z.DecReadMapElemValue() + switch yys3 { + case "Type": + x.Type = (uint8)(z.C.UintV(r.DecodeUint64(), 8)) + case "1": + x.PostState = r.DecodeBytes(([]byte)(x.PostState), false) + case "2": + x.Status = (uint64)(r.DecodeUint64()) + case "3": + x.CumulativeGasUsed = (uint64)(r.DecodeUint64()) + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 +} + +func (x *Receipt) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj9 int + var yyb9 bool + var yyhl9 bool = l >= 0 + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = z.DecCheckBreak() + } + if yyb9 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + x.Type = (uint8)(z.C.UintV(r.DecodeUint64(), 8)) + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = z.DecCheckBreak() + } + if yyb9 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + x.PostState = r.DecodeBytes(([]byte)(x.PostState), false) + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = z.DecCheckBreak() + } + if yyb9 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + x.Status = (uint64)(r.DecodeUint64()) + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = z.DecCheckBreak() + } + if yyb9 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + x.CumulativeGasUsed = (uint64)(r.DecodeUint64()) + for { + yyj9++ + if yyhl9 { + yyb9 = yyj9 > l + } else { + yyb9 = z.DecCheckBreak() + } + if yyb9 { + 
break + } + z.DecReadArrayElem() + z.DecStructFieldNotFound(yyj9-1, "") + } +} + +func (x *Receipt) IsCodecEmpty() bool { + return !(x.Type != 0 && len(x.PostState) != 0 && x.Status != 0 && x.CumulativeGasUsed != 0 && true) +} + +func (x Receipts) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + h.encReceipts((Receipts)(x), e) + } // end block: if x slice == nil +} + +func (x *Receipts) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + h.decReceipts((*Receipts)(x), d) +} + +func (x *Log) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + if !z.EncBinary() && z.IsJSONHandle() { + z.EncJSONMarshal(*x) + } else { + yy2arr2 := z.EncBasicHandle().StructToArray + _ = yy2arr2 + const yyr2 bool = false // struct tag has 'toArray' + z.EncWriteArrayStart(3) + z.EncWriteArrayElem() + yy6 := &x.Address + if !z.EncBinary() { + z.EncTextMarshal(*yy6) + } else { + h.enccommon_Address((*libcommon.Address)(yy6), e) + } + z.EncWriteArrayElem() + if x.Topics == nil { + r.EncodeNil() + } else { + h.encSlicecommon_Hash(([]libcommon.Hash)(x.Topics), e) + } // end block: if x.Topics slice == nil + z.EncWriteArrayElem() + if x.Data == nil { + r.EncodeNil() + } else { + r.EncodeStringBytesRaw([]byte(x.Data)) + } // end block: if x.Data slice == nil + z.EncWriteArrayEnd() + } + } +} + +func (x *Log) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(x) + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeNil2 { + *(x) = Log{} + } else if yyct2 == codecSelferValueTypeMap2 { + yyl2 := z.DecReadMapStart() + if yyl2 == 0 { + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + z.DecReadMapEnd() + } else if yyct2 == codecSelferValueTypeArray2 { + yyl2 := z.DecReadArrayStart() + if yyl2 != 0 { + x.codecDecodeSelfFromArray(yyl2, d) + } + z.DecReadArrayEnd() + } else { + panic(errCodecSelferOnlyMapOrArrayEncodeToStruct2) + } + } +} + +func (x *Log) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if z.DecCheckBreak() { + break + } + } + z.DecReadMapElemKey() + yys3 := z.StringView(r.DecodeStringAsBytes()) + z.DecReadMapElemValue() + switch yys3 { + case "1": + if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(&x.Address) + } else { + h.deccommon_Address((*libcommon.Address)(&x.Address), d) + } + case "2": + h.decSlicecommon_Hash((*[]libcommon.Hash)(&x.Topics), d) + case "3": + x.Data = r.DecodeBytes(([]byte)(x.Data), false) + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 +} + +func (x *Log) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = z.DecCheckBreak() + } + if yyb10 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(&x.Address) + } else { + 
h.deccommon_Address((*libcommon.Address)(&x.Address), d) + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = z.DecCheckBreak() + } + if yyb10 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + h.decSlicecommon_Hash((*[]libcommon.Hash)(&x.Topics), d) + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = z.DecCheckBreak() + } + if yyb10 { + z.DecReadArrayEnd() + return + } + z.DecReadArrayElem() + x.Data = r.DecodeBytes(([]byte)(x.Data), false) + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = z.DecCheckBreak() + } + if yyb10 { + break + } + z.DecReadArrayElem() + z.DecStructFieldNotFound(yyj10-1, "") + } +} + +func (x *Log) IsCodecEmpty() bool { + return !(len(x.Address) != 0 && len(x.Topics) != 0 && len(x.Data) != 0 && true) +} + +func (x Logs) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + h.encLogs((Logs)(x), e) + } // end block: if x slice == nil +} + +func (x *Logs) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + h.decLogs((*Logs)(x), d) +} + +func (x codecSelfer2) encReceipts(v Receipts, e *codec1978.Encoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if v == nil { + r.EncodeNil() + return + } + z.EncWriteArrayStart(len(v)) + for _, yyv1 := range v { + z.EncWriteArrayElem() + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncWriteArrayEnd() +} + +func (x codecSelfer2) decReceipts(v *Receipts, d *codec1978.Decoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyh1.IsNil { + if yyv1 != nil { + yyv1 = nil + yyc1 = true + } + } else if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []*Receipt{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else { + yyhl1 := yyl1 > 0 + var yyrl1 int + _ = yyrl1 + if yyhl1 { + if yyl1 > cap(yyv1) { + yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*Receipt, yyrl1) + } + yyc1 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + } + var yyj1 int + for yyj1 = 0; (yyhl1 && yyj1 < yyl1) || !(yyhl1 || z.DecCheckBreak()); yyj1++ { // bounds-check-elimination + if yyj1 == 0 && yyv1 == nil { + if yyhl1 { + yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + } else { + yyrl1 = 8 + } + yyv1 = make([]*Receipt, yyrl1) + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + var yydb1 bool + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) + yyc1 = true + } + if yydb1 { + z.DecSwallow() + } else { + if r.TryNil() { + yyv1[yyj1] = nil + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(Receipt) + } + yyv1[yyj1].CodecDecodeSelf(d) + } + } + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = make([]*Receipt, 0) + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer2) enccommon_Address(v *libcommon.Address, e *codec1978.Encoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if v == nil { + r.EncodeNil() + return + } + r.EncodeStringBytesRaw(((*[20]byte)(v))[:]) +} + +func (x codecSelfer2) deccommon_Address(v *libcommon.Address, d 
*codec1978.Decoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + r.DecodeBytes(((*[20]byte)(v))[:], true) +} + +func (x codecSelfer2) encSlicecommon_Hash(v []libcommon.Hash, e *codec1978.Encoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if v == nil { + r.EncodeNil() + return + } + z.EncWriteArrayStart(len(v)) + for _, yyv1 := range v { + z.EncWriteArrayElem() + yy2 := &yyv1 + if !z.EncBinary() { + z.EncTextMarshal(*yy2) + } else { + h.enccommon_Hash((*libcommon.Hash)(yy2), e) + } + } + z.EncWriteArrayEnd() +} + +func (x codecSelfer2) decSlicecommon_Hash(v *[]libcommon.Hash, d *codec1978.Decoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyh1.IsNil { + if yyv1 != nil { + yyv1 = nil + yyc1 = true + } + } else if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []libcommon.Hash{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else { + yyhl1 := yyl1 > 0 + var yyrl1 int + _ = yyrl1 + if yyhl1 { + if yyl1 > cap(yyv1) { + yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]libcommon.Hash, yyrl1) + } + yyc1 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + } + var yyj1 int + for yyj1 = 0; (yyhl1 && yyj1 < yyl1) || !(yyhl1 || z.DecCheckBreak()); yyj1++ { // bounds-check-elimination + if yyj1 == 0 && yyv1 == nil { + if yyhl1 { + yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 32) + } else { + yyrl1 = 8 + } + yyv1 = make([]libcommon.Hash, yyrl1) + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + var yydb1 bool + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, libcommon.Hash{}) + yyc1 = true + } + if yydb1 { + z.DecSwallow() + } else { + if !z.DecBinary() && z.IsJSONHandle() { + z.DecJSONUnmarshal(&yyv1[yyj1]) + } else { + h.deccommon_Hash((*libcommon.Hash)(&yyv1[yyj1]), d) + } + } + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = make([]libcommon.Hash, 0) + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} + +func (x codecSelfer2) enccommon_Hash(v *libcommon.Hash, e *codec1978.Encoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if v == nil { + r.EncodeNil() + return + } + r.EncodeStringBytesRaw(((*[32]byte)(v))[:]) +} + +func (x codecSelfer2) deccommon_Hash(v *libcommon.Hash, d *codec1978.Decoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + r.DecodeBytes(((*[32]byte)(v))[:], true) +} + +func (x codecSelfer2) encLogs(v Logs, e *codec1978.Encoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if v == nil { + r.EncodeNil() + return + } + z.EncWriteArrayStart(len(v)) + for _, yyv1 := range v { + z.EncWriteArrayElem() + if yyv1 == nil { + r.EncodeNil() + } else { + yyv1.CodecEncodeSelf(e) + } + } + z.EncWriteArrayEnd() +} + +func (x codecSelfer2) decLogs(v *Logs, d *codec1978.Decoder) { + var h codecSelfer2 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyh1.IsNil { + if yyv1 != nil { + yyv1 = nil + yyc1 = true + } + } else if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []*Log{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } 
else { + yyhl1 := yyl1 > 0 + var yyrl1 int + _ = yyrl1 + if yyhl1 { + if yyl1 > cap(yyv1) { + yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*Log, yyrl1) + } + yyc1 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + } + var yyj1 int + for yyj1 = 0; (yyhl1 && yyj1 < yyl1) || !(yyhl1 || z.DecCheckBreak()); yyj1++ { // bounds-check-elimination + if yyj1 == 0 && yyv1 == nil { + if yyhl1 { + yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + } else { + yyrl1 = 8 + } + yyv1 = make([]*Log, yyrl1) + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + var yydb1 bool + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) + yyc1 = true + } + if yydb1 { + z.DecSwallow() + } else { + if r.TryNil() { + yyv1[yyj1] = nil + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(Log) + } + yyv1[yyj1].CodecDecodeSelf(d) + } + } + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = make([]*Log, 0) + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } +} diff --git a/core/types/receipt_test.go b/core/types/receipt_test.go index 27d78251c7f..4eb2f1a9d67 100644 --- a/core/types/receipt_test.go +++ b/core/types/receipt_test.go @@ -21,11 +21,11 @@ import ( "errors" "math" "math/big" + "reflect" "testing" "github.com/holiman/uint256" libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/stretchr/testify/assert" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/u256" @@ -66,13 +66,11 @@ func TestLegacyReceiptDecoding(t *testing.T) { Address: libcommon.BytesToAddress([]byte{0x11}), Topics: []libcommon.Hash{libcommon.HexToHash("dead"), libcommon.HexToHash("beef")}, Data: []byte{0x01, 0x00, 0xff}, - Index: 999, }, { Address: libcommon.BytesToAddress([]byte{0x01, 0x11}), Topics: []libcommon.Hash{libcommon.HexToHash("dead"), libcommon.HexToHash("beef")}, Data: []byte{0x01, 0x00, 0xff}, - Index: 1000, }, }, TxHash: tx.Hash(), @@ -100,33 +98,34 @@ func TestLegacyReceiptDecoding(t *testing.T) { if dec.CumulativeGasUsed != receipt.CumulativeGasUsed { t.Fatalf("Receipt CumulativeGasUsed mismatch, want %v, have %v", receipt.CumulativeGasUsed, dec.CumulativeGasUsed) } - assert.Equal(t, uint32(receipt.Logs[0].Index), dec.firstLogIndex) - //if len(dec.Logs) != len(receipt.Logs) { - // t.Fatalf("Receipt log number mismatch, want %v, have %v", len(receipt.Logs), len(dec.Logs)) - //} - //for i := 0; i < len(dec.Logs); i++ { - // if dec.Logs[i].Address != receipt.Logs[i].Address { - // t.Fatalf("Receipt log %d address mismatch, want %v, have %v", i, receipt.Logs[i].Address, dec.Logs[i].Address) - // } - // if !reflect.DeepEqual(dec.Logs[i].Topics, receipt.Logs[i].Topics) { - // t.Fatalf("Receipt log %d topics mismatch, want %v, have %v", i, receipt.Logs[i].Topics, dec.Logs[i].Topics) - // } - // if !bytes.Equal(dec.Logs[i].Data, receipt.Logs[i].Data) { - // t.Fatalf("Receipt log %d data mismatch, want %v, have %v", i, receipt.Logs[i].Data, dec.Logs[i].Data) - // } - //} + if len(dec.Logs) != len(receipt.Logs) { + t.Fatalf("Receipt log number mismatch, want %v, have %v", len(receipt.Logs), len(dec.Logs)) + } + for i := 0; i < len(dec.Logs); i++ { + if dec.Logs[i].Address != receipt.Logs[i].Address { + t.Fatalf("Receipt log %d address mismatch, want %v, have %v", i, receipt.Logs[i].Address, dec.Logs[i].Address) + } + if !reflect.DeepEqual(dec.Logs[i].Topics, receipt.Logs[i].Topics) { + t.Fatalf("Receipt log 
%d topics mismatch, want %v, have %v", i, receipt.Logs[i].Topics, dec.Logs[i].Topics) + } + if !bytes.Equal(dec.Logs[i].Data, receipt.Logs[i].Data) { + t.Fatalf("Receipt log %d data mismatch, want %v, have %v", i, receipt.Logs[i].Data, dec.Logs[i].Data) + } + } }) } } func encodeAsStoredReceiptRLP(want *Receipt) ([]byte, error) { - w := bytes.NewBuffer(nil) - casted := ReceiptForStorage(*want) - err := casted.EncodeRLP(w) - if err != nil { - return nil, err + stored := &storedReceiptRLP{ + PostStateOrStatus: want.statusEncoding(), + CumulativeGasUsed: want.CumulativeGasUsed, + Logs: make([]*LogForStorage, len(want.Logs)), + } + for i, log := range want.Logs { + stored.Logs[i] = (*LogForStorage)(log) } - return w.Bytes(), nil + return rlp.EncodeToBytes(stored) } // Tests that receipt data can be correctly derived from the contextual infos @@ -177,7 +176,6 @@ func TestDeriveFields(t *testing.T) { TxHash: txs[0].Hash(), ContractAddress: libcommon.BytesToAddress([]byte{0x01, 0x11, 0x11}), GasUsed: 1, - firstLogIndex: 0, }, &Receipt{ PostState: libcommon.Hash{2}.Bytes(), @@ -189,7 +187,6 @@ func TestDeriveFields(t *testing.T) { TxHash: txs[1].Hash(), ContractAddress: libcommon.BytesToAddress([]byte{0x02, 0x22, 0x22}), GasUsed: 2, - firstLogIndex: 2, }, &Receipt{ Type: AccessListTxType, @@ -202,136 +199,69 @@ func TestDeriveFields(t *testing.T) { TxHash: txs[2].Hash(), ContractAddress: libcommon.BytesToAddress([]byte{0x03, 0x33, 0x33}), GasUsed: 3, - firstLogIndex: 4, }, } // Clear all the computed fields and re-derive them number := big.NewInt(1) hash := libcommon.BytesToHash([]byte{0x03, 0x14}) - t.Run("DeriveV1", func(t *testing.T) { - clearComputedFieldsOnReceipts(t, receipts) - if err := receipts.DeriveFields(hash, number.Uint64(), txs, []libcommon.Address{libcommon.BytesToAddress([]byte{0x0}), libcommon.BytesToAddress([]byte{0x0}), libcommon.BytesToAddress([]byte{0x0})}); err != nil { - t.Fatalf("DeriveFields(...) = %v, want ", err) - } - // Iterate over all the computed fields and check that they're correct - signer := MakeSigner(params.TestChainConfig, number.Uint64(), 0) + clearComputedFieldsOnReceipts(t, receipts) + if err := receipts.DeriveFields(hash, number.Uint64(), txs, []libcommon.Address{libcommon.BytesToAddress([]byte{0x0}), libcommon.BytesToAddress([]byte{0x0}), libcommon.BytesToAddress([]byte{0x0})}); err != nil { + t.Fatalf("DeriveFields(...) 
= %v, want ", err) + } + // Iterate over all the computed fields and check that they're correct + signer := MakeSigner(params.TestChainConfig, number.Uint64(), 0) - logIndex := uint(0) - for i, r := range receipts { - if r.Type != txs[i].Type() { - t.Errorf("receipts[%d].Type = %d, want %d", i, r.Type, txs[i].Type()) - } - if r.TxHash != txs[i].Hash() { - t.Errorf("receipts[%d].TxHash = %s, want %s", i, r.TxHash.String(), txs[i].Hash().String()) - } - if r.BlockHash != hash { - t.Errorf("receipts[%d].BlockHash = %s, want %s", i, r.BlockHash.String(), hash.String()) - } - if r.BlockNumber.Cmp(number) != 0 { - t.Errorf("receipts[%c].BlockNumber = %s, want %s", i, r.BlockNumber.String(), number.String()) - } - if r.TransactionIndex != uint(i) { - t.Errorf("receipts[%d].TransactionIndex = %d, want %d", i, r.TransactionIndex, i) - } - if r.GasUsed != txs[i].GetGas() { - t.Errorf("receipts[%d].GasUsed = %d, want %d", i, r.GasUsed, txs[i].GetGas()) - } - if txs[i].GetTo() != nil && r.ContractAddress != (libcommon.Address{}) { - t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, r.ContractAddress.String(), (libcommon.Address{}).String()) - } - from, _ := txs[i].Sender(*signer) - contractAddress := crypto.CreateAddress(from, txs[i].GetNonce()) - if txs[i].GetTo() == nil && r.ContractAddress != contractAddress { - t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, r.ContractAddress.String(), contractAddress.String()) - } - for j := range r.Logs { - if r.Logs[j].BlockNumber != number.Uint64() { - t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, r.Logs[j].BlockNumber, number.Uint64()) - } - if r.Logs[j].BlockHash != hash { - t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, r.Logs[j].BlockHash.String(), hash.String()) - } - if r.Logs[j].TxHash != txs[i].Hash() { - t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, r.Logs[j].TxHash.String(), txs[i].Hash().String()) - } - if r.Logs[j].TxHash != txs[i].Hash() { - t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, r.Logs[j].TxHash.String(), txs[i].Hash().String()) - } - if r.Logs[j].TxIndex != uint(i) { - t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, r.Logs[j].TxIndex, i) - } - if r.Logs[j].Index != logIndex { - t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, r.Logs[j].Index, logIndex) - } - logIndex++ - } + logIndex := uint(0) + for i := range receipts { + if receipts[i].Type != txs[i].Type() { + t.Errorf("receipts[%d].Type = %d, want %d", i, receipts[i].Type, txs[i].Type()) } - }) - - t.Run("DeriveV3", func(t *testing.T) { - clearComputedFieldsOnReceipts(t, receipts) - // Iterate over all the computed fields and check that they're correct - signer := MakeSigner(params.TestChainConfig, number.Uint64(), 0) - - logIndex := uint(0) - for i := range receipts { - txs[i].SetSender(libcommon.BytesToAddress([]byte{0x0})) - r, err := receipts.DeriveFieldsV3ForSingleReceipt(i, hash, number.Uint64(), txs[i]) - if err != nil { - panic(err) - } - - if r.Type != txs[i].Type() { - t.Errorf("receipts[%d].Type = %d, want %d", i, r.Type, txs[i].Type()) - } - if r.TxHash != txs[i].Hash() { - t.Errorf("receipts[%d].TxHash = %s, want %s", i, r.TxHash.String(), txs[i].Hash().String()) - } - if r.BlockHash != hash { - t.Errorf("receipts[%d].BlockHash = %s, want %s", i, r.BlockHash.String(), hash.String()) - } - if r.BlockNumber.Cmp(number) != 0 { - t.Errorf("receipts[%c].BlockNumber = %s, want %s", i, r.BlockNumber.String(), number.String()) + if receipts[i].TxHash != 
txs[i].Hash() { + t.Errorf("receipts[%d].TxHash = %s, want %s", i, receipts[i].TxHash.String(), txs[i].Hash().String()) + } + if receipts[i].BlockHash != hash { + t.Errorf("receipts[%d].BlockHash = %s, want %s", i, receipts[i].BlockHash.String(), hash.String()) + } + if receipts[i].BlockNumber.Cmp(number) != 0 { + t.Errorf("receipts[%c].BlockNumber = %s, want %s", i, receipts[i].BlockNumber.String(), number.String()) + } + if receipts[i].TransactionIndex != uint(i) { + t.Errorf("receipts[%d].TransactionIndex = %d, want %d", i, receipts[i].TransactionIndex, i) + } + if receipts[i].GasUsed != txs[i].GetGas() { + t.Errorf("receipts[%d].GasUsed = %d, want %d", i, receipts[i].GasUsed, txs[i].GetGas()) + } + if txs[i].GetTo() != nil && receipts[i].ContractAddress != (libcommon.Address{}) { + t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, receipts[i].ContractAddress.String(), (libcommon.Address{}).String()) + } + from, _ := txs[i].Sender(*signer) + contractAddress := crypto.CreateAddress(from, txs[i].GetNonce()) + if txs[i].GetTo() == nil && receipts[i].ContractAddress != contractAddress { + t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, receipts[i].ContractAddress.String(), contractAddress.String()) + } + for j := range receipts[i].Logs { + if receipts[i].Logs[j].BlockNumber != number.Uint64() { + t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, receipts[i].Logs[j].BlockNumber, number.Uint64()) } - if r.TransactionIndex != uint(i) { - t.Errorf("receipts[%d].TransactionIndex = %d, want %d", i, r.TransactionIndex, i) + if receipts[i].Logs[j].BlockHash != hash { + t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, receipts[i].Logs[j].BlockHash.String(), hash.String()) } - if r.GasUsed != txs[i].GetGas() { - t.Errorf("receipts[%d].GasUsed = %d, want %d", i, r.GasUsed, txs[i].GetGas()) + if receipts[i].Logs[j].TxHash != txs[i].Hash() { + t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String()) } - if txs[i].GetTo() != nil && r.ContractAddress != (libcommon.Address{}) { - t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, r.ContractAddress.String(), (libcommon.Address{}).String()) + if receipts[i].Logs[j].TxHash != txs[i].Hash() { + t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, receipts[i].Logs[j].TxHash.String(), txs[i].Hash().String()) } - from, _ := txs[i].Sender(*signer) - contractAddress := crypto.CreateAddress(from, txs[i].GetNonce()) - if txs[i].GetTo() == nil && r.ContractAddress != contractAddress { - t.Errorf("receipts[%d].ContractAddress = %s, want %s", i, r.ContractAddress.String(), contractAddress.String()) + if receipts[i].Logs[j].TxIndex != uint(i) { + t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, receipts[i].Logs[j].TxIndex, i) } - for j := range r.Logs { - if r.Logs[j].BlockNumber != number.Uint64() { - t.Errorf("receipts[%d].Logs[%d].BlockNumber = %d, want %d", i, j, r.Logs[j].BlockNumber, number.Uint64()) - } - if r.Logs[j].BlockHash != hash { - t.Errorf("receipts[%d].Logs[%d].BlockHash = %s, want %s", i, j, r.Logs[j].BlockHash.String(), hash.String()) - } - if r.Logs[j].TxHash != txs[i].Hash() { - t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, r.Logs[j].TxHash.String(), txs[i].Hash().String()) - } - if r.Logs[j].TxHash != txs[i].Hash() { - t.Errorf("receipts[%d].Logs[%d].TxHash = %s, want %s", i, j, r.Logs[j].TxHash.String(), txs[i].Hash().String()) - } - if r.Logs[j].TxIndex != uint(i) { - 
t.Errorf("receipts[%d].Logs[%d].TransactionIndex = %d, want %d", i, j, r.Logs[j].TxIndex, i) - } - if r.Logs[j].Index != logIndex { - t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, r.Logs[j].Index, logIndex) - } - logIndex++ + if receipts[i].Logs[j].Index != logIndex { + t.Errorf("receipts[%d].Logs[%d].Index = %d, want %d", i, j, receipts[i].Logs[j].Index, logIndex) } + logIndex++ } - }) - + } } // TestTypedReceiptEncodingDecoding reproduces a flaw that existed in the receipt diff --git a/core/types/request.go b/core/types/request.go deleted file mode 100644 index 1423be32c62..00000000000 --- a/core/types/request.go +++ /dev/null @@ -1,111 +0,0 @@ -package types - -import ( - "bytes" - "fmt" - "io" - - rlp2 "github.com/ledgerwatch/erigon-lib/rlp" - "github.com/ledgerwatch/erigon/rlp" -) - -const ( - DepositRequestType byte = 0x00 -) - -type Request struct { - inner RequestData -} - -type RequestData interface { - encodeRLP(*bytes.Buffer) error - decodeRLP([]byte) error - requestType() byte - copy() RequestData - encodingSize() int -} - -func (r *Request) Type() byte { - return r.inner.requestType() -} - -func NewRequest(inner RequestData) *Request { - req := new(Request) - req.inner = inner.copy() - return req -} - -func (r *Request) EncodingSize() int { - switch r.Type() { - case DepositRequestType: - total := r.inner.encodingSize() + 1 // +1 byte for requset type - return rlp2.ListPrefixLen(total) + total - default: - panic(fmt.Sprintf("Unknown request type: %d", r.Type())) - } -} - -func (r *Request) EncodeRLP(w io.Writer) error { - var buf bytes.Buffer // TODO(racytech): find a solution to reuse the same buffer instead of recreating it - buf.WriteByte(r.Type()) // first write type of request then encode inner data - r.inner.encodeRLP(&buf) - return rlp.Encode(w, buf.Bytes()) -} - -func (r *Request) DecodeRLP(s *rlp.Stream) error { - kind, _, err := s.Kind() - switch { - case err != nil: - return err - case kind == rlp.List: - return fmt.Errorf("error: untyped request (unexpected lit)") - case kind == rlp.Byte: - return fmt.Errorf("error: too short request") - default: - var buf []byte - if buf, err = s.Bytes(); err != nil { - return err - } - return r.decode(buf) - } -} - -func (r *Request) decode(data []byte) error { - if len(data) <= 1 { - return fmt.Errorf("error: too short type request") - } - var inner RequestData - switch data[0] { - case DepositRequestType: - inner = new(Deposit) - default: - return fmt.Errorf("unknown request type - %d", data[0]) - } - - if err := inner.decodeRLP(data[1:]); err != nil { - return err - } - r.inner = inner - return nil -} - -func (r Requests) Deposits() Deposits { - deposits := make(Deposits, 0, len(r)) - for _, req := range r { - if req.Type() == DepositRequestType { - deposits = append(deposits, req.inner.(*Deposit)) - } - } - return deposits -} - -type Requests []*Request - -func (r Requests) Len() int { return len(r) } - -// EncodeIndex encodes the i'th request to w. Note that this does not check for errors -// because we assume that *request will only ever contain valid requests that were either -// constructed by decoding or via public API in this package. 
-func (r Requests) EncodeIndex(i int, w *bytes.Buffer) { - rlp.Encode(w, r[i]) -} diff --git a/core/types/transaction.go b/core/types/transaction.go index 07135d7ef92..fb781275283 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -78,7 +78,6 @@ type Transaction interface { RawSignatureValues() (*uint256.Int, *uint256.Int, *uint256.Int) EncodingSize() int EncodeRLP(w io.Writer) error - DecodeRLP(s *rlp.Stream) error MarshalBinary(w io.Writer) error // Sender returns the address derived from the signature (V, R, S) using secp256k1 // elliptic curve and an error if it failed deriving or upon an incorrect @@ -88,7 +87,6 @@ type Transaction interface { // signing method. The cache is invalidated if the cached signer does // not match the signer used in the current call. Sender(Signer) (libcommon.Address, error) - cashedSender() (libcommon.Address, bool) GetSender() (libcommon.Address, bool) SetSender(libcommon.Address) IsContractDeploy() bool @@ -115,19 +113,19 @@ func (t BinaryTransactions) EncodeIndex(i int, w *bytes.Buffer) { } func DecodeRLPTransaction(s *rlp.Stream, blobTxnsAreWrappedWithBlobs bool) (Transaction, error) { - kind, _, err := s.Kind() + kind, size, err := s.Kind() if err != nil { return nil, err } if rlp.List == kind { tx := &LegacyTx{} - if err = tx.DecodeRLP(s); err != nil { + if err = tx.DecodeRLP(s, size); err != nil { return nil, err } return tx, nil } if rlp.String != kind { - return nil, fmt.Errorf("not an RLP encoded transaction. If this is a canonical encoded transaction, use UnmarshalTransactionFromBinary instead. Got %v for kind, expected String", kind) + return nil, fmt.Errorf("Not an RLP encoded transaction. If this is a canonical encoded transaction, use UnmarshalTransactionFromBinary instead. Got %v for kind, expected String", kind) } // Decode the EIP-2718 typed TX envelope. var b []byte @@ -165,14 +163,7 @@ func DecodeTransaction(data []byte) (Transaction, error) { return UnmarshalTransactionFromBinary(data, blobTxnsAreWrappedWithBlobs) } s := rlp.NewStream(bytes.NewReader(data), uint64(len(data))) - tx, err := DecodeRLPTransaction(s, blobTxnsAreWrappedWithBlobs) - if err != nil { - return nil, err - } - if s.Remaining() != 0 { - return nil, fmt.Errorf("trailing bytes after rlp encoded transaction") - } - return tx, nil + return DecodeRLPTransaction(s, blobTxnsAreWrappedWithBlobs) } // Parse transaction without envelope. 
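// Illustrative sketch, not part of this change: the first-byte dispatch that
// DecodeTransaction above relies on. A leading byte >= 0x80 is an RLP prefix,
// so the payload is a legacy transaction; smaller values are EIP-2718 type
// bytes handled by UnmarshalTransactionFromBinary. exampleDecodeAny is a
// hypothetical helper name; bytes, fmt and rlp are already imported here.
func exampleDecodeAny(payload []byte) (Transaction, error) {
	if len(payload) == 0 {
		return nil, fmt.Errorf("empty transaction payload")
	}
	if payload[0] >= 0x80 {
		// Legacy txn: decode through the stream-based RLP path.
		s := rlp.NewStream(bytes.NewReader(payload), uint64(len(payload)))
		return DecodeRLPTransaction(s, false /* blobTxnsAreWrappedWithBlobs */)
	}
	// Typed txn envelope: the type byte is inspected inside.
	return UnmarshalTransactionFromBinary(payload, false /* blobTxnsAreWrappedWithBlobs */)
}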
@@ -181,17 +172,32 @@ func UnmarshalTransactionFromBinary(data []byte, blobTxnsAreWrappedWithBlobs boo return nil, fmt.Errorf("short input: %v", len(data)) } s := rlp.NewStream(bytes.NewReader(data[1:]), uint64(len(data)-1)) - var t Transaction switch data[0] { case AccessListTxType: - t = &AccessListTx{} + t := &AccessListTx{} + if err := t.DecodeRLP(s); err != nil { + return nil, err + } + return t, nil case DynamicFeeTxType: - t = &DynamicFeeTransaction{} + t := &DynamicFeeTransaction{} + if err := t.DecodeRLP(s); err != nil { + return nil, err + } + return t, nil case BlobTxType: if blobTxnsAreWrappedWithBlobs { - t = &BlobTxWrapper{} + t := &BlobTxWrapper{} + if err := t.DecodeRLP(s); err != nil { + return nil, err + } + return t, nil } else { - t = &BlobTx{} + t := &BlobTx{} + if err := t.DecodeRLP(s); err != nil { + return nil, err + } + return t, nil } default: if data[0] >= 0x80 { @@ -200,13 +206,6 @@ func UnmarshalTransactionFromBinary(data []byte, blobTxnsAreWrappedWithBlobs boo } return nil, ErrTxTypeNotSupported } - if err := t.DecodeRLP(s); err != nil { - return nil, err - } - if s.Remaining() != 0 { - return nil, fmt.Errorf("trailing bytes after rlp encoded transaction") - } - return t, nil } // Remove everything but the payload body from the wrapper - this is not used, for reference only diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index 669389e635a..dfa5fd217b7 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -826,46 +826,3 @@ func TestShortUnwrapLib(t *testing.T) { assertEqual(blobTx, &wrappedBlobTx.Tx) } - -func TestTrailingBytes(t *testing.T) { - // Create a valid transaction - valid_rlp_transaction := []byte{201, 38, 38, 128, 128, 107, 58, 42, 38, 42} - - // Test valid transaction - transactions := make([][]byte, 1) - transactions[0] = valid_rlp_transaction - - for _, txn := range transactions { - if TypedTransactionMarshalledAsRlpString(txn) { - panic("TypedTransactionMarshalledAsRlpString() error") - } - } - - _, err := DecodeTransactions(transactions) - if err != nil { - fmt.Println("Valid transaction errored") - panic(err) // @audit this will pass - } - - // Append excess bytes to the blob transaction - num_excess := 100 - malicious_rlp_transaction := make([]byte, len(valid_rlp_transaction)+num_excess) - copy(malicious_rlp_transaction, valid_rlp_transaction) - - // Validate transactions are different - assert.NotEqual(t, malicious_rlp_transaction, valid_rlp_transaction) - - // Test malicious transaction - transactions[0] = malicious_rlp_transaction - - for _, txn := range transactions { - if TypedTransactionMarshalledAsRlpString(txn) { - panic("TypedTransactionMarshalledAsRlpString() error") - } - } - - _, err = DecodeTransactions(transactions) - if err == nil { - panic("Malicious transaction has not errored!") // @audit this panic is occurs - } -} diff --git a/core/vm/contract.go b/core/vm/contract.go index 7d6d7daa6ba..2af9a15631f 100644 --- a/core/vm/contract.go +++ b/core/vm/contract.go @@ -19,6 +19,7 @@ package vm import ( "github.com/holiman/uint256" libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/core/tracing" ) // ContractRef is a reference to the contract's backing object @@ -166,14 +167,28 @@ func (c *Contract) Caller() libcommon.Address { } // UseGas attempts the use gas and subtracts it and returns true on success -func (c *Contract) UseGas(gas uint64) (ok bool) { +func (c *Contract) UseGas(gas uint64, logger *tracing.Hooks, reason 
tracing.GasChangeReason) (ok bool) { if c.Gas < gas { return false } + if logger != nil && logger.OnGasChange != nil && reason != tracing.GasChangeIgnored { + logger.OnGasChange(c.Gas, c.Gas-gas, reason) + } c.Gas -= gas return true } +// RefundGas refunds gas to the contract +func (c *Contract) RefundGas(gas uint64, logger *tracing.Hooks, reason tracing.GasChangeReason) { + if gas == 0 { + return + } + if logger != nil && logger.OnGasChange != nil && reason != tracing.GasChangeIgnored { + logger.OnGasChange(c.Gas, c.Gas+gas, reason) + } + c.Gas += gas +} + // Address returns the contracts address func (c *Contract) Address() libcommon.Address { return c.self diff --git a/core/vm/contracts.go b/core/vm/contracts.go index d1a9d4c809c..9494230faaf 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -35,6 +35,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/math" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/crypto/bn256" "github.com/ledgerwatch/erigon/crypto/secp256r1" @@ -209,12 +210,15 @@ func ActivePrecompiles(rules *chain.Rules) []libcommon.Address { // - the returned bytes, // - the _remaining_ gas, // - any error that occurred -func RunPrecompiledContract(p PrecompiledContract, input []byte, suppliedGas uint64, +func RunPrecompiledContract(p PrecompiledContract, input []byte, suppliedGas uint64, logger *tracing.Hooks, ) (ret []byte, remainingGas uint64, err error) { gasCost := p.RequiredGas(input) if suppliedGas < gasCost { return nil, 0, ErrOutOfGas } + if logger != nil && logger.OnGasChange != nil { + logger.OnGasChange(suppliedGas, suppliedGas-gasCost, tracing.GasChangeCallPrecompiledContract) + } suppliedGas -= gasCost output, err := p.Run(input) return output, suppliedGas, err @@ -1121,6 +1125,9 @@ func (c *bls12381MapFpToG1) Run(input []byte) ([]byte, error) { // Compute mapping r := bls12381.MapToG1(fe) + if err != nil { + return nil, err + } // Encode the G1 point to 128 bytes return encodePointG1(&r), nil @@ -1154,6 +1161,9 @@ func (c *bls12381MapFp2ToG2) Run(input []byte) ([]byte, error) { // Compute mapping r := bls12381.MapToG2(bls12381.E2{A0: c0, A1: c1}) + if err != nil { + return nil, err + } // Encode the G2 point to 256 bytes return encodePointG2(&r), nil diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go index a20e92ac903..ae7718444e4 100644 --- a/core/vm/contracts_test.go +++ b/core/vm/contracts_test.go @@ -101,7 +101,7 @@ func testPrecompiled(t *testing.T, addr string, test precompiledTest) { gas := p.RequiredGas(in) t.Run(fmt.Sprintf("%s-Gas=%d", test.Name, gas), func(t *testing.T) { t.Parallel() - if res, _, err := RunPrecompiledContract(p, in, gas); err != nil { + if res, _, err := RunPrecompiledContract(p, in, gas, nil); err != nil { t.Error(err) } else if common.Bytes2Hex(res) != test.Expected { t.Errorf("Expected %v, got %v", test.Expected, common.Bytes2Hex(res)) @@ -124,7 +124,7 @@ func testPrecompiledOOG(t *testing.T, addr string, test precompiledTest) { t.Run(fmt.Sprintf("%s-Gas=%d", test.Name, gas), func(t *testing.T) { t.Parallel() - _, _, err := RunPrecompiledContract(p, in, gas) + _, _, err := RunPrecompiledContract(p, in, gas, nil) if err.Error() != "out of gas" { t.Errorf("Expected error [out of gas], got [%v]", err) } @@ -142,7 +142,7 @@ func testPrecompiledFailure(addr string, test precompiledFailureTest, t *testing gas := p.RequiredGas(in) t.Run(test.Name, func(t *testing.T) { t.Parallel() - _, _, err := 
RunPrecompiledContract(p, in, gas) + _, _, err := RunPrecompiledContract(p, in, gas, nil) if err.Error() != test.ExpectedError { t.Errorf("Expected error [%v], got [%v]", test.ExpectedError, err) } @@ -174,7 +174,7 @@ func benchmarkPrecompiled(b *testing.B, addr string, test precompiledTest) { bench.ResetTimer() for i := 0; i < bench.N; i++ { copy(data, in) - res, _, err = RunPrecompiledContract(p, data, reqGas) + res, _, err = RunPrecompiledContract(p, data, reqGas, nil) } bench.StopTimer() elapsed := uint64(time.Since(start)) diff --git a/core/vm/eips.go b/core/vm/eips.go index c05c41006fb..8d48f1a7b33 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -29,7 +29,6 @@ import ( ) var activators = map[int]func(*JumpTable){ - 2935: enable2935, 7516: enable7516, 6780: enable6780, 5656: enable5656, @@ -328,14 +327,3 @@ func enable7516(jt *JumpTable) { numPush: 1, } } - -// enable2935 applies EIP-2935 (Historical block hashes in state) -func enable2935(jt *JumpTable) { - jt[BLOCKHASH] = &operation{ - execute: opBlockhash2935, - constantGas: GasExtStep, - dynamicGas: gasOpBlockhashEIP2935, - numPop: 1, - numPush: 1, - } -} diff --git a/core/vm/errors.go b/core/vm/errors.go index a45a7b0a928..7fea84a0197 100644 --- a/core/vm/errors.go +++ b/core/vm/errors.go @@ -19,6 +19,7 @@ package vm import ( "errors" "fmt" + "math" ) // List evm execution errors @@ -75,3 +76,132 @@ type ErrInvalidOpCode struct { } func (e *ErrInvalidOpCode) Error() string { return fmt.Sprintf("invalid opcode: %s", e.opcode) } + +// rpcError is the same interface as the one defined in rpc/errors.go +// but we do not want to depend on rpc package here so we redefine it. +// +// It's used to ensure that the VMError implements the RPC error interface. +type rpcError interface { + Error() string // returns the message + ErrorCode() int // returns the code +} + +var _ rpcError = (*VMError)(nil) + +// VMError wraps a VM error with an additional stable error code. The error +// field is the original error that caused the VM error and must be one of the +// VM error defined at the top of this file. +// +// If the error is not one of the known error above, the error code will be +// set to VMErrorCodeUnknown. +type VMError struct { + error + code int +} + +func VMErrorFromErr(err error) error { + if err == nil { + return nil + } + + return &VMError{ + error: fmt.Errorf("%w", err), + code: vmErrorCodeFromErr(err), + } +} + +func (e *VMError) Error() string { + return e.error.Error() +} + +func (e *VMError) Unwrap() error { + return e.error +} + +func (e *VMError) ErrorCode() int { + return e.code +} + +const ( + // We start the error code at 1 so that we can use 0 later for some possible extension. There + // is no unspecified value for the code today because it should always be set to a valid value + // that could be VMErrorCodeUnknown if the error is not mapped to a known error code. 
+ + VMErrorCodeOutOfGas = 1 + iota + VMErrorCodeCodeStoreOutOfGas + VMErrorCodeDepth + VMErrorCodeInsufficientBalance + VMErrorCodeContractAddressCollision + VMErrorCodeExecutionReverted + VMErrorCodeMaxInitCodeSizeExceeded + VMErrorCodeMaxCodeSizeExceeded + VMErrorCodeInvalidJump + VMErrorCodeWriteProtection + VMErrorCodeReturnDataOutOfBounds + VMErrorCodeGasUintOverflow + VMErrorCodeInvalidCode + VMErrorCodeNonceUintOverflow + VMErrorCodeStackUnderflow + VMErrorCodeStackOverflow + VMErrorCodeInvalidOpCode + VMErrorInvalidSubroutineEntry + VMErrorInvalidRetsub + VMErrorReturnStackExceeded + + // VMErrorCodeUnknown explicitly marks an error as unknown, this is useful when error is converted + // from an actual `error` in which case if the mapping is not known, we can use this value to indicate that. + VMErrorCodeUnknown = math.MaxInt - 1 +) + +func vmErrorCodeFromErr(err error) int { + switch { + case errors.Is(err, ErrOutOfGas): + return VMErrorCodeOutOfGas + case errors.Is(err, ErrCodeStoreOutOfGas): + return VMErrorCodeCodeStoreOutOfGas + case errors.Is(err, ErrDepth): + return VMErrorCodeDepth + case errors.Is(err, ErrInsufficientBalance): + return VMErrorCodeInsufficientBalance + case errors.Is(err, ErrContractAddressCollision): + return VMErrorCodeContractAddressCollision + case errors.Is(err, ErrExecutionReverted): + return VMErrorCodeExecutionReverted + case errors.Is(err, ErrMaxCodeSizeExceeded): + return VMErrorCodeMaxCodeSizeExceeded + case errors.Is(err, ErrInvalidJump): + return VMErrorCodeInvalidJump + case errors.Is(err, ErrWriteProtection): + return VMErrorCodeWriteProtection + case errors.Is(err, ErrReturnDataOutOfBounds): + return VMErrorCodeReturnDataOutOfBounds + case errors.Is(err, ErrGasUintOverflow): + return VMErrorCodeGasUintOverflow + case errors.Is(err, ErrInvalidCode): + return VMErrorCodeInvalidCode + case errors.Is(err, ErrNonceUintOverflow): + return VMErrorCodeNonceUintOverflow + case errors.Is(err, ErrInvalidSubroutineEntry): + return VMErrorInvalidSubroutineEntry + case errors.Is(err, ErrInvalidRetsub): + return VMErrorInvalidRetsub + case errors.Is(err, ErrReturnStackExceeded): + return VMErrorReturnStackExceeded + + default: + // Dynamic errors + if v := (*ErrStackUnderflow)(nil); errors.As(err, &v) { + return VMErrorCodeStackUnderflow + } + + if v := (*ErrStackOverflow)(nil); errors.As(err, &v) { + return VMErrorCodeStackOverflow + } + + if v := (*ErrInvalidOpCode)(nil); errors.As(err, &v) { + return VMErrorCodeInvalidOpCode + } + + return VMErrorCodeUnknown + } +} diff --git a/core/vm/evm.go b/core/vm/evm.go index f2ae81cd775..675c8af2d08 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -17,6 +17,7 @@ package vm import ( + "errors" "sync/atomic" "github.com/holiman/uint256" @@ -25,6 +26,7 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/common/u256" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/vm/evmtypes" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/params" @@ -164,6 +166,30 @@ func (evm *EVM) Interpreter() Interpreter { func (evm *EVM) call(typ OpCode, caller ContractRef, addr libcommon.Address, input []byte, gas uint64, value *uint256.Int, bailout bool) (ret []byte, leftOverGas uint64, err error) { depth := evm.interpreter.Depth() + p, isPrecompile := evm.precompile(addr) + + var code []byte + if !isPrecompile { + code = evm.intraBlockState.GetCode(addr) + } + + // Invoke tracer hooks that signal entering/exiting a call frame + if 
evm.Config().Tracer != nil { + v := value + if typ == STATICCALL { + v = nil + } else if typ == DELEGATECALL { + // NOTE: caller must, at all times be a contract. It should never happen + // that caller is something other than a Contract. + parent := caller.(*Contract) + // DELEGATECALL inherits value from parent call + v = parent.value + } + evm.captureBegin(depth, typ, caller.Address(), addr, isPrecompile, input, gas, v, code) + defer func(startGas uint64) { + evm.captureEnd(depth, typ, startGas, leftOverGas, ret, err) + }(gas) + } if evm.config.NoRecursion && depth > 0 { return nil, gas, nil @@ -180,31 +206,13 @@ func (evm *EVM) call(typ OpCode, caller ContractRef, addr libcommon.Address, inp } } } - p, isPrecompile := evm.precompile(addr) - var code []byte - if !isPrecompile { - code = evm.intraBlockState.GetCode(addr) - } snapshot := evm.intraBlockState.Snapshot() if typ == CALL { if !evm.intraBlockState.Exist(addr) { if !isPrecompile && evm.chainRules.IsSpuriousDragon && value.IsZero() { - if evm.config.Debug { - v := value - if typ == STATICCALL { - v = nil - } - // Calling a non existing account, don't do anything, but ping the tracer - if depth == 0 { - evm.config.Tracer.CaptureStart(evm, caller.Address(), addr, isPrecompile, false /* create */, input, gas, v, code) - evm.config.Tracer.CaptureEnd(ret, 0, nil) - } else { - evm.config.Tracer.CaptureEnter(typ, caller.Address(), addr, isPrecompile, false /* create */, input, gas, v, code) - evm.config.Tracer.CaptureExit(ret, 0, nil) - } - } + // Calling a non existing account, don't do anything return nil, gas, nil } evm.intraBlockState.CreateAccount(addr, false) @@ -215,29 +223,12 @@ func (evm *EVM) call(typ OpCode, caller ContractRef, addr libcommon.Address, inp // This doesn't matter on Mainnet, where all empties are gone at the time of Byzantium, // but is the correct thing to do and matters on other networks, in tests, and potential // future scenarios - evm.intraBlockState.AddBalance(addr, u256.Num0) - } - if evm.config.Debug { - v := value - if typ == STATICCALL { - v = nil - } - if depth == 0 { - evm.config.Tracer.CaptureStart(evm, caller.Address(), addr, isPrecompile, false /* create */, input, gas, v, code) - defer func(startGas uint64) { // Lazy evaluation of the parameters - evm.config.Tracer.CaptureEnd(ret, startGas-gas, err) - }(gas) - } else { - evm.config.Tracer.CaptureEnter(typ, caller.Address(), addr, isPrecompile, false /* create */, input, gas, v, code) - defer func(startGas uint64) { // Lazy evaluation of the parameters - evm.config.Tracer.CaptureExit(ret, startGas-gas, err) - }(gas) - } + evm.intraBlockState.AddBalance(addr, u256.Num0, tracing.BalanceChangeTouchAccount) } // It is allowed to call precompiles, even via delegatecall if isPrecompile { - ret, gas, err = RunPrecompiledContract(p, input, gas) + ret, gas, err = RunPrecompiledContract(p, input, gas, evm.Config().Tracer) } else if len(code) == 0 { // If the account has no code, we can abort here // The depth-check is already done, and precompiles handled above @@ -272,6 +263,9 @@ func (evm *EVM) call(typ OpCode, caller ContractRef, addr libcommon.Address, inp if err != nil || evm.config.RestoreState { evm.intraBlockState.RevertToSnapshot(snapshot) if err != ErrExecutionReverted { + if evm.config.Tracer != nil && evm.config.Tracer.OnGasChange != nil { + evm.Config().Tracer.OnGasChange(gas, 0, tracing.GasChangeCallFailedExecution) + } gas = 0 } // TODO: consider clearing up unused snapshots: @@ -338,24 +332,14 @@ func (evm *EVM) OverlayCreate(caller 
ContractRef, codeAndHash *codeAndHash, gas } // create creates a new contract using code as deployment code. -func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gasRemaining uint64, value *uint256.Int, address libcommon.Address, typ OpCode, incrementNonce bool) ([]byte, libcommon.Address, uint64, error) { - var ret []byte - var err error - var gasConsumption uint64 +func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gasRemaining uint64, value *uint256.Int, address libcommon.Address, typ OpCode, incrementNonce bool) (ret []byte, createAddress libcommon.Address, leftOverGas uint64, err error) { depth := evm.interpreter.Depth() - if evm.config.Debug { - if depth == 0 { - evm.config.Tracer.CaptureStart(evm, caller.Address(), address, false /* precompile */, true /* create */, codeAndHash.code, gasRemaining, value, nil) - defer func() { - evm.config.Tracer.CaptureEnd(ret, gasConsumption, err) - }() - } else { - evm.config.Tracer.CaptureEnter(typ, caller.Address(), address, false /* precompile */, true /* create */, codeAndHash.code, gasRemaining, value, nil) - defer func() { - evm.config.Tracer.CaptureExit(ret, gasConsumption, err) - }() - } + if evm.Config().Tracer != nil { + evm.captureBegin(depth, typ, caller.Address(), address, false, codeAndHash.code, gasRemaining, value, nil) + defer func(startGas uint64) { + evm.captureEnd(depth, typ, startGas, leftOverGas, ret, err) + }(gasRemaining) } // Depth check execution. Fail if we're trying to execute above the @@ -385,6 +369,9 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gasRemainin contractHash := evm.intraBlockState.GetCodeHash(address) if evm.intraBlockState.GetNonce(address) != 0 || (contractHash != (libcommon.Hash{}) && contractHash != emptyCodeHash) { err = ErrContractAddressCollision + if evm.config.Tracer != nil && evm.config.Tracer.OnGasChange != nil { + evm.Config().Tracer.OnGasChange(gasRemaining, 0, tracing.GasChangeCallFailedExecution) + } return nil, libcommon.Address{}, 0, err } // Create a new account on the state @@ -425,7 +412,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gasRemainin // by the error checking condition below. 
if err == nil { createDataGas := uint64(len(ret)) * params.CreateDataGas - if contract.UseGas(createDataGas) { + if contract.UseGas(createDataGas, evm.Config().Tracer, tracing.GasChangeCallCodeStorage) { evm.intraBlockState.SetCode(address, ret) } else if evm.chainRules.IsHomestead { err = ErrCodeStoreOutOfGas @@ -438,13 +425,10 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gasRemainin if err != nil && (evm.chainRules.IsHomestead || err != ErrCodeStoreOutOfGas) { evm.intraBlockState.RevertToSnapshot(snapshot) if err != ErrExecutionReverted { - contract.UseGas(contract.Gas) + contract.UseGas(contract.Gas, evm.Config().Tracer, tracing.GasChangeCallFailedExecution) } } - // calculate gasConsumption for deferred captures - gasConsumption = gasRemaining - contract.Gas - return ret, address, contract.Gas, err } @@ -492,3 +476,49 @@ func (evm *EVM) ChainRules() *chain.Rules { func (evm *EVM) IntraBlockState() evmtypes.IntraBlockState { return evm.intraBlockState } + +func (evm *EVM) captureBegin(depth int, typ OpCode, from libcommon.Address, to libcommon.Address, precompile bool, input []byte, startGas uint64, value *uint256.Int, code []byte) { + tracer := evm.Config().Tracer + + if tracer.OnEnter != nil { + tracer.OnEnter(depth, byte(typ), from, to, precompile, input, startGas, value, code) + } + if tracer.OnGasChange != nil { + tracer.OnGasChange(0, startGas, tracing.GasChangeCallInitialBalance) + } +} + +func (evm *EVM) captureEnd(depth int, typ OpCode, startGas uint64, leftOverGas uint64, ret []byte, err error) { + tracer := evm.Config().Tracer + + if leftOverGas != 0 && tracer.OnGasChange != nil { + tracer.OnGasChange(leftOverGas, 0, tracing.GasChangeCallLeftOverReturned) + } + + var reverted bool + if err != nil { + reverted = true + } + if !evm.chainRules.IsHomestead && errors.Is(err, ErrCodeStoreOutOfGas) { + reverted = false + } + + if tracer.OnExit != nil { + tracer.OnExit(depth, ret, startGas-leftOverGas, VMErrorFromErr(err), reverted) + } +} + +// GetVMContext provides context about the block being executed as well as state +// to the tracers. 
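
A sketch of how a caller would build the hook-style tracer that captureBegin/captureEnd feed. The Hooks field names and hook signatures below are inferred from the call sites in this diff; everything else (the prints, the variable names) is illustrative only, not a fixed API:

package main

import (
	"fmt"

	"github.com/holiman/uint256"
	libcommon "github.com/ledgerwatch/erigon-lib/common"

	"github.com/ledgerwatch/erigon/core/tracing"
	"github.com/ledgerwatch/erigon/core/vm"
)

func main() {
	hooks := &tracing.Hooks{
		// fed by evm.captureBegin for every frame, including depth 0
		OnEnter: func(depth int, typ byte, from, to libcommon.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) {
			fmt.Printf("enter depth=%d op=%#x to=%x gas=%d\n", depth, typ, to, gas)
		},
		// fed by evm.captureEnd; reverted is derived from err as shown above
		OnExit: func(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
			fmt.Printf("exit depth=%d gasUsed=%d reverted=%t\n", depth, gasUsed, reverted)
		},
		// gas accounting events, e.g. GasChangeCallInitialBalance on frame entry
		OnGasChange: func(old, new uint64, reason tracing.GasChangeReason) {
			fmt.Printf("gas %d -> %d\n", old, new)
		},
	}
	cfg := vm.Config{Tracer: hooks}
	_ = cfg // pass to the EVM constructor in place of the old Debug/EVMLogger pair
}
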
+func (evm *EVM) GetVMContext() *tracing.VMContext { + return &tracing.VMContext{ + Coinbase: evm.Context.Coinbase, + BlockNumber: evm.Context.BlockNumber, + Time: evm.Context.Time, + Random: evm.Context.PrevRanDao, + GasPrice: evm.TxContext.GasPrice, + ChainConfig: evm.ChainConfig(), + IntraBlockState: evm.IntraBlockState(), + TxHash: evm.TxHash, + } +} diff --git a/core/vm/evmtypes/evmtypes.go b/core/vm/evmtypes/evmtypes.go index 4b919f6b3e3..a03be566ad9 100644 --- a/core/vm/evmtypes/evmtypes.go +++ b/core/vm/evmtypes/evmtypes.go @@ -9,6 +9,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common" types2 "github.com/ledgerwatch/erigon-lib/types" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/types" ) @@ -59,8 +60,8 @@ type ( type IntraBlockState interface { CreateAccount(common.Address, bool) - SubBalance(common.Address, *uint256.Int) - AddBalance(common.Address, *uint256.Int) + SubBalance(common.Address, *uint256.Int, tracing.BalanceChangeReason) + AddBalance(common.Address, *uint256.Int, tracing.BalanceChangeReason) GetBalance(common.Address) *uint256.Int GetNonce(common.Address) uint64 diff --git a/core/vm/gas_table_test.go b/core/vm/gas_table_test.go index 83d9088e472..b0c0a8850c9 100644 --- a/core/vm/gas_table_test.go +++ b/core/vm/gas_table_test.go @@ -146,7 +146,7 @@ var createGasTests = []struct { func TestCreateGas(t *testing.T) { t.Parallel() - db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + _, db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) for i, tt := range createGasTests { address := libcommon.BytesToAddress([]byte("contract")) @@ -164,8 +164,8 @@ func TestCreateGas(t *testing.T) { defer domains.Close() txc.Doms = domains - stateReader = rpchelper.NewLatestStateReader(tx) - stateWriter = rpchelper.NewLatestStateWriter(txc, 0) + stateReader = rpchelper.NewLatestStateReader(tx, true) + stateWriter = rpchelper.NewLatestStateWriter(txc, 0, true) s := state.New(stateReader) s.CreateAccount(address, true) diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 21f9bf24d15..dc0885ffbc1 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -27,6 +27,7 @@ import ( "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/params" ) @@ -464,60 +465,28 @@ func opGasprice(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ return nil, nil } -// opBlockhash executes the BLOCKHASH opcode pre-EIP-2935 func opBlockhash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - arg := scope.Stack.Peek() - arg64, overflow := arg.Uint64WithOverflow() + num := scope.Stack.Peek() + num64, overflow := num.Uint64WithOverflow() if overflow { - arg.Clear() + num.Clear() return nil, nil } var upper, lower uint64 upper = interpreter.evm.Context.BlockNumber - if upper <= params.BlockHashOldWindow { + if upper < 257 { lower = 0 } else { - lower = upper - params.BlockHashOldWindow + lower = upper - 256 } - if arg64 >= lower && arg64 < upper { - arg.SetBytes(interpreter.evm.Context.GetHash(arg64).Bytes()) + if num64 >= lower && num64 < upper { + num.SetBytes(interpreter.evm.Context.GetHash(num64).Bytes()) } else { - arg.Clear() + num.Clear() } return nil, nil } -// opBlockhash2935 executes for the BLOCKHASH opcode post EIP-2935 by returning the -// corresponding hash for the blocknumber from the state, if within range. 
-// The range is defined by [head - params.BlockHashHistoryServeWindow - 1, head - 1] -// This should not be used without activating EIP-2935 -func opBlockhash2935(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - arg := scope.Stack.Peek() - arg64, overflow := arg.Uint64WithOverflow() - if overflow { - arg.Clear() - return nil, nil - } - - // Check if arg is within allowed window - var upper uint64 - upper = interpreter.evm.Context.BlockNumber - if arg64 >= upper || arg64+params.BlockHashHistoryServeWindow < upper { - arg.Clear() - return nil, nil - } - - // Return state read value from the slot - storageSlot := libcommon.BytesToHash(uint256.NewInt(arg64 % params.BlockHashHistoryServeWindow).Bytes()) - interpreter.evm.intraBlockState.GetState( - params.HistoryStorageAddress, - &storageSlot, - arg, - ) - - return nil, nil -} - func opCoinbase(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { scope.Stack.Push(new(uint256.Int).SetBytes(interpreter.evm.Context.Coinbase.Bytes())) return nil, nil @@ -684,7 +653,7 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b // reuse size int for stackvalue stackvalue := size - scope.Contract.UseGas(gas) + scope.Contract.UseGas(gas, interpreter.evm.Config().Tracer, tracing.GasChangeCallContractCreation) res, addr, returnGas, suberr := interpreter.evm.Create(scope.Contract, input, gas, &value) @@ -699,7 +668,8 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b } else { stackvalue.SetBytes(addr.Bytes()) } - scope.Contract.Gas += returnGas + + scope.Contract.RefundGas(returnGas, interpreter.evm.config.Tracer, tracing.GasChangeCallLeftOverRefunded) if suberr == ErrExecutionReverted { interpreter.returnData = res // set REVERT data to return data buffer @@ -723,7 +693,7 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] // Apply EIP150 gas -= gas / 64 - scope.Contract.UseGas(gas) + scope.Contract.UseGas(gas, interpreter.evm.Config().Tracer, tracing.GasChangeCallContractCreation2) // reuse size int for stackvalue stackValue := size res, addr, returnGas, suberr := interpreter.evm.Create2(scope.Contract, input, gas, &endowment, &salt) @@ -734,9 +704,9 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] } else { stackValue.SetBytes(addr.Bytes()) } - scope.Stack.Push(&stackValue) - scope.Contract.Gas += returnGas + + scope.Contract.RefundGas(returnGas, interpreter.evm.config.Tracer, tracing.GasChangeCallLeftOverRefunded) if suberr == ErrExecutionReverted { interpreter.returnData = res // set REVERT data to return data buffer @@ -778,7 +748,7 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } - scope.Contract.Gas += returnGas + scope.Contract.RefundGas(returnGas, interpreter.evm.config.Tracer, tracing.GasChangeCallLeftOverRefunded) interpreter.returnData = ret return ret, nil @@ -812,7 +782,7 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } - scope.Contract.Gas += returnGas + scope.Contract.RefundGas(returnGas, interpreter.evm.config.Tracer, tracing.GasChangeCallLeftOverRefunded) interpreter.returnData = ret return ret, nil @@ -842,7 +812,7 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } - 
scope.Contract.Gas += returnGas + scope.Contract.RefundGas(returnGas, interpreter.evm.config.Tracer, tracing.GasChangeCallLeftOverRefunded) interpreter.returnData = ret return ret, nil @@ -872,7 +842,7 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) scope.Memory.Set(retOffset.Uint64(), retSize.Uint64(), ret) } - scope.Contract.Gas += returnGas + scope.Contract.RefundGas(returnGas, interpreter.evm.config.Tracer, tracing.GasChangeCallLeftOverRefunded) interpreter.returnData = ret return ret, nil @@ -906,15 +876,17 @@ func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext beneficiary := scope.Stack.Pop() callerAddr := scope.Contract.Address() beneficiaryAddr := libcommon.Address(beneficiary.Bytes20()) - balance := interpreter.evm.IntraBlockState().GetBalance(callerAddr) - if interpreter.evm.Config().Debug { - if interpreter.cfg.Debug { - interpreter.cfg.Tracer.CaptureEnter(SELFDESTRUCT, callerAddr, beneficiaryAddr, false /* precompile */, false /* create */, []byte{}, 0, balance, nil /* code */) - interpreter.cfg.Tracer.CaptureExit([]byte{}, 0, nil) + balance := *interpreter.evm.IntraBlockState().GetBalance(callerAddr) + interpreter.evm.IntraBlockState().AddBalance(beneficiaryAddr, &balance, tracing.BalanceIncreaseSelfdestruct) + interpreter.evm.IntraBlockState().Selfdestruct(callerAddr) + if interpreter.evm.Config().Tracer != nil { + if interpreter.evm.Config().Tracer.OnEnter != nil { + interpreter.evm.Config().Tracer.OnEnter(interpreter.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiary.Bytes20(), false, []byte{}, 0, &balance, nil) + } + if interpreter.evm.Config().Tracer.OnExit != nil { + interpreter.evm.Config().Tracer.OnExit(interpreter.depth, []byte{}, 0, nil, false) } } - interpreter.evm.IntraBlockState().AddBalance(beneficiaryAddr, balance) - interpreter.evm.IntraBlockState().Selfdestruct(callerAddr) return nil, errStopToken } @@ -926,15 +898,17 @@ func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeCon callerAddr := scope.Contract.Address() beneficiaryAddr := libcommon.Address(beneficiary.Bytes20()) balance := *interpreter.evm.IntraBlockState().GetBalance(callerAddr) - if interpreter.evm.Config().Debug { - if interpreter.cfg.Debug { - interpreter.cfg.Tracer.CaptureEnter(SELFDESTRUCT, callerAddr, beneficiaryAddr, false /* precompile */, false /* create */, []byte{}, 0, &balance, nil /* code */) - interpreter.cfg.Tracer.CaptureExit([]byte{}, 0, nil) + interpreter.evm.IntraBlockState().SubBalance(callerAddr, &balance, tracing.BalanceDecreaseSelfdestruct) + interpreter.evm.IntraBlockState().AddBalance(beneficiaryAddr, &balance, tracing.BalanceIncreaseSelfdestruct) + interpreter.evm.IntraBlockState().Selfdestruct6780(callerAddr) + if interpreter.evm.Config().Tracer != nil { + if interpreter.cfg.Tracer.OnEnter != nil { + interpreter.cfg.Tracer.OnEnter(interpreter.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiary.Bytes20(), false, []byte{}, 0, &balance, nil) + } + if interpreter.cfg.Tracer.OnExit != nil { + interpreter.cfg.Tracer.OnExit(interpreter.depth, []byte{}, 0, nil, false) } } - interpreter.evm.IntraBlockState().SubBalance(callerAddr, &balance) - interpreter.evm.IntraBlockState().AddBalance(beneficiaryAddr, &balance) - interpreter.evm.IntraBlockState().Selfdestruct6780(callerAddr) return nil, errStopToken } diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 04f6b7f4e37..9e8b8543602 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -20,27 
+20,30 @@ import ( "hash" "sync" + "github.com/holiman/uint256" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon-lib/common" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/math" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/vm/stack" ) // Config are the configuration options for the Interpreter type Config struct { - Debug bool // Enables debugging - Tracer EVMLogger // Opcode logger - NoRecursion bool // Disables call, callcode, delegate call and create - NoBaseFee bool // Forces the EIP-1559 baseFee to 0 (needed for 0 price calls) - SkipAnalysis bool // Whether we can skip jumpdest analysis based on the checked history - TraceJumpDest bool // Print transaction hashes where jumpdest analysis was useful - NoReceipts bool // Do not calculate receipts - ReadOnly bool // Do no perform any block finalisation - StatelessExec bool // true is certain conditions (like state trie root hash matching) need to be relaxed for stateless EVM execution - RestoreState bool // Revert all changes made to the state (useful for constant system calls) + Debug bool // Enables debugging + Tracer *tracing.Hooks + NoRecursion bool // Disables call, callcode, delegate call and create + NoBaseFee bool // Forces the EIP-1559 baseFee to 0 (needed for 0 price calls) + SkipAnalysis bool // Whether we can skip jumpdest analysis based on the checked history + TraceJumpDest bool // Print transaction hashes where jumpdest analysis was useful + NoReceipts bool // Do not calculate receipts + ReadOnly bool // Do not perform any block finalisation + StatelessExec bool // true if certain conditions (like state trie root hash matching) need to be relaxed for stateless EVM execution + RestoreState bool // Revert all changes made to the state (useful for constant system calls) ExtraEips []int // Additional EIPS that are to be enabled } @@ -81,6 +84,53 @@ type ScopeContext struct { Contract *Contract } +// MemoryData returns the underlying memory slice. Callers must not modify the contents +// of the returned data. +func (ctx *ScopeContext) MemoryData() []byte { + if ctx.Memory == nil { + return nil + } + return ctx.Memory.Data() +} + +// StackData returns the stack data. Callers must not modify the contents +// of the returned data. +func (ctx *ScopeContext) StackData() []uint256.Int { + if ctx.Stack == nil { + return nil + } + return ctx.Stack.Data +} + +// Caller returns the current caller. +func (ctx *ScopeContext) Caller() common.Address { + return ctx.Contract.Caller() +} + +// Address returns the address where this scope of execution is taking place. +func (ctx *ScopeContext) Address() common.Address { + return ctx.Contract.Address() +} + +// CallValue returns the value supplied with this call. +func (ctx *ScopeContext) CallValue() *uint256.Int { + return ctx.Contract.Value() +} + +// CallInput returns the input/calldata with this call. Callers must not modify +// the contents of the returned data. +func (ctx *ScopeContext) CallInput() []byte { + return ctx.Contract.Input +} + +func (ctx *ScopeContext) Code() []byte { + return ctx.Contract.Code +} + +func (ctx *ScopeContext) CodeHash() libcommon.Hash { + return ctx.Contract.CodeHash +} + // keccakState wraps sha3.state. In addition to the usual hash methods, it also supports // Read to get a variable amount of data from the hash state.
Read is faster than Sum // because it doesn't copy the internal state, but also modifies the internal state. @@ -226,11 +276,12 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( in.depth++ defer func() { // first: capture data/memory/state/depth/etc... then clenup them - if in.cfg.Debug && err != nil { - if !logged { - in.cfg.Tracer.CaptureState(pcCopy, op, gasCopy, cost, callContext, in.returnData, in.depth, err) //nolint:errcheck - } else { - in.cfg.Tracer.CaptureFault(pcCopy, op, gasCopy, cost, callContext, in.depth, err) + if in.cfg.Tracer != nil && err != nil { + if !logged && in.evm.config.Tracer.OnOpcode != nil { + in.evm.config.Tracer.OnOpcode(pcCopy, byte(op), gasCopy, cost, callContext, in.returnData, in.depth, VMErrorFromErr(err)) + } + if logged && in.evm.config.Tracer.OnFault != nil { + in.evm.config.Tracer.OnFault(pcCopy, byte(op), gasCopy, cost, callContext, in.depth, VMErrorFromErr(err)) } } // this function must execute _after_: the `CaptureState` needs the stacks before @@ -252,7 +303,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( if steps%1000 == 0 && in.evm.Cancelled() { break } - if in.cfg.Debug { + if in.cfg.Debug || in.cfg.Tracer != nil { // Capture pre-execution values for tracing. logged, pcCopy, gasCopy = false, _pc, contract.Gas } @@ -267,7 +318,7 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( } else if sLen > operation.maxStack { return nil, &ErrStackOverflow{stackLen: sLen, limit: operation.maxStack} } - if !contract.UseGas(cost) { + if !contract.UseGas(cost, in.cfg.Tracer, tracing.GasChangeIgnored) { return nil, ErrOutOfGas } if operation.dynamicGas != nil { @@ -293,20 +344,27 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( var dynamicCost uint64 dynamicCost, err = operation.dynamicGas(in.evm, contract, locStack, mem, memorySize) cost += dynamicCost // for tracing - if err != nil || !contract.UseGas(dynamicCost) { + if err != nil || !contract.UseGas(dynamicCost, in.cfg.Tracer, tracing.GasChangeIgnored) { return nil, ErrOutOfGas } // Do tracing before memory expansion - if in.cfg.Debug { - in.cfg.Tracer.CaptureState(_pc, op, gasCopy, cost, callContext, in.returnData, in.depth, err) //nolint:errcheck - logged = true + if in.cfg.Tracer != nil { + if in.evm.config.Tracer.OnOpcode != nil { + in.evm.config.Tracer.OnOpcode(_pc, byte(op), gasCopy, cost, callContext, in.returnData, in.depth, VMErrorFromErr(err)) + logged = true + } } if memorySize > 0 { mem.Resize(memorySize) } - } else if in.cfg.Debug { - in.cfg.Tracer.CaptureState(_pc, op, gasCopy, cost, callContext, in.returnData, in.depth, err) //nolint:errcheck - logged = true + } else if in.cfg.Tracer != nil { + if in.evm.config.Tracer.OnGasChange != nil { + in.evm.config.Tracer.OnGasChange(gasCopy, gasCopy-cost, tracing.GasChangeCallOpCode) + } + if in.evm.config.Tracer.OnOpcode != nil { + in.evm.config.Tracer.OnOpcode(_pc, byte(op), gasCopy, cost, callContext, in.returnData, in.depth, VMErrorFromErr(err)) + logged = true + } } // execute the operation res, err = operation.execute(pc, in, callContext) diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 82c43dd3167..806ae494133 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -92,7 +92,6 @@ func validateAndFillMaxStack(jt *JumpTable) { // cancun, and prague instructions. 
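
The Run loop above now reports opcode-level events through OnOpcode and OnFault instead of CaptureState/CaptureFault, handing the tracer the *ScopeContext whose read-only accessors were added in this diff. A sketch of such a hook; the name of the interface the tracing package actually uses for the scope argument is an assumption, so a local stand-in declaring only the accessors shown above is used here:

package tracerdemo

import (
	"fmt"

	"github.com/holiman/uint256"
	libcommon "github.com/ledgerwatch/erigon-lib/common"
)

// opScope stands in for the scope interface OnOpcode receives; it lists only
// the accessor methods *vm.ScopeContext gained in this diff.
type opScope interface {
	MemoryData() []byte
	StackData() []uint256.Int
	Address() libcommon.Address
	CallValue() *uint256.Int
}

// onOpcode mirrors the call made from EVMInterpreter.Run: gas holds the
// pre-execution gas (gasCopy), cost the charged static+dynamic gas.
func onOpcode(pc uint64, op byte, gas, cost uint64, scope opScope, rData []byte, depth int, err error) {
	fmt.Printf("pc=%d op=%#x gas=%d cost=%d stack=%d addr=%x\n",
		pc, op, gas, cost, len(scope.StackData()), scope.Address())
}
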
func newPragueInstructionSet() JumpTable { instructionSet := newCancunInstructionSet() - enable2935(&instructionSet) validateAndFillMaxStack(&instructionSet) return instructionSet } diff --git a/core/vm/logger.go b/core/vm/logger.go deleted file mode 100644 index 5677233f97a..00000000000 --- a/core/vm/logger.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package vm - -import ( - "github.com/holiman/uint256" - libcommon "github.com/ledgerwatch/erigon-lib/common" - - "github.com/ledgerwatch/erigon/core/types" -) - -// EVMLogger is used to collect execution traces from an EVM transaction -// execution. CaptureState is called for each step of the VM with the -// current VM state. -// Note that reference types are actual VM data structures; make copies -// if you need to retain them beyond the current call. -type EVMLogger interface { - // Transaction level - CaptureTxStart(gasLimit uint64) - CaptureTxEnd(restGas uint64) - // Top call frame - CaptureStart(env *EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) - CaptureEnd(output []byte, usedGas uint64, err error) - // Rest of the frames - CaptureEnter(typ OpCode, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) - CaptureExit(output []byte, usedGas uint64, err error) - // Opcode level - CaptureState(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, rData []byte, depth int, err error) - CaptureFault(pc uint64, op OpCode, gas, cost uint64, scope *ScopeContext, depth int, err error) -} - -// FlushableTracer is a Tracer extension whose accumulated traces has to be -// flushed once the tracing is completed. 
-type FlushableTracer interface { - EVMLogger - Flush(tx types.Transaction) -} diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index 1e1b68c6995..678891adda9 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -23,6 +23,7 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/math" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/vm/stack" "github.com/ledgerwatch/erigon/params" ) @@ -163,7 +164,7 @@ func makeCallVariantGasCallEIP2929(oldCalculator gasFunc) gasFunc { if addrMod { // Charge the remaining difference here already, to correctly calculate available // gas for call - if !contract.UseGas(coldCost) { + if !contract.UseGas(coldCost, evm.Config().Tracer, tracing.GasChangeCallStorageColdAccess) { return 0, ErrOutOfGas } } @@ -235,22 +236,3 @@ func makeSelfdestructGasFn(refundsEnabled bool) gasFunc { } return gasFunc } - -// gasOpBlockhashEIP2935 returns the gas for the new BLOCKHASH operation post EIP-2935 -// If arg is outside of the params.BlockHashHistoryServeWindow, zero dynamic gas is returned -// EIP-2929 Cold/Warm storage read cost is applicable here similar to SLOAD -func gasOpBlockhashEIP2935(evm *EVM, contract *Contract, stack *stack.Stack, mem *Memory, memorySize uint64) (uint64, error) { - arg := stack.Peek() - arg64, overflow := arg.Uint64WithOverflow() - if overflow { - return 0, nil - } - if arg64 >= evm.Context.BlockNumber || arg64+params.BlockHashHistoryServeWindow < evm.Context.BlockNumber { - return 0, nil - } - storageSlot := libcommon.BytesToHash(uint256.NewInt(arg64 % params.BlockHashHistoryServeWindow).Bytes()) - if _, slotMod := evm.IntraBlockState().AddSlotToAccessList(params.HistoryStorageAddress, storageSlot); slotMod { - return params.ColdSloadCostEIP2929, nil - } - return params.WarmStorageReadCostEIP2929, nil -} diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go index cec1e7078b1..b1bd8656dcc 100644 --- a/core/vm/runtime/runtime.go +++ b/core/vm/runtime/runtime.go @@ -28,6 +28,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/crypto" ) @@ -111,8 +112,8 @@ func setDefaults(cfg *Config) { func Execute(code, input []byte, cfg *Config, bn uint64) ([]byte, *state.IntraBlockState, error) { if cfg == nil { cfg = new(Config) - setDefaults(cfg) } + setDefaults(cfg) externalState := cfg.State != nil var tx kv.RwTx @@ -135,6 +136,9 @@ func Execute(code, input []byte, cfg *Config, bn uint64) ([]byte, *state.IntraBl sender = vm.AccountRef(cfg.Origin) rules = vmenv.ChainRules() ) + if cfg.EVMConfig.Tracer != nil && cfg.EVMConfig.Tracer.OnTxStart != nil { + cfg.EVMConfig.Tracer.OnTxStart(vmenv.GetVMContext(), types.NewTransaction(0, address, cfg.Value, cfg.GasLimit, cfg.GasPrice, input), cfg.Origin) + } cfg.State.Prepare(rules, cfg.Origin, cfg.Coinbase, &address, vm.ActivePrecompiles(rules), nil) cfg.State.CreateAccount(address, true) // set the receiver's (the executing contract) code for execution. 
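
runtime.Execute now emits OnTxStart with a synthesized transaction and the VMContext built by GetVMContext before preparing state (Create and Call get the same treatment just below), so transaction-level hooks also fire in this standalone harness. A minimal usage sketch; the STOP bytecode and the printed fields are illustrative, while Config and Execute are used with the shapes shown in this file:

package main

import (
	"fmt"

	libcommon "github.com/ledgerwatch/erigon-lib/common"

	"github.com/ledgerwatch/erigon/core/tracing"
	"github.com/ledgerwatch/erigon/core/types"
	"github.com/ledgerwatch/erigon/core/vm"
	"github.com/ledgerwatch/erigon/core/vm/runtime"
)

func main() {
	cfg := &runtime.Config{
		EVMConfig: vm.Config{
			Tracer: &tracing.Hooks{
				OnTxStart: func(vmctx *tracing.VMContext, tx types.Transaction, from libcommon.Address) {
					fmt.Printf("tx start: block=%v from=%x\n", vmctx.BlockNumber, from)
				},
			},
		},
	}
	// 0x00 is STOP; with cfg.State unset, Execute runs against an in-memory DB
	ret, _, err := runtime.Execute([]byte{0x00}, nil, cfg, 0)
	fmt.Println(ret, err)
}
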
@@ -179,6 +183,10 @@ func Create(input []byte, cfg *Config, blockNr uint64) ([]byte, libcommon.Addres sender = vm.AccountRef(cfg.Origin) rules = vmenv.ChainRules() ) + if cfg.EVMConfig.Tracer != nil && cfg.EVMConfig.Tracer.OnTxStart != nil { + cfg.EVMConfig.Tracer.OnTxStart(vmenv.GetVMContext(), types.NewContractCreation(0, cfg.Value, cfg.GasLimit, cfg.GasPrice, input), cfg.Origin) + } + cfg.State.Prepare(rules, cfg.Origin, cfg.Coinbase, nil, vm.ActivePrecompiles(rules), nil) // Call the code with the given configuration. @@ -204,6 +212,9 @@ func Call(address libcommon.Address, input []byte, cfg *Config) ([]byte, uint64, sender := cfg.State.GetOrNewStateObject(cfg.Origin) statedb := cfg.State rules := vmenv.ChainRules() + if cfg.EVMConfig.Tracer != nil && cfg.EVMConfig.Tracer.OnTxStart != nil { + cfg.EVMConfig.Tracer.OnTxStart(vmenv.GetVMContext(), types.NewTransaction(0, address, cfg.Value, cfg.GasLimit, cfg.GasPrice, input), cfg.Origin) + } statedb.Prepare(rules, cfg.Origin, cfg.Coinbase, &address, vm.ActivePrecompiles(rules), nil) // Call the code with the given configuration. diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go index 8553064707c..f4d8ffb801f 100644 --- a/core/vm/runtime/runtime_test.go +++ b/core/vm/runtime/runtime_test.go @@ -30,15 +30,12 @@ import ( "github.com/ledgerwatch/erigon/accounts/abi" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/misc" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/asm" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/tracers/logger" - "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/rlp" ) func TestDefaults(t *testing.T) { @@ -238,7 +235,7 @@ func fakeHeader(n uint64, parentHash libcommon.Hash) *types.Header { Coinbase: libcommon.HexToAddress("0x00000000000000000000000000000000deadbeef"), Number: big.NewInt(int64(n)), ParentHash: parentHash, - Time: n, + Time: 1000, Nonce: types.BlockNonce{0x1}, Extra: []byte{}, Difficulty: big.NewInt(0), @@ -247,45 +244,6 @@ func fakeHeader(n uint64, parentHash libcommon.Hash) *types.Header { return &header } -// FakeChainHeaderReader implements consensus.ChainHeaderReader interface -type FakeChainHeaderReader struct{} - -func (cr *FakeChainHeaderReader) GetHeaderByHash(hash libcommon.Hash) *types.Header { - return nil -} -func (cr *FakeChainHeaderReader) GetHeaderByNumber(number uint64) *types.Header { - return cr.GetHeaderByHash(libcommon.BigToHash(big.NewInt(int64(number)))) -} -func (cr *FakeChainHeaderReader) Config() *chain.Config { return nil } -func (cr *FakeChainHeaderReader) CurrentHeader() *types.Header { return nil } - -// GetHeader returns a fake header with the parentHash equal to the number - 1 -func (cr *FakeChainHeaderReader) GetHeader(hash libcommon.Hash, number uint64) *types.Header { - return &types.Header{ - Coinbase: libcommon.HexToAddress("0x00000000000000000000000000000000deadbeef"), - Number: big.NewInt(int64(number)), - ParentHash: libcommon.BigToHash(big.NewInt(int64(number - 1))), - Time: number, - Nonce: types.BlockNonce{0x1}, - Extra: []byte{}, - Difficulty: big.NewInt(0), - GasLimit: 100000, - } -} -func (cr *FakeChainHeaderReader) GetBlock(hash libcommon.Hash, number uint64) *types.Block { - return nil -} -func (cr *FakeChainHeaderReader) HasBlock(hash libcommon.Hash, number uint64) bool { return false } -func (cr 
*FakeChainHeaderReader) GetTd(hash libcommon.Hash, number uint64) *big.Int { return nil } -func (cr *FakeChainHeaderReader) FrozenBlocks() uint64 { return 0 } -func (cr *FakeChainHeaderReader) BorEventsByBlock(hash libcommon.Hash, number uint64) []rlp.RawValue { - return nil -} -func (cr *FakeChainHeaderReader) BorStartEventID(hash libcommon.Hash, number uint64) uint64 { - return 0 -} -func (cr *FakeChainHeaderReader) BorSpan(spanId uint64) []byte { return nil } - type dummyChain struct { counter int } @@ -355,14 +313,10 @@ func TestBlockhash(t *testing.T) { // The method call to 'test()' input := libcommon.Hex2Bytes("f8a8fd6d") chain := &dummyChain{} - cfg := &Config{ + ret, _, err := Execute(data, input, &Config{ GetHashFn: core.GetHashFn(header, chain.GetHeader), BlockNumber: new(big.Int).Set(header.Number), - Time: new(big.Int), - } - setDefaults(cfg) - cfg.ChainConfig.PragueTime = big.NewInt(1) - ret, _, err := Execute(data, input, cfg, header.Number.Uint64()) + }, header.Number.Uint64()) if err != nil { t.Fatalf("expected no error, got %v", err) } @@ -387,73 +341,6 @@ func TestBlockhash(t *testing.T) { } } -func TestBlockHashEip2935(t *testing.T) { - t.Parallel() - - // This is the contract we're using. It requests the blockhash for current num (should be all zeroes), We are fetching BlockHash for current block (should be zer0), parent block, last block which is supposed to be there (head - HISTORY_SERVE_WINDOW) and also one block before that (should be zero) - - /* - pragma solidity ^0.8.25; - contract BlockHashTestPrague{ - function test() public view returns (bytes32, bytes32, bytes32, bytes32){ - uint256 head = block.number; - bytes32 zero = blockhash(head); - bytes32 first = blockhash(head-1); - bytes32 last = blockhash(head - 8192); - bytes32 beyond = blockhash(head - 8193); - return (zero, first, last, beyond); - } - } - */ - // The contract above - data := libcommon.Hex2Bytes("608060405234801561000f575f80fd5b5060043610610029575f3560e01c8063f8a8fd6d1461002d575b5f80fd5b61003561004e565b60405161004594939291906100bf565b60405180910390f35b5f805f805f4390505f814090505f6001836100699190610138565b4090505f6120008461007b9190610138565b4090505f6120018561008d9190610138565b409050838383839850985098509850505050505090919293565b5f819050919050565b6100b9816100a7565b82525050565b5f6080820190506100d25f8301876100b0565b6100df60208301866100b0565b6100ec60408301856100b0565b6100f960608301846100b0565b95945050505050565b5f819050919050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b5f61014282610102565b915061014d83610102565b92508282039050818111156101655761016461010b565b5b9291505056fea2646970667358221220bac67d00c05154c1dca13fe3c1493172d44692d312cb3fd72a3d7457874d595464736f6c63430008190033") - // The method call to 'test()' - input := libcommon.Hex2Bytes("f8a8fd6d") - - // Current head - n := uint64(10000) - parentHash := libcommon.Hash{} - s := common.LeftPadBytes(big.NewInt(int64(n-1)).Bytes(), 32) - copy(parentHash[:], s) - fakeHeaderReader := &FakeChainHeaderReader{} - header := fakeHeaderReader.GetHeader(libcommon.BigToHash(big.NewInt(int64(n))), n) - - chain := &dummyChain{} - cfg := &Config{ - GetHashFn: core.GetHashFn(header, chain.GetHeader), - BlockNumber: new(big.Int).Set(header.Number), - Time: big.NewInt(10000), - } - setDefaults(cfg) - cfg.ChainConfig.PragueTime = big.NewInt(10000) - _, tx := memdb.NewTestTx(t) - cfg.State = state.New(state.NewPlainStateReader(tx)) - cfg.State.CreateAccount(params.HistoryStorageAddress, true) - 
misc.StoreBlockHashesEip2935(header, cfg.State, cfg.ChainConfig, &FakeChainHeaderReader{}) - - ret, _, err := Execute(data, input, cfg, header.Number.Uint64()) - if err != nil { - t.Fatalf("expected no error, got %v", err) - } - if len(ret) != 128 { - t.Fatalf("expected returndata to be 128 bytes, got %d", len(ret)) - } - - zero := new(big.Int).SetBytes(ret[0:32]) - first := new(big.Int).SetBytes(ret[32:64]) - last := new(big.Int).SetBytes(ret[64:96]) - beyond := new(big.Int).SetBytes(ret[96:128]) - if zero.Sign() != 0 || beyond.Sign() != 0 { - t.Fatalf("expected zeroes, got %x %x", ret[0:32], ret[96:128]) - } - if first.Uint64() != 9999 { - t.Fatalf("first block should be 9999, got %d (%x)", first, ret[32:64]) - } - if last.Uint64() != 1808 { - t.Fatalf("last block should be 1808, got %d (%x)", last, ret[64:96]) - } -} - // benchmarkNonModifyingCode benchmarks code, but if the code modifies the // state, this should not be used, since it does not reset the state between runs. func benchmarkNonModifyingCode(b *testing.B, gas uint64, code []byte, name string) { //nolint:unparam @@ -634,16 +521,14 @@ func TestEip2929Cases(t *testing.T) { fmt.Printf("%v\n\nBytecode: \n```\n0x%x\n```\nOperations: \n```\n%v\n```\n\n", comment, code, ops) - cfg := &Config{ + //nolint:errcheck + Execute(code, nil, &Config{ EVMConfig: vm.Config{ Debug: true, - Tracer: logger.NewMarkdownLogger(nil, os.Stdout), + Tracer: logger.NewMarkdownLogger(nil, os.Stdout).Hooks(), ExtraEips: []int{2929}, }, - } - setDefaults(cfg) - //nolint:errcheck - Execute(code, nil, cfg, 0) + }, 0) } { // First eip testcase diff --git a/erigon-lib/commitment/bin_patricia_hashed.go b/erigon-lib/commitment/bin_patricia_hashed.go index 33deb3a9826..6db6f0eff20 100644 --- a/erigon-lib/commitment/bin_patricia_hashed.go +++ b/erigon-lib/commitment/bin_patricia_hashed.go @@ -1059,7 +1059,7 @@ func (bph *BinPatriciaHashed) fold() (err error) { upBinaryCell.extLen = 0 upBinaryCell.downHashedLen = 0 if bph.branchBefore[row] { - _, err = bph.branchEncoder.CollectUpdate(bph.ctx, updateKey, 0, bph.touchMap[row], 0, RetrieveCellNoop) + _, err = bph.branchEncoder.CollectUpdate(updateKey, 0, bph.touchMap[row], 0, RetrieveCellNoop) if err != nil { return fmt.Errorf("failed to encode leaf node update: %w", err) } @@ -1087,7 +1087,7 @@ func (bph *BinPatriciaHashed) fold() (err error) { upBinaryCell.fillFromLowerBinaryCell(cell, depth, bph.currentKey[upDepth:bph.currentKeyLen], nibble) // Delete if it existed if bph.branchBefore[row] { - _, err = bph.branchEncoder.CollectUpdate(bph.ctx, updateKey, 0, bph.touchMap[row], 0, RetrieveCellNoop) + _, err = bph.branchEncoder.CollectUpdate(updateKey, 0, bph.touchMap[row], 0, RetrieveCellNoop) if err != nil { return fmt.Errorf("failed to encode leaf node update: %w", err) } @@ -1162,7 +1162,7 @@ func (bph *BinPatriciaHashed) fold() (err error) { var err error _ = cellGetter - lastNibble, err = bph.branchEncoder.CollectUpdate(bph.ctx, updateKey, bitmap, bph.touchMap[row], bph.afterMap[row], cellGetter) + lastNibble, err = bph.branchEncoder.CollectUpdate(updateKey, bitmap, bph.touchMap[row], bph.afterMap[row], cellGetter) if err != nil { return fmt.Errorf("failed to encode branch update: %w", err) } @@ -1366,7 +1366,7 @@ func (bph *BinPatriciaHashed) ProcessKeys(ctx context.Context, plainKeys [][]byt if err != nil { return nil, fmt.Errorf("root hash evaluation failed: %w", err) } - err = bph.branchEncoder.Load(bph.ctx, etl.TransformArgs{Quit: ctx.Done()}) + err = 
bph.branchEncoder.Load(loadToPatriciaContextFunc(bph.ctx), etl.TransformArgs{Quit: ctx.Done()}) if err != nil { return nil, fmt.Errorf("branch update failed: %w", err) } @@ -1530,10 +1530,6 @@ func (bph *BinPatriciaHashed) SetState(buf []byte) error { return nil } -func (bph *BinPatriciaHashed) ProcessTree(ctx context.Context, t *UpdateTree, lp string) (rootHash []byte, err error) { - panic("not implemented") -} - func (bph *BinPatriciaHashed) ProcessUpdates(ctx context.Context, plainKeys [][]byte, updates []Update) (rootHash []byte, err error) { for i, pk := range plainKeys { updates[i].hashedKey = hexToBin(pk) @@ -1619,7 +1615,7 @@ func (bph *BinPatriciaHashed) ProcessUpdates(ctx context.Context, plainKeys [][] return nil, fmt.Errorf("root hash evaluation failed: %w", err) } - err = bph.branchEncoder.Load(bph.ctx, etl.TransformArgs{Quit: ctx.Done()}) + err = bph.branchEncoder.Load(loadToPatriciaContextFunc(bph.ctx), etl.TransformArgs{Quit: ctx.Done()}) if err != nil { return nil, fmt.Errorf("branch update failed: %w", err) } diff --git a/erigon-lib/commitment/commitment.go b/erigon-lib/commitment/commitment.go index 55e0dd81cc6..33d8e05825b 100644 --- a/erigon-lib/commitment/commitment.go +++ b/erigon-lib/commitment/commitment.go @@ -5,23 +5,23 @@ import ( "context" "encoding/binary" "fmt" - "github.com/google/btree" - "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/common/cryptozerocopy" - "github.com/ledgerwatch/erigon-lib/metrics" - "github.com/ledgerwatch/erigon-lib/types" - "github.com/ledgerwatch/log/v3" - "golang.org/x/crypto/sha3" + "hash" "math/bits" "strings" + "github.com/ledgerwatch/log/v3" + "golang.org/x/crypto/sha3" + + "github.com/ledgerwatch/erigon-lib/metrics" + + "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/etl" ) var ( - mxKeys = metrics.GetOrCreateCounter("domain_commitment_keys") - mxBranchUpdatesApplied = metrics.GetOrCreateCounter("domain_commitment_updates_applied") + mxCommitmentKeys = metrics.GetOrCreateCounter("domain_commitment_keys") + mxCommitmentBranchUpdates = metrics.GetOrCreateCounter("domain_commitment_updates_applied") ) // Trie represents commitment variant. 
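
This refactor moves the read-merge-write against previously committed branches out of CollectUpdate: fold() now only buffers encoded rows in the ETL collector, and the merge with existing state happens once per pass inside the load callback (loadToPatriciaContextFunc, defined just below). A sketch of the resulting two-phase flow, assuming it lives in package commitment with a PatriciaContext and a cell getter in scope:

package commitment

import (
	"context"

	"github.com/ledgerwatch/erigon-lib/etl"
)

// flushBranchUpdates is a hypothetical helper showing the pattern used by
// fold()/ProcessKeys: collect encoded branch rows first, then load them
// through the patricia context in a single pass.
func flushBranchUpdates(ctx context.Context, be *BranchEncoder, pctx PatriciaContext,
	updateKey []byte, bitmap, touchMap, afterMap uint16,
	readCell func(nibble int, skip bool) (*Cell, error)) error {
	// phase 1: encode and buffer; no state reads happen here anymore
	if _, err := be.CollectUpdate(updateKey, bitmap, touchMap, afterMap, readCell); err != nil {
		return err
	}
	// phase 2: one load pass; the LoadFunc reads the committed branch,
	// merges the buffered update into it and writes back via PutBranch
	return be.Load(loadToPatriciaContextFunc(pctx), etl.TransformArgs{Quit: ctx.Done()})
}
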
@@ -41,8 +41,6 @@ type Trie interface { // Set context for state IO ResetContext(ctx PatriciaContext) - ProcessTree(ctx context.Context, tree *UpdateTree, logPrefix string) (rootHash []byte, err error) - // Reads updates from storage ProcessKeys(ctx context.Context, pk [][]byte, logPrefix string) (rootHash []byte, err error) @@ -74,19 +72,14 @@ const ( VariantBinPatriciaTrie TrieVariant = "bin-patricia-hashed" ) -func InitializeTrieAndUpdateTree(tv TrieVariant, mode Mode, tmpdir string) (Trie, *UpdateTree) { +func InitializeTrie(tv TrieVariant) Trie { switch tv { case VariantBinPatriciaTrie: - trie := NewBinPatriciaHashed(length.Addr, nil) - fn := func(key []byte) []byte { return hexToBin(key) } - tree := NewUpdateTree(mode, tmpdir, fn) - return trie, tree + return NewBinPatriciaHashed(length.Addr, nil) case VariantHexPatriciaTrie: fallthrough default: - trie := NewHexPatriciaHashed(length.Addr, nil) - tree := NewUpdateTree(mode, tmpdir, trie.hashAndNibblizeKey) - return trie, tree + return NewHexPatriciaHashed(length.Addr, nil) } } @@ -152,7 +145,6 @@ func (branchData BranchData) String() string { type BranchEncoder struct { buf *bytes.Buffer bitmapBuf [binary.MaxVarintLen64]byte - merger *BranchMerger updates *etl.Collector tmpdir string } @@ -161,7 +153,6 @@ func NewBranchEncoder(sz uint64, tmpdir string) *BranchEncoder { be := &BranchEncoder{ buf: bytes.NewBuffer(make([]byte, sz)), tmpdir: tmpdir, - merger: NewHexBranchMerger(sz / 2), } be.initCollector() return be @@ -172,20 +163,26 @@ func (be *BranchEncoder) initCollector() { be.updates.LogLvl(log.LvlDebug) } -func (be *BranchEncoder) Load(pc PatriciaContext, args etl.TransformArgs) error { - if err := be.updates.Load(nil, "", func(prefix, update []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { +// reads the previously committed value and merges the current update with it if needed.
+func loadToPatriciaContextFunc(pc PatriciaContext) etl.LoadFunc { + return func(prefix, update []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { stateValue, stateStep, err := pc.GetBranch(prefix) if err != nil { return err } + // this update ensures that if a commitment is present, each branch is also present in the commitment state at that moment, at the cost of extra storage +//fmt.Printf("commitment branch encoder merge prefix [%x] [%x]->[%x]\n%v\n", prefix, stateValue, update, BranchData(update).String()) cp, cu := common.Copy(prefix), common.Copy(update) // has to copy :( if err = pc.PutBranch(cp, cu, stateValue, stateStep); err != nil { return err } - mxBranchUpdatesApplied.Inc() return nil - }, args); err != nil { + } +} + +func (be *BranchEncoder) Load(load etl.LoadFunc, args etl.TransformArgs) error { + if err := be.updates.Load(nil, "", load, args); err != nil { return err } be.initCollector() @@ -193,34 +190,20 @@ } func (be *BranchEncoder) CollectUpdate( - ctx PatriciaContext, prefix []byte, bitmap, touchMap, afterMap uint16, readCell func(nibble int, skip bool) (*Cell, error), ) (lastNibble int, err error) { - var update []byte - update, lastNibble, err = be.EncodeBranch(bitmap, touchMap, afterMap, readCell) + v, ln, err := be.EncodeBranch(bitmap, touchMap, afterMap, readCell) if err != nil { return 0, err } - - prev, prevStep, err := ctx.GetBranch(prefix) - _ = prevStep - if err != nil { - return 0, err - } - if len(prev) > 0 { - update, err = be.merger.Merge(prev, update) - if err != nil { - return 0, err - } - } - //fmt.Printf("collectBranchUpdate [%x] -> [%x]\n", prefix, update) - if err = be.updates.Collect(prefix, update); err != nil { + //fmt.Printf("collectBranchUpdate [%x] -> [%x]\n", prefix, []byte(v)) + if err := be.updates.Collect(prefix, v); err != nil { return 0, err } - return lastNibble, nil + return ln, nil } // Encoded result should be copied before next call to EncodeBranch, underlying slice is reused @@ -470,7 +453,7 @@ func (branchData BranchData) MergeHexBranches(branchData2 BranchData, newData [] var bitmapBuf [4]byte binary.BigEndian.PutUint16(bitmapBuf[0:], touchMap1|touchMap2) binary.BigEndian.PutUint16(bitmapBuf[2:], afterMap2) - newData = append(newData[:0], bitmapBuf[:]...) + newData = append(newData, bitmapBuf[:]...) for bitset, j := bitmap1|bitmap2, 0; bitset != 0; j++ { bit := bitset & -bitset if bitmap2&bit != 0 { @@ -552,12 +535,13 @@ } type BranchMerger struct { - buf []byte - num [4]byte + buf *bytes.Buffer + num [4]byte + keccak hash.Hash } func NewHexBranchMerger(capacity uint64) *BranchMerger { - return &BranchMerger{buf: make([]byte, capacity)} + return &BranchMerger{buf: bytes.NewBuffer(make([]byte, capacity)), keccak: sha3.NewLegacyKeccak256()} } // MergeHexBranches combines two branchData, number 2 coming after (and potentially shadowing) number 1 @@ -583,14 +567,19 @@ func (m *BranchMerger) Merge(branch1 BranchData, branch2 BranchData) (BranchData binary.BigEndian.PutUint16(m.num[2:], afterMap2) dataPos := 4 - m.buf = append(m.buf[:0], m.num[:]...)
+ m.buf.Reset() + if _, err := m.buf.Write(m.num[:]); err != nil { + return nil, err + } for bitset, j := bitmap1|bitmap2, 0; bitset != 0; j++ { bit := bitset & -bitset if bitmap2&bit != 0 { // Add fields from branch2 fieldBits := PartFlags(branch2[pos2]) - m.buf = append(m.buf, byte(fieldBits)) + if err := m.buf.WriteByte(byte(fieldBits)); err != nil { + return nil, err + } pos2++ for i := 0; i < bits.OnesCount8(byte(fieldBits)); i++ { @@ -601,14 +590,19 @@ func (m *BranchMerger) Merge(branch1 BranchData, branch2 BranchData) (BranchData return nil, fmt.Errorf("MergeHexBranches branch2: size overflow for length") } - m.buf = append(m.buf, branch2[pos2:pos2+n]...) + _, err := m.buf.Write(branch2[pos2 : pos2+n]) + if err != nil { + return nil, err + } pos2 += n dataPos += n if len(branch2) < pos2+int(l) { return nil, fmt.Errorf("MergeHexBranches branch2 is too small: expected at least %d got %d bytes", pos2+int(l), len(branch2)) } if l > 0 { - m.buf = append(m.buf, branch2[pos2:pos2+int(l)]...) + if _, err := m.buf.Write(branch2[pos2 : pos2+int(l)]); err != nil { + return nil, err + } pos2 += int(l) dataPos += int(l) } @@ -618,7 +612,9 @@ func (m *BranchMerger) Merge(branch1 BranchData, branch2 BranchData) (BranchData add := (touchMap2&bit == 0) && (afterMap2&bit != 0) // Add fields from branchData1 fieldBits := PartFlags(branch1[pos1]) if add { - m.buf = append(m.buf, byte(fieldBits)) + if err := m.buf.WriteByte(byte(fieldBits)); err != nil { + return nil, err + } } pos1++ for i := 0; i < bits.OnesCount8(byte(fieldBits)); i++ { @@ -628,9 +624,10 @@ func (m *BranchMerger) Merge(branch1 BranchData, branch2 BranchData) (BranchData } else if n < 0 { return nil, fmt.Errorf("MergeHexBranches branch1: size overflow for length") } - if add { - m.buf = append(m.buf, branch1[pos1:pos1+n]...) + if _, err := m.buf.Write(branch1[pos1 : pos1+n]); err != nil { + return nil, err + } } pos1 += n if len(branch1) < pos1+int(l) { @@ -640,7 +637,9 @@ func (m *BranchMerger) Merge(branch1 BranchData, branch2 BranchData) (BranchData } if l > 0 { if add { - m.buf = append(m.buf, branch1[pos1:pos1+int(l)]...) 
+ if _, err := m.buf.Write(branch1[pos1 : pos1+int(l)]); err != nil { + return nil, err + } } pos1 += int(l) } @@ -648,7 +647,9 @@ func (m *BranchMerger) Merge(branch1 BranchData, branch2 BranchData) (BranchData } bitset ^= bit } - return m.buf, nil + target := make([]byte, m.buf.Len()) + copy(target, m.buf.Bytes()) + return target, nil } func ParseTrieVariant(s string) TrieVariant { @@ -757,254 +758,3 @@ func DecodeBranchAndCollectStat(key, branch []byte, tv TrieVariant) *BranchStat } return stat } - -// Defines how to evaluate commitments -type Mode uint - -const ( - ModeDisabled Mode = 0 - ModeDirect Mode = 1 - ModeUpdate Mode = 2 -) - -func (m Mode) String() string { - switch m { - case ModeDisabled: - return "disabled" - case ModeDirect: - return "direct" - case ModeUpdate: - return "update" - default: - return "unknown" - } -} - -func ParseCommitmentMode(s string) Mode { - var mode Mode - switch s { - case "off": - mode = ModeDisabled - case "update": - mode = ModeUpdate - default: - mode = ModeDirect - } - return mode -} - -type UpdateTree struct { - keccak cryptozerocopy.KeccakState - hasher keyHasher - keys map[string]struct{} - tree *btree.BTreeG[*KeyUpdate] - mode Mode - tmpdir string -} - -type keyHasher func(key []byte) []byte - -func keyHasherNoop(key []byte) []byte { return key } - -func NewUpdateTree(m Mode, tmpdir string, hasher keyHasher) *UpdateTree { - t := &UpdateTree{ - keccak: sha3.NewLegacyKeccak256().(cryptozerocopy.KeccakState), - hasher: hasher, - tmpdir: tmpdir, - mode: m, - } - if t.mode == ModeDirect { - t.keys = make(map[string]struct{}) - } else if t.mode == ModeUpdate { - t.tree = btree.NewG[*KeyUpdate](64, keyUpdateLessFn) - } - return t -} - -// TouchPlainKey marks plainKey as updated and applies different fn for different key types -// (different behaviour for Code, Account and Storage key modifications). 
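
BranchMerger.Merge above now assembles its result in an internal bytes.Buffer and copies it out before returning, so a merged branch no longer aliases the merger's scratch buffer; judging by the append-into-m.buf code it replaces (and the skipped aliasing test deleted from commitment_test.go below), earlier results used to be clobbered by the next Merge call. A small sketch of the pattern that is now safe, assuming package commitment:

package commitment

// mergeTwice is a hypothetical helper: with the copy-out fix, `first` stays
// intact after the second Merge call reuses the merger's buffer.
func mergeTwice(m *BranchMerger, a, b, c BranchData) (first, second BranchData, err error) {
	first, err = m.Merge(a, b)
	if err != nil {
		return nil, nil, err
	}
	second, err = m.Merge(a, c) // no longer invalidates `first`
	if err != nil {
		return nil, nil, err
	}
	return first, second, nil
}
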
-func (t *UpdateTree) TouchPlainKey(key, val []byte, fn func(c *KeyUpdate, val []byte)) { - switch t.mode { - case ModeUpdate: - pivot, updated := &KeyUpdate{plainKey: key}, false - - t.tree.DescendLessOrEqual(pivot, func(item *KeyUpdate) bool { - if bytes.Equal(item.plainKey, pivot.plainKey) { - fn(item, val) - updated = true - } - return false - }) - if !updated { - pivot.update.plainKey = pivot.plainKey - pivot.update.hashedKey = t.hasher(pivot.plainKey) - fn(pivot, val) - t.tree.ReplaceOrInsert(pivot) - } - case ModeDirect: - t.keys[string(key)] = struct{}{} - default: - } -} - -func (t *UpdateTree) Size() (updates uint64) { - switch t.mode { - case ModeDirect: - return uint64(len(t.keys)) - case ModeUpdate: - return uint64(t.tree.Len()) - default: - return 0 - } -} - -func (t *UpdateTree) TouchAccount(c *KeyUpdate, val []byte) { - if len(val) == 0 { - c.update.Flags = DeleteUpdate - return - } - if c.update.Flags&DeleteUpdate != 0 { - c.update.Flags ^= DeleteUpdate - } - nonce, balance, chash := types.DecodeAccountBytesV3(val) - if c.update.Nonce != nonce { - c.update.Nonce = nonce - c.update.Flags |= NonceUpdate - } - if !c.update.Balance.Eq(balance) { - c.update.Balance.Set(balance) - c.update.Flags |= BalanceUpdate - } - if !bytes.Equal(chash, c.update.CodeHashOrStorage[:]) { - if len(chash) == 0 { - c.update.ValLength = length.Hash - copy(c.update.CodeHashOrStorage[:], EmptyCodeHash) - } else { - copy(c.update.CodeHashOrStorage[:], chash) - c.update.ValLength = length.Hash - c.update.Flags |= CodeUpdate - } - } -} - -func (t *UpdateTree) TouchStorage(c *KeyUpdate, val []byte) { - c.update.ValLength = len(val) - if len(val) == 0 { - c.update.Flags = DeleteUpdate - } else { - c.update.Flags |= StorageUpdate - copy(c.update.CodeHashOrStorage[:], val) - } -} - -func (t *UpdateTree) TouchCode(c *KeyUpdate, val []byte) { - t.keccak.Reset() - t.keccak.Write(val) - t.keccak.Read(c.update.CodeHashOrStorage[:]) - if c.update.Flags == DeleteUpdate && len(val) == 0 { - c.update.Flags = DeleteUpdate - c.update.ValLength = 0 - return - } - c.update.ValLength = length.Hash - if len(val) != 0 { - c.update.Flags |= CodeUpdate - } -} - -func (t *UpdateTree) Close() { - if t.keys != nil { - clear(t.keys) - } - if t.tree != nil { - t.tree.Clear(true) - t.tree = nil - } -} - -func (t *UpdateTree) HashSort(ctx context.Context, fn func(hk, pk []byte) error) error { - switch t.mode { - case ModeDirect: - collector := etl.NewCollector("commitment", t.tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize/4), log.Root().New("update-tree")) - defer collector.Close() - collector.LogLvl(log.LvlDebug) - collector.SortAndFlushInBackground(true) - - for k := range t.keys { - select { - case <-ctx.Done(): - return nil - default: - } - if err := collector.Collect(t.hasher([]byte(k)), []byte(k)); err != nil { - return err - } - } - clear(t.keys) - - err := collector.Load(nil, "", func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { - return fn(k, v) - }, etl.TransformArgs{Quit: ctx.Done()}) - if err != nil { - return err - } - case ModeUpdate: - t.tree.Ascend(func(item *KeyUpdate) bool { - select { - case <-ctx.Done(): - return false - default: - } - - if err := fn(item.update.hashedKey, item.plainKey); err != nil { - return false - } - return true - }) - t.tree.Clear(true) - default: - return nil - } - return nil -} - -// Returns list of both plain and hashed keys. If .mode is ModeUpdate, updates also returned. -// No ordering guarantees is provided. 
-func (t *UpdateTree) List(clear bool) ([][]byte, []Update) { - switch t.mode { - case ModeDirect: - plainKeys := make([][]byte, 0, len(t.keys)) - err := t.HashSort(context.Background(), func(hk, pk []byte) error { - plainKeys = append(plainKeys, common.Copy(pk)) - return nil - }) - if err != nil { - return nil, nil - } - return plainKeys, nil - case ModeUpdate: - plainKeys := make([][]byte, t.tree.Len()) - updates := make([]Update, t.tree.Len()) - i := 0 - t.tree.Ascend(func(item *KeyUpdate) bool { - plainKeys[i], updates[i] = item.plainKey, item.update - i++ - return true - }) - if clear { - t.tree.Clear(true) - } - return plainKeys, updates - default: - return nil, nil - } -} - -type KeyUpdate struct { - plainKey []byte - update Update -} - -func keyUpdateLessFn(i, j *KeyUpdate) bool { - return bytes.Compare(i.plainKey, j.plainKey) < 0 -} diff --git a/erigon-lib/commitment/commitment_bench_test.go b/erigon-lib/commitment/commitment_bench_test.go deleted file mode 100644 index 424eab422ed..00000000000 --- a/erigon-lib/commitment/commitment_bench_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package commitment - -import ( - "encoding/binary" - "github.com/ledgerwatch/erigon-lib/common" - "github.com/stretchr/testify/require" - "testing" -) - -func BenchmarkBranchMerger_Merge(b *testing.B) { - b.StopTimer() - row, bm := generateCellRow(b, 16) - - be := NewBranchEncoder(1024, b.TempDir()) - enc, _, err := be.EncodeBranch(bm, bm, bm, func(i int, skip bool) (*Cell, error) { - return row[i], nil - }) - require.NoError(b, err) - - var copies [16][]byte - var tm uint16 - am := bm - - for i := 15; i >= 0; i-- { - row[i] = nil - tm, bm, am = uint16(1<>1, am>>1 - enc1, _, err := be.EncodeBranch(bm, tm, am, func(i int, skip bool) (*Cell, error) { - return row[i], nil - }) - require.NoError(b, err) - - copies[i] = common.Copy(enc1) - } - - b.StartTimer() - bmg := NewHexBranchMerger(4096) - var ci int - for i := 0; i < b.N; i++ { - _, err := bmg.Merge(enc, copies[ci]) - if err != nil { - b.Fatal(err) - } - ci++ - if ci == len(copies) { - ci = 0 - } - } -} - -func BenchmarkBranchData_ReplacePlainKeys(b *testing.B) { - row, bm := generateCellRow(b, 16) - - cells, am := unfoldBranchDataFromString(b, "86e586e5082035e72a782b51d9c98548467e3f868294d923cdbbdf4ce326c867bd972c4a2395090109203b51781a76dc87640aea038e3fdd8adca94049aaa436735b162881ec159f6fb408201aa2fa41b5fb019e8abf8fc32800805a2743cfa15373cf64ba16f4f70e683d8e0404a192d9050404f993d9050404e594d90508208642542ff3ce7d63b9703e85eb924ab3071aa39c25b1651c6dda4216387478f10404bd96d905") - for i, c := range cells { - if c == nil { - continue - } - if c.apl > 0 { - offt, _ := binary.Uvarint(c.apk[:c.apl]) - b.Logf("%d apk %x, offt %d\n", i, c.apk[:c.apl], offt) - } - if c.spl > 0 { - offt, _ := binary.Uvarint(c.spk[:c.spl]) - b.Logf("%d spk %x offt %d\n", i, c.spk[:c.spl], offt) - } - - } - _ = cells - _ = am - - cg := func(nibble int, skip bool) (*Cell, error) { - return row[nibble], nil - } - - be := NewBranchEncoder(1024, b.TempDir()) - enc, _, err := be.EncodeBranch(bm, bm, bm, cg) - require.NoError(b, err) - - original := common.Copy(enc) - for i := 0; i < b.N; i++ { - target := make([]byte, 0, len(enc)) - oldKeys := make([][]byte, 0) - replaced, err := enc.ReplacePlainKeys(target, func(key []byte, isStorage bool) ([]byte, error) { - oldKeys = append(oldKeys, key) - if isStorage { - return key[:8], nil - } - return key[:4], nil - }) - require.NoError(b, err) - require.Truef(b, len(replaced) < len(enc), "replaced expected to be shorter than original enc") - - keyI 
:= 0 - replacedBack, err := replaced.ReplacePlainKeys(nil, func(key []byte, isStorage bool) ([]byte, error) { - require.EqualValues(b, oldKeys[keyI][:4], key[:4]) - defer func() { keyI++ }() - return oldKeys[keyI], nil - }) - require.NoError(b, err) - require.EqualValues(b, original, replacedBack) - } -} diff --git a/erigon-lib/commitment/commitment_test.go b/erigon-lib/commitment/commitment_test.go index d60108f8b42..d794054ba6a 100644 --- a/erigon-lib/commitment/commitment_test.go +++ b/erigon-lib/commitment/commitment_test.go @@ -1,12 +1,9 @@ package commitment import ( - "bytes" - "context" "encoding/binary" "encoding/hex" "math/rand" - "sort" "testing" "github.com/ledgerwatch/erigon-lib/common" @@ -14,8 +11,8 @@ import ( "github.com/stretchr/testify/require" ) -func generateCellRow(tb testing.TB, size int) (row []*Cell, bitmap uint16) { - tb.Helper() +func generateCellRow(t *testing.T, size int) (row []*Cell, bitmap uint16) { + t.Helper() row = make([]*Cell, size) var bm uint16 @@ -23,24 +20,24 @@ func generateCellRow(tb testing.TB, size int) (row []*Cell, bitmap uint16) { row[i] = new(Cell) row[i].hl = 32 n, err := rand.Read(row[i].h[:]) - require.NoError(tb, err) - require.EqualValues(tb, row[i].hl, n) + require.NoError(t, err) + require.EqualValues(t, row[i].hl, n) th := rand.Intn(120) switch { case th > 70: n, err = rand.Read(row[i].apk[:]) - require.NoError(tb, err) + require.NoError(t, err) row[i].apl = n case th > 20 && th <= 70: n, err = rand.Read(row[i].spk[:]) - require.NoError(tb, err) + require.NoError(t, err) row[i].spl = n case th <= 20: n, err = rand.Read(row[i].extension[:th]) row[i].extLen = n - require.NoError(tb, err) - require.EqualValues(tb, th, n) + require.NoError(t, err) + require.EqualValues(t, th, n) } bm |= uint16(1 << i) } @@ -84,42 +81,6 @@ func TestBranchData_MergeHexBranches2(t *testing.T) { } } -func TestBranchData_MergeHexBranches_ValueAliveAfterNewMerges(t *testing.T) { - t.Skip() - row, bm := generateCellRow(t, 16) - - be := NewBranchEncoder(1024, t.TempDir()) - enc, _, err := be.EncodeBranch(bm, bm, bm, func(i int, skip bool) (*Cell, error) { - return row[i], nil - }) - require.NoError(t, err) - - copies := make([][]byte, 16) - values := make([][]byte, len(copies)) - - merger := NewHexBranchMerger(8192) - - var tm uint16 - am := bm - - for i := 15; i >= 0; i-- { - row[i] = nil - tm, bm, am = uint16(1<>1, am>>1 - enc1, _, err := be.EncodeBranch(bm, tm, am, func(i int, skip bool) (*Cell, error) { - return row[i], nil - }) - require.NoError(t, err) - merged, err := merger.Merge(enc, enc1) - require.NoError(t, err) - - copies[i] = common.Copy(merged) - values[i] = merged - } - for i := 0; i < len(copies); i++ { - require.EqualValues(t, copies[i], values[i]) - } -} - func TestBranchData_MergeHexBranchesEmptyBranches(t *testing.T) { // Create a BranchMerger instance with sufficient capacity for testing. 
merger := NewHexBranchMerger(1024) @@ -154,20 +115,20 @@ func TestBranchData_MergeHexBranches3(t *testing.T) { } // helper to decode row of cells from string -func unfoldBranchDataFromString(tb testing.TB, encs string) (row []*Cell, am uint16) { - tb.Helper() +func unfoldBranchDataFromString(t *testing.T, encs string) (row []*Cell, am uint16) { + t.Helper() //encs := "0405040b04080f0b080d030204050b0502090805050d01060e060d070f0903090c04070a0d0a000e090b060b0c040c0700020e0b0c060b0106020c0607050a0b0209070d06040808" //encs := "37ad10eb75ea0fc1c363db0dda0cd2250426ee2c72787155101ca0e50804349a94b649deadcc5cddc0d2fd9fb358c2edc4e7912d165f88877b1e48c69efacf418e923124506fbb2fd64823fd41cbc10427c423" enc, err := hex.DecodeString(encs) - require.NoError(tb, err) + require.NoError(t, err) tm, am, origins, err := BranchData(enc).DecodeCells() - require.NoError(tb, err) + require.NoError(t, err) _, _ = tm, am - tb.Logf("%s", BranchData(enc).String()) - //require.EqualValues(tb, tm, am) + t.Logf("%s", BranchData(enc).String()) + //require.EqualValues(t, tm, am) //for i, c := range origins { // if c == nil { // continue @@ -288,99 +249,3 @@ func TestBranchData_ReplacePlainKeys_WithEmpty(t *testing.T) { require.EqualValues(t, orig, merged) }) } - -func TestNewUpdateTree(t *testing.T) { - t.Run("ModeUpdate", func(t *testing.T) { - ut := NewUpdateTree(ModeUpdate, t.TempDir(), keyHasherNoop) - - require.NotNil(t, ut.tree) - require.NotNil(t, ut.keccak) - require.Nil(t, ut.keys) - require.Equal(t, ModeUpdate, ut.mode) - }) - - t.Run("ModeDirect", func(t *testing.T) { - ut := NewUpdateTree(ModeDirect, t.TempDir(), keyHasherNoop) - - require.NotNil(t, ut.keccak) - require.NotNil(t, ut.keys) - require.Equal(t, ModeDirect, ut.mode) - }) - -} - -func TestUpdateTree_TouchPlainKey(t *testing.T) { - utUpdate := NewUpdateTree(ModeUpdate, t.TempDir(), keyHasherNoop) - utDirect := NewUpdateTree(ModeDirect, t.TempDir(), keyHasherNoop) - utUpdate1 := NewUpdateTree(ModeUpdate, t.TempDir(), keyHasherNoop) - utDirect1 := NewUpdateTree(ModeDirect, t.TempDir(), keyHasherNoop) - - type tc struct { - key []byte - val []byte - } - - upds := []tc{ - {common.FromHex("c17fa85f22306d37cec90b0ec74c5623dbbac68f"), []byte("value1")}, - {common.FromHex("553bba1d92398a69fbc9f01593bbc51b58862366"), []byte("value0")}, - {common.FromHex("553bba1d92398a69fbc9f01593bbc51b58862366"), []byte("value1")}, - {common.FromHex("97c780315e7820752006b7a918ce7ec023df263a87a715b64d5ab445e1782a760a974f8810551f81dfb7f1425f7d8358332af195"), []byte("value1")}, - } - for i := 0; i < len(upds); i++ { - utUpdate.TouchPlainKey(upds[i].key, upds[i].val, utUpdate.TouchStorage) - utDirect.TouchPlainKey(upds[i].key, upds[i].val, utDirect.TouchStorage) - utUpdate1.TouchPlainKey(upds[i].key, upds[i].val, utUpdate.TouchStorage) - utDirect1.TouchPlainKey(upds[i].key, upds[i].val, utDirect.TouchStorage) - } - - uniqUpds := make(map[string]tc) - for i := 0; i < len(upds); i++ { - uniqUpds[string(upds[i].key)] = upds[i] - } - sortedUniqUpds := make([]tc, 0, len(uniqUpds)) - for _, v := range uniqUpds { - sortedUniqUpds = append(sortedUniqUpds, v) - } - sort.Slice(sortedUniqUpds, func(i, j int) bool { - return bytes.Compare(sortedUniqUpds[i].key, sortedUniqUpds[j].key) < 0 - }) - - sz := utUpdate.Size() - require.EqualValues(t, 3, sz) - - sz = utDirect.Size() - require.EqualValues(t, 3, sz) - - pk, upd := utUpdate.List(true) - require.Len(t, pk, 3) - require.NotNil(t, upd) - - for i := 0; i < len(sortedUniqUpds); i++ { - require.EqualValues(t, sortedUniqUpds[i].key, pk[i]) - 
require.EqualValues(t, sortedUniqUpds[i].val, upd[i].CodeHashOrStorage[:upd[i].ValLength]) - } - - pk, upd = utDirect.List(true) - require.Len(t, pk, 3) - require.Nil(t, upd) - - for i := 0; i < len(sortedUniqUpds); i++ { - require.EqualValues(t, sortedUniqUpds[i].key, pk[i]) - } - - i := 0 - err := utUpdate1.HashSort(context.Background(), func(hk, pk []byte) error { - require.EqualValues(t, sortedUniqUpds[i].key, pk) - i++ - return nil - }) - require.NoError(t, err) - - i = 0 - err = utDirect1.HashSort(context.Background(), func(hk, pk []byte) error { - require.EqualValues(t, sortedUniqUpds[i].key, pk) - i++ - return nil - }) - require.NoError(t, err) -} diff --git a/erigon-lib/commitment/hex_patricia_hashed.go b/erigon-lib/commitment/hex_patricia_hashed.go index 0f2414a7831..2f831af70d8 100644 --- a/erigon-lib/commitment/hex_patricia_hashed.go +++ b/erigon-lib/commitment/hex_patricia_hashed.go @@ -22,7 +22,6 @@ import ( "encoding/binary" "encoding/hex" "fmt" - "github.com/ledgerwatch/erigon-lib/etl" "hash" "io" "math/bits" @@ -82,6 +81,7 @@ type HexPatriciaHashed struct { ctx PatriciaContext hashAuxBuffer [128]byte // buffer to compute cell hash or write hash-related things auxBuffer *bytes.Buffer // auxiliary buffer used during branch updates encoding + branchMerger *BranchMerger branchEncoder *BranchEncoder } @@ -92,6 +92,7 @@ func NewHexPatriciaHashed(accountKeyLen int, ctx PatriciaContext) *HexPatriciaHa keccak2: sha3.NewLegacyKeccak256().(keccakState), accountKeyLen: accountKeyLen, auxBuffer: bytes.NewBuffer(make([]byte, 8192)), + branchMerger: NewHexBranchMerger(1024), } tdir := os.TempDir() if ctx != nil { @@ -1053,7 +1054,7 @@ func (hph *HexPatriciaHashed) fold() (err error) { upCell.extLen = 0 upCell.downHashedLen = 0 if hph.branchBefore[row] { - _, err := hph.branchEncoder.CollectUpdate(hph.ctx, updateKey, 0, hph.touchMap[row], 0, RetrieveCellNoop) + _, err := hph.collectBranchUpdate(updateKey, 0, hph.touchMap[row], 0, RetrieveCellNoop) if err != nil { return fmt.Errorf("failed to encode leaf node update: %w", err) } @@ -1081,7 +1082,7 @@ func (hph *HexPatriciaHashed) fold() (err error) { upCell.fillFromLowerCell(cell, depth, hph.currentKey[upDepth:hph.currentKeyLen], nibble) // Delete if it existed if hph.branchBefore[row] { - _, err := hph.branchEncoder.CollectUpdate(hph.ctx, updateKey, 0, hph.touchMap[row], 0, RetrieveCellNoop) + _, err := hph.collectBranchUpdate(updateKey, 0, hph.touchMap[row], 0, RetrieveCellNoop) if err != nil { return fmt.Errorf("failed to encode leaf node update: %w", err) } @@ -1154,7 +1155,7 @@ func (hph *HexPatriciaHashed) fold() (err error) { var lastNibble int var err error - lastNibble, err = hph.branchEncoder.CollectUpdate(hph.ctx, updateKey, bitmap, hph.touchMap[row], hph.afterMap[row], cellGetter) + lastNibble, err = hph.collectBranchUpdate(updateKey, bitmap, hph.touchMap[row], hph.afterMap[row], cellGetter) if err != nil { return fmt.Errorf("failed to encode branch update: %w", err) } @@ -1269,109 +1270,45 @@ func (hph *HexPatriciaHashed) updateCell(plainKey, hashedKey []byte) *Cell { return cell } -func (hph *HexPatriciaHashed) RootHash() ([]byte, error) { - rh, err := hph.computeCellHash(&hph.root, 0, nil) +func (hph *HexPatriciaHashed) collectBranchUpdate( + prefix []byte, + bitmap, touchMap, afterMap uint16, + readCell func(nibble int, skip bool) (*Cell, error), +) (lastNibble int, err error) { + + update, ln, err := hph.branchEncoder.EncodeBranch(bitmap, touchMap, afterMap, readCell) if err != nil { - return nil, err + return 0, err } - 
return rh[1:], nil // first byte is 128+hash_len
-}
-
-func (hph *HexPatriciaHashed) ProcessTree(ctx context.Context, tree *UpdateTree, logPrefix string) (rootHash []byte, err error) {
-	var (
-		stagedCell = new(Cell)
-		logEvery   = time.NewTicker(20 * time.Second)
-
-		m  runtime.MemStats
-		ki uint64
-	)
-	defer logEvery.Stop()
-	updatesCount := tree.Size()
-
-	err = tree.HashSort(ctx, func(hashedKey, plainKey []byte) error {
-		select {
-		case <-logEvery.C:
-			dbg.ReadMemStats(&m)
-			log.Info(fmt.Sprintf("[%s][agg] computing trie", logPrefix),
-				"progress", fmt.Sprintf("%dk/%dk", ki/1000, updatesCount/1000), "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys))
-		default:
-		}
-
-		if hph.trace {
-			fmt.Printf("\n%d/%d) plainKey=[%x], hashedKey=[%x], currentKey=[%x]\n", ki+1, updatesCount, plainKey, hashedKey, hph.currentKey[:hph.currentKeyLen])
-		}
-		// Keep folding until the currentKey is the prefix of the key we modify
-		for hph.needFolding(hashedKey) {
-			if err := hph.fold(); err != nil {
-				return fmt.Errorf("fold: %w", err)
-			}
-		}
-		// Now unfold until we step on an empty cell
-		for unfolding := hph.needUnfolding(hashedKey); unfolding > 0; unfolding = hph.needUnfolding(hashedKey) {
-			if err := hph.unfold(hashedKey, unfolding); err != nil {
-				return fmt.Errorf("unfold: %w", err)
-			}
-		}
-
-		// Update the cell
-		stagedCell.reset()
-		if len(plainKey) == hph.accountKeyLen {
-			if err := hph.ctx.GetAccount(plainKey, stagedCell); err != nil {
-				return fmt.Errorf("GetAccount for key %x failed: %w", plainKey, err)
-			}
-			if !stagedCell.Delete {
-				cell := hph.updateCell(plainKey, hashedKey)
-				cell.setAccountFields(stagedCell.CodeHash[:], &stagedCell.Balance, stagedCell.Nonce)
-
-				if hph.trace {
-					fmt.Printf("GetAccount update key %x => balance=%d nonce=%v codeHash=%x\n", cell.apk, &cell.Balance, cell.Nonce, cell.CodeHash)
-				}
-			}
-		} else {
-			if err = hph.ctx.GetStorage(plainKey, stagedCell); err != nil {
-				return fmt.Errorf("GetStorage for key %x failed: %w", plainKey, err)
-			}
-			if !stagedCell.Delete {
-				hph.updateCell(plainKey, hashedKey).setStorage(stagedCell.Storage[:stagedCell.StorageLen])
-				if hph.trace {
-					fmt.Printf("GetStorage reading key %x => %x\n", plainKey, stagedCell.Storage[:stagedCell.StorageLen])
-				}
-			}
-		}
-
-		if stagedCell.Delete {
-			if hph.trace {
-				fmt.Printf("delete cell %x hash %x\n", plainKey, hashedKey)
-			}
-			hph.deleteCell(hashedKey)
-		}
-		mxKeys.Inc()
-		ki++
-		return nil
-	})
+	prev, prevStep, err := hph.ctx.GetBranch(prefix) // prefix already compacted by fold
 	if err != nil {
-		return nil, fmt.Errorf("hash sort failed: %w", err)
+		return 0, err
 	}
-
-	// Folding everything up to the root
-	for hph.activeRows > 0 {
-		if err := hph.fold(); err != nil {
-			return nil, fmt.Errorf("final fold: %w", err)
+	if len(prev) > 0 {
+		previous := BranchData(prev)
+		merged, err := hph.branchMerger.Merge(previous, update)
+		if err != nil {
+			return 0, err
 		}
+		update = merged
 	}
+	// this update ensures that whenever a commitment is written, each of its branches is also present in the commitment state at that moment, at the cost of extra storage
+	//fmt.Printf("commitment branch encoder merge prefix [%x] [%x]->[%x]\n%update\n", prefix, stateValue, update, BranchData(update).String())
-	rootHash, err = hph.RootHash()
-	if err != nil {
-		return nil, fmt.Errorf("root hash evaluation failed: %w", err)
+	cp, cu := common.Copy(prefix), common.Copy(update) // has to copy :(
+	if err = hph.ctx.PutBranch(cp, cu, prev, prevStep); err != nil {
+		return 0, err
 	}
-	if hph.trace {
-		fmt.Printf("root hash %x updates
%d\n", rootHash, updatesCount) - } - err = hph.branchEncoder.Load(hph.ctx, etl.TransformArgs{Quit: ctx.Done()}) + mxCommitmentBranchUpdates.Inc() + return ln, nil +} + +func (hph *HexPatriciaHashed) RootHash() ([]byte, error) { + rh, err := hph.computeCellHash(&hph.root, 0, nil) if err != nil { - return nil, fmt.Errorf("branch update failed: %w", err) + return nil, err } - return rootHash, nil + return rh[1:], nil // first byte is 128+hash_len } // Process keys and updates in a single pass. Branch updates are written to PatriciaContext if no error occurs. @@ -1450,7 +1387,7 @@ func (hph *HexPatriciaHashed) ProcessKeys(ctx context.Context, plainKeys [][]byt } hph.deleteCell(hashedKey) } - mxKeys.Inc() + mxCommitmentKeys.Inc() } // Folding everything up to the root for hph.activeRows > 0 { @@ -1466,10 +1403,6 @@ func (hph *HexPatriciaHashed) ProcessKeys(ctx context.Context, plainKeys [][]byt if hph.trace { fmt.Printf("root hash %x updates %d\n", rootHash, len(plainKeys)) } - err = hph.branchEncoder.Load(hph.ctx, etl.TransformArgs{Quit: ctx.Done()}) - if err != nil { - return nil, fmt.Errorf("branch update failed: %w", err) - } return rootHash, nil } @@ -1546,7 +1479,7 @@ func (hph *HexPatriciaHashed) ProcessUpdates(ctx context.Context, plainKeys [][] } } - mxKeys.Inc() + mxCommitmentKeys.Inc() } // Folding everything up to the root for hph.activeRows > 0 { @@ -1559,10 +1492,6 @@ func (hph *HexPatriciaHashed) ProcessUpdates(ctx context.Context, plainKeys [][] if err != nil { return nil, fmt.Errorf("root hash evaluation failed: %w", err) } - err = hph.branchEncoder.Load(hph.ctx, etl.TransformArgs{Quit: ctx.Done()}) - if err != nil { - return nil, fmt.Errorf("branch update failed: %w", err) - } return rootHash, nil } diff --git a/erigon-lib/commitment/hex_patricia_hashed_bench_test.go b/erigon-lib/commitment/hex_patricia_hashed_bench_test.go index 643a6c1accd..930a98468a3 100644 --- a/erigon-lib/commitment/hex_patricia_hashed_bench_test.go +++ b/erigon-lib/commitment/hex_patricia_hashed_bench_test.go @@ -9,7 +9,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/length" ) -func Benchmark_HexPatriciaHashed_ReviewKeys(b *testing.B) { +func Benchmark_HexPatriciaHahsed_ReviewKeys(b *testing.B) { ms := NewMockState(&testing.T{}) ctx := context.Background() hph := NewHexPatriciaHashed(length.Addr, ms) diff --git a/erigon-lib/common/dbg/dbg_env.go b/erigon-lib/common/dbg/dbg_env.go index 7096a122208..41b83c0d442 100644 --- a/erigon-lib/common/dbg/dbg_env.go +++ b/erigon-lib/common/dbg/dbg_env.go @@ -1,18 +1,18 @@ package dbg import ( + "fmt" "os" "strconv" "time" "github.com/c2h5oh/datasize" - "github.com/ledgerwatch/log/v3" ) func EnvString(envVarName string, defaultVal string) string { v, _ := os.LookupEnv(envVarName) if v != "" { - log.Info("[dbg] env", envVarName, v) + fmt.Printf("[dbg] env %s=%s\n", envVarName, v) return v } return defaultVal @@ -20,11 +20,11 @@ func EnvString(envVarName string, defaultVal string) string { func EnvBool(envVarName string, defaultVal bool) bool { v, _ := os.LookupEnv(envVarName) if v == "true" { - log.Info("[dbg] env", envVarName, true) + fmt.Printf("[dbg] env %s=%t\n", envVarName, true) return true } if v == "false" { - log.Info("[dbg] env", envVarName, false) + fmt.Printf("[dbg] env %s=%t\n", envVarName, false) return false } return defaultVal @@ -36,7 +36,7 @@ func EnvInt(envVarName string, defaultVal int) int { if err != nil { panic(err) } - log.Info("[dbg] env", envVarName, i) + fmt.Printf("[dbg] env %s=%d\n", envVarName, i) return i } return defaultVal @@ 
-48,7 +48,7 @@ func EnvDataSize(envVarName string, defaultVal datasize.ByteSize) datasize.ByteS if err != nil { panic(err) } - log.Info("[dbg] env", envVarName, val) + fmt.Printf("[dbg] env %s=%s\n", envVarName, val) return val } return defaultVal @@ -57,7 +57,8 @@ func EnvDataSize(envVarName string, defaultVal datasize.ByteSize) datasize.ByteS func EnvDuration(envVarName string, defaultVal time.Duration) time.Duration { v, _ := os.LookupEnv(envVarName) if v != "" { - log.Info("[dbg] env", envVarName, v) + fmt.Printf("[dbg] env %s=%s\n", envVarName, v) + val, err := time.ParseDuration(v) if err != nil { panic(err) diff --git a/erigon-lib/diagnostics/entities.go b/erigon-lib/diagnostics/entities.go index 71e745b32d1..e7e75c91b12 100644 --- a/erigon-lib/diagnostics/entities.go +++ b/erigon-lib/diagnostics/entities.go @@ -77,23 +77,11 @@ type SnapshotDownloadStatistics struct { } type SegmentDownloadStatistics struct { - Name string `json:"name"` - TotalBytes uint64 `json:"totalBytes"` - DownloadedBytes uint64 `json:"downloadedBytes"` - Webseeds []SegmentPeer `json:"webseeds"` - Peers []SegmentPeer `json:"peers"` - DownloadedStats FileDownloadedStatistics `json:"downloadedStats"` -} - -type FileDownloadedStatistics struct { - TimeTook float64 `json:"timeTook"` - AverageRate uint64 `json:"averageRate"` -} - -type FileDownloadedStatisticsUpdate struct { - FileName string `json:"fileName"` - TimeTook float64 `json:"timeTook"` - AverageRate uint64 `json:"averageRate"` + Name string `json:"name"` + TotalBytes uint64 `json:"totalBytes"` + DownloadedBytes uint64 `json:"downloadedBytes"` + Webseeds []SegmentPeer `json:"webseeds"` + Peers []SegmentPeer `json:"peers"` } type SegmentPeer struct { @@ -257,11 +245,6 @@ type NetworkSpeedTestResult struct { Latency time.Duration `json:"latency"` DownloadSpeed float64 `json:"downloadSpeed"` UploadSpeed float64 `json:"uploadSpeed"` - PacketLoss float64 `json:"packetLoss"` -} - -func (ti FileDownloadedStatisticsUpdate) Type() Type { - return TypeOf(ti) } func (ti MemoryStats) Type() Type { diff --git a/erigon-lib/diagnostics/snapshots.go b/erigon-lib/diagnostics/snapshots.go index 97f0941083e..25f636c8d29 100644 --- a/erigon-lib/diagnostics/snapshots.go +++ b/erigon-lib/diagnostics/snapshots.go @@ -12,7 +12,6 @@ func (d *DiagnosticClient) setupSnapshotDiagnostics(rootCtx context.Context) { d.runSegmentIndexingListener(rootCtx) d.runSegmentIndexingFinishedListener(rootCtx) d.runSnapshotFilesListListener(rootCtx) - d.runFileDownloadedListener(rootCtx) } func (d *DiagnosticClient) runSnapshotListener(rootCtx context.Context) { @@ -66,17 +65,7 @@ func (d *DiagnosticClient) runSegmentDownloadingListener(rootCtx context.Context d.syncStats.SnapshotDownload.SegmentsDownloading = map[string]SegmentDownloadStatistics{} } - if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name]; ok { - val.TotalBytes = info.TotalBytes - val.DownloadedBytes = info.DownloadedBytes - val.Webseeds = info.Webseeds - val.Peers = info.Peers - - d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name] = val - } else { - d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name] = info - } - + d.syncStats.SnapshotDownload.SegmentsDownloading[info.Name] = info d.mu.Unlock() } } @@ -184,89 +173,6 @@ func (d *DiagnosticClient) runSnapshotFilesListListener(rootCtx context.Context) }() } -func (d *DiagnosticClient) runFileDownloadedListener(rootCtx context.Context) { - go func() { - ctx, ch, closeChannel := Context[FileDownloadedStatisticsUpdate](rootCtx, 1) - defer 
closeChannel() - - StartProviders(ctx, TypeOf(FileDownloadedStatisticsUpdate{}), log.Root()) - for { - select { - case <-rootCtx.Done(): - return - case info := <-ch: - d.mu.Lock() - - if d.syncStats.SnapshotDownload.SegmentsDownloading == nil { - d.syncStats.SnapshotDownload.SegmentsDownloading = map[string]SegmentDownloadStatistics{} - } - - if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[info.FileName]; ok { - val.DownloadedStats = FileDownloadedStatistics{ - TimeTook: info.TimeTook, - AverageRate: info.AverageRate, - } - - d.syncStats.SnapshotDownload.SegmentsDownloading[info.FileName] = val - } else { - d.syncStats.SnapshotDownload.SegmentsDownloading[info.FileName] = SegmentDownloadStatistics{ - Name: info.FileName, - TotalBytes: 0, - DownloadedBytes: 0, - Webseeds: nil, - Peers: nil, - DownloadedStats: FileDownloadedStatistics{ - TimeTook: info.TimeTook, - AverageRate: info.AverageRate, - }, - } - } - - d.mu.Unlock() - } - } - }() -} - -func (d *DiagnosticClient) UpdateFileDownloadedStatistics(downloadedInfo *FileDownloadedStatisticsUpdate, downloadingInfo *SegmentDownloadStatistics) { - if d.syncStats.SnapshotDownload.SegmentsDownloading == nil { - d.syncStats.SnapshotDownload.SegmentsDownloading = map[string]SegmentDownloadStatistics{} - } - - if downloadedInfo != nil { - dwStats := FileDownloadedStatistics{ - TimeTook: downloadedInfo.TimeTook, - AverageRate: downloadedInfo.AverageRate, - } - if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[downloadedInfo.FileName]; ok { - val.DownloadedStats = dwStats - - d.syncStats.SnapshotDownload.SegmentsDownloading[downloadedInfo.FileName] = val - } else { - d.syncStats.SnapshotDownload.SegmentsDownloading[downloadedInfo.FileName] = SegmentDownloadStatistics{ - Name: downloadedInfo.FileName, - TotalBytes: 0, - DownloadedBytes: 0, - Webseeds: make([]SegmentPeer, 0), - Peers: make([]SegmentPeer, 0), - DownloadedStats: dwStats, - } - } - } else { - if val, ok := d.syncStats.SnapshotDownload.SegmentsDownloading[downloadingInfo.Name]; ok { - val.TotalBytes = downloadingInfo.TotalBytes - val.DownloadedBytes = downloadingInfo.DownloadedBytes - val.Webseeds = downloadingInfo.Webseeds - val.Peers = downloadingInfo.Peers - - d.syncStats.SnapshotDownload.SegmentsDownloading[downloadingInfo.Name] = val - } else { - d.syncStats.SnapshotDownload.SegmentsDownloading[downloadingInfo.Name] = *downloadingInfo - } - } - -} - func (d *DiagnosticClient) SyncStatistics() SyncStatistics { return d.syncStats } diff --git a/erigon-lib/diagnostics/snapshots_test.go b/erigon-lib/diagnostics/snapshots_test.go deleted file mode 100644 index 9f56f9f4364..00000000000 --- a/erigon-lib/diagnostics/snapshots_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package diagnostics_test - -import ( - "testing" - - "github.com/ledgerwatch/erigon-lib/diagnostics" - "github.com/stretchr/testify/require" -) - -func TestUpdateFileDownloadingStats(t *testing.T) { - d := diagnostics.NewDiagnosticClient(nil, "test") - - d.UpdateFileDownloadedStatistics(nil, &segmentDownloadStatsMock) - - sd := d.SyncStatistics().SnapshotDownload.SegmentsDownloading - require.NotNil(t, sd) - require.NotEqual(t, len(sd), 0) - - require.Equal(t, sd["test"], segmentDownloadStatsMock) -} - -func TestUpdateFileDownloadedStats(t *testing.T) { - d := diagnostics.NewDiagnosticClient(nil, "test") - - d.UpdateFileDownloadedStatistics(&fileDownloadedUpdMock, nil) - - sd := d.SyncStatistics().SnapshotDownload.SegmentsDownloading - require.NotNil(t, sd) - require.NotEqual(t, len(sd), 0) - - 
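The deleted listener and `UpdateFileDownloadedStatistics` above rely on Go's read-modify-write idiom for maps of structs: map values are not addressable, so an entry must be copied, mutated, and stored back. A self-contained sketch with a hypothetical `Stats` type:

```go
package main

import "fmt"

type Stats struct {
	Name            string
	DownloadedBytes uint64
}

func main() {
	m := map[string]Stats{"test": {Name: "test"}}
	// m["test"].DownloadedBytes = 1 would not compile: map values are
	// not addressable, so copy the entry, mutate it, and write it back.
	if val, ok := m["test"]; ok {
		val.DownloadedBytes = 1
		m["test"] = val
	}
	fmt.Println(m["test"])
}
```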
require.Equal(t, sd["test"], diagnostics.SegmentDownloadStatistics{ - Name: "test", - TotalBytes: 0, - DownloadedBytes: 0, - Webseeds: make([]diagnostics.SegmentPeer, 0), - Peers: make([]diagnostics.SegmentPeer, 0), - DownloadedStats: diagnostics.FileDownloadedStatistics{ - TimeTook: 1.0, - AverageRate: 1, - }, - }) -} - -func TestUpdateFileFullStatsUpdate(t *testing.T) { - d := diagnostics.NewDiagnosticClient(nil, "test") - - d.UpdateFileDownloadedStatistics(nil, &segmentDownloadStatsMock) - - sd := d.SyncStatistics().SnapshotDownload.SegmentsDownloading - require.NotNil(t, sd) - require.NotEqual(t, len(sd), 0) - - require.Equal(t, sd["test"], segmentDownloadStatsMock) - - d.UpdateFileDownloadedStatistics(&fileDownloadedUpdMock, nil) - - require.Equal(t, sd["test"], diagnostics.SegmentDownloadStatistics{ - Name: "test", - TotalBytes: 1, - DownloadedBytes: 1, - Webseeds: make([]diagnostics.SegmentPeer, 0), - Peers: make([]diagnostics.SegmentPeer, 0), - DownloadedStats: diagnostics.FileDownloadedStatistics{ - TimeTook: 1.0, - AverageRate: 1, - }, - }) -} - -var ( - fileDownloadedUpdMock = diagnostics.FileDownloadedStatisticsUpdate{ - FileName: "test", - TimeTook: 1.0, - AverageRate: 1, - } - - segmentDownloadStatsMock = diagnostics.SegmentDownloadStatistics{ - Name: "test", - TotalBytes: 1, - DownloadedBytes: 1, - Webseeds: make([]diagnostics.SegmentPeer, 0), - Peers: make([]diagnostics.SegmentPeer, 0), - DownloadedStats: diagnostics.FileDownloadedStatistics{}, - } -) diff --git a/erigon-lib/diagnostics/speedtest.go b/erigon-lib/diagnostics/speedtest.go index ab9a04008bc..d2c463bbbbb 100644 --- a/erigon-lib/diagnostics/speedtest.go +++ b/erigon-lib/diagnostics/speedtest.go @@ -5,7 +5,6 @@ import ( "time" "github.com/showwin/speedtest-go/speedtest" - "github.com/showwin/speedtest-go/speedtest/transport" ) func (d *DiagnosticClient) setupSpeedtestDiagnostics(rootCtx context.Context) { @@ -29,54 +28,37 @@ func (d *DiagnosticClient) setupSpeedtestDiagnostics(rootCtx context.Context) { }() } -var cacheServerList speedtest.Servers - func (d *DiagnosticClient) runSpeedTest(rootCtx context.Context) NetworkSpeedTestResult { var speedtestClient = speedtest.New() - - serverList, err := speedtestClient.FetchServers() - // Ensure that the server list can rolled back to the previous cache. 
- if err == nil { - cacheServerList = serverList - } - targets, _ := cacheServerList.FindServer([]int{}) + serverList, _ := speedtestClient.FetchServers() + targets, _ := serverList.FindServer([]int{}) latency := time.Duration(0) downloadSpeed := float64(0) uploadSpeed := float64(0) - packetLoss := float64(-1) - - analyzer := speedtest.NewPacketLossAnalyzer(nil) if len(targets) > 0 { s := targets[0] - err = s.PingTestContext(rootCtx, nil) + err := s.PingTestContext(rootCtx, nil) if err == nil { latency = s.Latency } err = s.DownloadTestContext(rootCtx) if err == nil { - downloadSpeed = s.DLSpeed.Mbps() + downloadSpeed = s.DLSpeed } err = s.UploadTestContext(rootCtx) if err == nil { - uploadSpeed = s.ULSpeed.Mbps() + uploadSpeed = s.ULSpeed } - - ctx, cancel := context.WithTimeout(rootCtx, time.Second*15) - defer cancel() - _ = analyzer.RunWithContext(ctx, s.Host, func(pl *transport.PLoss) { - packetLoss = pl.Loss() - }) } return NetworkSpeedTestResult{ Latency: latency, DownloadSpeed: downloadSpeed, UploadSpeed: uploadSpeed, - PacketLoss: packetLoss, } } diff --git a/erigon-lib/downloader/downloader.go b/erigon-lib/downloader/downloader.go index ab2fc0368dd..24316d406fd 100644 --- a/erigon-lib/downloader/downloader.go +++ b/erigon-lib/downloader/downloader.go @@ -92,16 +92,10 @@ type Downloader struct { torrentFS *AtomicTorrentFS snapshotLock *snapshotLock webDownloadInfo map[string]webDownloadInfo - downloading map[string]*downloadInfo + downloading map[string]struct{} downloadLimit *rate.Limit } -type downloadInfo struct { - torrent *torrent.Torrent - time time.Time - progress float32 -} - type webDownloadInfo struct { url *url.URL length int64 @@ -109,6 +103,11 @@ type webDownloadInfo struct { torrent *torrent.Torrent } +type downloadProgress struct { + time time.Time + progress float32 +} + type AggStats struct { MetadataReady, FilesTotal int32 LastMetadataUpdate *time.Time @@ -133,6 +132,7 @@ type AggStats struct { WebseedBytesDownload *atomic.Int64 lastTorrentStatus time.Time + downloadProgress map[string]downloadProgress } type requestHandler struct { @@ -292,6 +292,7 @@ func New(ctx context.Context, cfg *downloadercfg.Cfg, logger log.Logger, verbosi WebseedBytesDownload: &atomic.Int64{}, WebseedDiscardCount: &atomic.Int64{}, WebseedServerFails: &atomic.Int64{}, + downloadProgress: map[string]downloadProgress{}, } snapLock, err := getSnapshotLock(ctx, cfg, db, &stats, mutex, logger) @@ -314,7 +315,7 @@ func New(ctx context.Context, cfg *downloadercfg.Cfg, logger log.Logger, verbosi snapshotLock: snapLock, webDownloadInfo: map[string]webDownloadInfo{}, webDownloadSessions: map[string]*RCloneSession{}, - downloading: map[string]*downloadInfo{}, + downloading: map[string]struct{}{}, webseedsDiscover: discover, } d.webseeds.SetTorrent(d.torrentFS, snapLock.Downloads, cfg.DownloadTorrentFilesFromWebseed) @@ -830,15 +831,7 @@ func (d *Downloader) mainLoop(silent bool) error { }() } - fileSlots := d.cfg.DownloadSlots - - var pieceSlots int - - if d.downloadLimit != nil { - pieceSlots = int(math.Round(float64(*d.downloadLimit / rate.Limit(downloadercfg.DefaultPieceSize)))) - } else { - pieceSlots = int(512 * datasize.MB / downloadercfg.DefaultPieceSize) - } + var sem = semaphore.NewWeighted(int64(d.cfg.DownloadSlots)) //TODO: feature is not ready yet //d.webDownloadClient, _ = NewRCloneClient(d.logger) @@ -862,8 +855,6 @@ func (d *Downloader) mainLoop(silent bool) error { checkGroup, _ := errgroup.WithContext(d.ctx) checkGroup.SetLimit(runtime.GOMAXPROCS(-1) * 4) - lastIntMult := time.Now() 
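The file/piece slot arithmetic removed above is replaced by a single weighted semaphore sized to `cfg.DownloadSlots`: each download goroutine acquires one unit before starting and releases it when done, which caps concurrency without tracking pieces. A self-contained sketch of that pattern using `golang.org/x/sync/semaphore`, the package these hunks rely on:

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	const downloadSlots = 3 // stand-in for d.cfg.DownloadSlots
	sem := semaphore.NewWeighted(int64(downloadSlots))

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		// Block until a slot frees up (or the context is cancelled).
		if err := sem.Acquire(context.Background(), 1); err != nil {
			break
		}
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			defer sem.Release(1) // free the slot for the next download
			fmt.Println("downloading", n)
		}(i)
	}
	wg.Wait()
}
```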
- for { torrents := d.torrentClient.Torrents() @@ -934,7 +925,6 @@ func (d *Downloader) mainLoop(silent bool) error { if _, ok := failed[t.Name()]; ok { continue } - d.lock.RLock() _, downloading := d.downloading[t.Name()] d.lock.RUnlock() @@ -1078,19 +1068,7 @@ func (d *Downloader) mainLoop(silent bool) error { d.stats.Downloading = int32(downloadingLen) d.lock.RUnlock() - // the call interval of the loop (elapsed sec) used to get slots/sec for - // calculating the number of files to download based on the loop speed - intervalMultiplier := int(time.Since(lastIntMult).Seconds()) - - // min and max here are taken from the torrent peer config - switch { - case intervalMultiplier < 16: - intervalMultiplier = 16 - case intervalMultiplier > 128: - intervalMultiplier = 128 - } - - available := availableTorrents(d.ctx, pending, d.downloading, fileSlots, pieceSlots*intervalMultiplier) + available := availableTorrents(d.ctx, pending, d.cfg.DownloadSlots-downloadingLen) d.lock.RLock() for _, webDownload := range d.webDownloadInfo { @@ -1197,7 +1175,7 @@ func (d *Downloader) mainLoop(silent bool) error { case len(t.PeerConns()) > 0: d.logger.Debug("[snapshots] Downloading from BitTorrent", "file", t.Name(), "peers", len(t.PeerConns()), "webpeers", len(t.WebseedPeerConns())) delete(waiting, t.Name()) - d.torrentDownload(t, downloadComplete) + d.torrentDownload(t, downloadComplete, sem) case len(t.WebseedPeerConns()) > 0: if d.webDownloadClient != nil { var peerUrls []*url.URL @@ -1210,21 +1188,21 @@ func (d *Downloader) mainLoop(silent bool) error { d.logger.Debug("[snapshots] Downloading from webseed", "file", t.Name(), "webpeers", len(t.WebseedPeerConns())) delete(waiting, t.Name()) - session, err := d.webDownload(peerUrls, t, nil, downloadComplete) + session, err := d.webDownload(peerUrls, t, nil, downloadComplete, sem) if err != nil { d.logger.Warn("Can't complete web download", "file", t.Info().Name, "err", err) if session == nil { delete(waiting, t.Name()) - d.torrentDownload(t, downloadComplete) + d.torrentDownload(t, downloadComplete, sem) } continue } } else { d.logger.Debug("[snapshots] Downloading from torrent", "file", t.Name(), "peers", len(t.PeerConns()), "webpeers", len(t.WebseedPeerConns())) delete(waiting, t.Name()) - d.torrentDownload(t, downloadComplete) + d.torrentDownload(t, downloadComplete, sem) } default: if d.webDownloadClient != nil { @@ -1263,13 +1241,13 @@ func (d *Downloader) mainLoop(silent bool) error { d.logger.Debug("[snapshots] Downloading from web", "file", t.Name(), "webpeers", len(t.WebseedPeerConns())) delete(waiting, t.Name()) - d.webDownload([]*url.URL{peerUrl}, t, &webDownload, downloadComplete) + d.webDownload([]*url.URL{peerUrl}, t, &webDownload, downloadComplete, sem) continue } d.logger.Debug("[snapshots] Downloading from torrent", "file", t.Name(), "peers", len(t.PeerConns())) delete(waiting, t.Name()) - d.torrentDownload(t, downloadComplete) + d.torrentDownload(t, downloadComplete, sem) } } @@ -1318,7 +1296,6 @@ func (d *Downloader) mainLoop(silent bool) error { } } } - } }() @@ -1493,17 +1470,21 @@ func getWebpeerTorrentInfo(ctx context.Context, downloadUrl *url.URL) (*metainfo return metainfo.Load(torrentResponse.Body) } -func (d *Downloader) torrentDownload(t *torrent.Torrent, statusChan chan downloadStatus) { +func (d *Downloader) torrentDownload(t *torrent.Torrent, statusChan chan downloadStatus, sem *semaphore.Weighted) { d.lock.Lock() - d.downloading[t.Name()] = &downloadInfo{torrent: t} + d.downloading[t.Name()] = struct{}{} d.lock.Unlock() + if 
err := sem.Acquire(d.ctx, 1); err != nil { + d.logger.Warn("Failed to acquire download semaphore", "err", err) + return + } + d.wg.Add(1) go func(t *torrent.Torrent) { defer d.wg.Done() - - downloadStarted := time.Now() + defer sem.Release(1) t.AllowDataDownload() @@ -1523,18 +1504,6 @@ func (d *Downloader) torrentDownload(t *torrent.Torrent, statusChan chan downloa case <-d.ctx.Done(): return case <-t.Complete.On(): - downloadTime := time.Since(downloadStarted) - downloaded := t.Stats().BytesReadUsefulData - - diagnostics.Send(diagnostics.FileDownloadedStatisticsUpdate{ - FileName: t.Name(), - TimeTook: downloadTime.Seconds(), - AverageRate: uint64(float64(downloaded.Int64()) / downloadTime.Seconds()), - }) - - d.logger.Debug("[snapshots] Downloaded from BitTorrent", "file", t.Name(), - "download-time", downloadTime.Round(time.Second).String(), "downloaded", common.ByteCount(uint64(downloaded.Int64())), - "rate", fmt.Sprintf("%s/s", common.ByteCount(uint64(float64(downloaded.Int64())/downloadTime.Seconds())))) return case <-time.After(10 * time.Second): bytesRead := t.Stats().BytesReadData @@ -1556,7 +1525,7 @@ func (d *Downloader) torrentDownload(t *torrent.Torrent, statusChan chan downloa }(t) } -func (d *Downloader) webDownload(peerUrls []*url.URL, t *torrent.Torrent, i *webDownloadInfo, statusChan chan downloadStatus) (*RCloneSession, error) { +func (d *Downloader) webDownload(peerUrls []*url.URL, t *torrent.Torrent, i *webDownloadInfo, statusChan chan downloadStatus, sem *semaphore.Weighted) (*RCloneSession, error) { if d.webDownloadClient == nil { return nil, fmt.Errorf("webdownload client not enabled") } @@ -1612,13 +1581,19 @@ func (d *Downloader) webDownload(peerUrls []*url.URL, t *torrent.Torrent, i *web d.lock.Lock() t.Drop() - d.downloading[name] = &downloadInfo{torrent: t} + d.downloading[name] = struct{}{} d.lock.Unlock() d.wg.Add(1) + if err := sem.Acquire(d.ctx, 1); err != nil { + d.logger.Warn("Failed to acquire download semaphore", "err", err) + return nil, err + } + go func() { defer d.wg.Done() + defer sem.Release(1) if dir.FileExist(info.Path) { if err := os.Remove(info.Path); err != nil { @@ -1724,25 +1699,8 @@ func selectDownloadPeer(ctx context.Context, peerUrls []*url.URL, t *torrent.Tor return "", fmt.Errorf("can't find download peer") } -func availableTorrents(ctx context.Context, pending []*torrent.Torrent, downloading map[string]*downloadInfo, fileSlots int, pieceSlots int) []*torrent.Torrent { - - piecesDownloading := 0 - pieceRemainder := int64(0) - - for _, info := range downloading { - if info.torrent.NumPieces() == 1 { - pieceRemainder += info.torrent.Info().Length - - if pieceRemainder >= downloadercfg.DefaultPieceSize { - pieceRemainder = 0 - piecesDownloading++ - } - } else { - piecesDownloading += info.torrent.NumPieces() - info.torrent.Stats().PiecesComplete - } - } - - if len(downloading) >= fileSlots && piecesDownloading > pieceSlots { +func availableTorrents(ctx context.Context, pending []*torrent.Torrent, slots int) []*torrent.Torrent { + if slots == 0 { select { case <-ctx.Done(): return nil @@ -1781,18 +1739,7 @@ func availableTorrents(ctx context.Context, pending []*torrent.Torrent, download for len(pending) > 0 && pending[0].Info() != nil { available = append(available, pending[0]) - if pending[0].NumPieces() == 1 { - pieceRemainder += pending[0].Info().Length - - if pieceRemainder >= downloadercfg.DefaultPieceSize { - pieceRemainder = 0 - piecesDownloading++ - } - } else { - piecesDownloading += pending[0].NumPieces() - } - - if 
len(available) >= fileSlots && piecesDownloading > pieceSlots {
+		if len(available) == slots {
 			return available
 		}
@@ -1801,7 +1748,7 @@ func availableTorrents(ctx context.Context, pending []*torrent.Torrent, download
 	for len(pendingStateFiles) > 0 && pendingStateFiles[0].Info() != nil {
 		available = append(available, pendingStateFiles[0])
 
-		if len(available) >= fileSlots && piecesDownloading > pieceSlots {
+		if len(available) == slots {
 			return available
 		}
@@ -1845,18 +1792,7 @@ func availableTorrents(ctx context.Context, pending []*torrent.Torrent, download
 		default:
 			available = append(available, pending[selected])
 
-			if pending[selected].NumPieces() == 1 {
-				pieceRemainder += pending[selected].Info().Length
-
-				if pieceRemainder >= downloadercfg.DefaultPieceSize {
-					pieceRemainder = 0
-					piecesDownloading++
-				}
-			} else {
-				piecesDownloading += pending[selected].NumPieces()
-			}
-
-			if len(available) >= fileSlots && piecesDownloading > pieceSlots {
+			if len(available) == slots {
 				return available
 			}
@@ -1893,40 +1829,15 @@
 }
 
 func (d *Downloader) ReCalcStats(interval time.Duration) {
-	d.lock.RLock()
-
-	torrentClient := d.torrentClient
-
+	d.lock.Lock()
+	defer d.lock.Unlock()
+	// Call these methods outside of the `lock` critical section, because they have their own locks with contention
+	torrents := d.torrentClient.Torrents()
+	connStats := d.torrentClient.ConnStats()
 	peers := make(map[torrent.PeerID]struct{}, 16)
 	prevStats, stats := d.stats, d.stats
-	logger := d.logger
-	verbosity := d.verbosity
-
-	downloading := map[string]*downloadInfo{}
-
-	for file, info := range d.downloading {
-		i := *info
-		downloading[file] = &i
-	}
-
-	webDownloadClient := d.webDownloadClient
-
-	webDownloadInfo := map[string]webDownloadInfo{}
-
-	for key, value := range d.webDownloadInfo {
-		webDownloadInfo[key] = value
-	}
-
-	ctx := d.ctx
-
-	d.lock.RUnlock()
-
-	//Call this methods outside of `lock` critical section, because they have own locks with contention
-	torrents := torrentClient.Torrents()
-	connStats := torrentClient.ConnStats()
-
 	stats.Completed = true
 	stats.BytesDownload = uint64(connStats.BytesReadUsefulIntendedData.Int64())
 	stats.BytesUpload = uint64(connStats.BytesWrittenData.Int64())
@@ -1948,6 +1859,12 @@ func (d *Downloader) ReCalcStats(interval time.Duration) {
 		diagnostics.Send(diagnostics.SnapshoFilesList{Files: filesList})
 	}
 
+	downloading := map[string]float32{}
+
+	for file := range d.downloading {
+		downloading[file] = 0
+	}
+
 	var dbInfo int
 	var tComplete int
 	var torrentInfo int
@@ -1983,11 +1900,14 @@
 		}
 
 		progress := float32(float64(100) * (float64(bytesCompleted) / float64(tLen)))
-		if info, ok := downloading[torrentName]; ok {
-			if progress != info.progress {
-				info.time = time.Now()
-				info.progress = progress
+		if _, ok := downloading[torrentName]; ok {
+
+			if progress != stats.downloadProgress[torrentName].progress {
+				stats.downloadProgress[torrentName] = downloadProgress{time: time.Now(), progress: progress}
 			}
+		} else {
+			// we only care about progress of downloading files
+			delete(stats.downloadProgress, torrentName)
 		}
 
 		stats.BytesCompleted += uint64(bytesCompleted)
@@ -2002,15 +1922,11 @@
 		rates, peers := getPeersRatesForlogs(peersOfThisFile, torrentName)
 
 		if !torrentComplete {
-			d.lock.RLock()
-			info, err := d.torrentInfo(torrentName)
-			d.lock.RUnlock()
-
-			if err == nil {
+			if info, err :=
d.torrentInfo(torrentName); err == nil { if info != nil { dbInfo++ } - } else if _, ok := webDownloadInfo[torrentName]; ok { + } else if _, ok := d.webDownloadInfo[torrentName]; ok { stats.MetadataReady++ } else { noMetadata = append(noMetadata, torrentName) @@ -2023,14 +1939,13 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { // more detailed statistic: download rate of each peer (for each file) if !torrentComplete && progress != 0 { - if info, ok := downloading[torrentName]; ok { - info.time = time.Now() - info.progress = progress + if _, ok := downloading[torrentName]; ok { + downloading[torrentName] = progress } - logger.Log(verbosity, "[snapshots] progress", "file", torrentName, "progress", fmt.Sprintf("%.2f%%", progress), "peers", len(peersOfThisFile), "webseeds", len(weebseedPeersOfThisFile)) - logger.Log(verbosity, "[snapshots] webseed peers", webseedRates...) - logger.Log(verbosity, "[snapshots] bittorrent peers", rates...) + d.logger.Log(d.verbosity, "[snapshots] progress", "file", torrentName, "progress", fmt.Sprintf("%.2f%%", progress), "peers", len(peersOfThisFile), "webseeds", len(weebseedPeersOfThisFile)) + d.logger.Log(d.verbosity, "[snapshots] webseed peers", webseedRates...) + d.logger.Log(d.verbosity, "[snapshots] bittorrent peers", rates...) } diagnostics.Send(diagnostics.SegmentDownloadStatistics{ @@ -2046,8 +1961,8 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { var webTransfers int32 - if webDownloadClient != nil { - webStats, _ := webDownloadClient.Stats(ctx) + if d.webDownloadClient != nil { + webStats, _ := d.webDownloadClient.Stats(d.ctx) if webStats != nil { if len(webStats.Transferring) != 0 && stats.Completed { @@ -2094,8 +2009,8 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { // more detailed statistic: download rate of each peer (for each file) if transfer.Percentage != 0 { - logger.Log(verbosity, "[snapshots] progress", "file", transferName, "progress", fmt.Sprintf("%.2f%%", float32(transfer.Percentage)), "webseeds", 1) - logger.Log(verbosity, "[snapshots] web peers", webseedRates...) + d.logger.Log(d.verbosity, "[snapshots] progress", "file", transferName, "progress", fmt.Sprintf("%.2f%%", float32(transfer.Percentage)), "webseeds", 1) + d.logger.Log(d.verbosity, "[snapshots] web peers", webseedRates...) 
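These hunks move per-file progress into `stats.downloadProgress`, remembering when each file's progress last changed so that files unchanged for 30 minutes can be reported as making no download progress. A minimal sketch of that bookkeeping (the file name is illustrative):

```go
package main

import (
	"fmt"
	"time"
)

type downloadProgress struct {
	time     time.Time
	progress float32
}

func main() {
	seen := map[string]downloadProgress{}
	// Record the timestamp only when the reported progress actually changes.
	update := func(file string, p float32) {
		if prev, ok := seen[file]; !ok || prev.progress != p {
			seen[file] = downloadProgress{time: time.Now(), progress: p}
		}
	}
	update("v1-000100-000200-bodies.seg", 12.5)

	var stalled []string
	for file, dp := range seen {
		if time.Since(dp.time) > 30*time.Minute {
			stalled = append(stalled, file)
		}
	}
	fmt.Println("stalled:", stalled)
}
```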
} diagnostics.Send(diagnostics.SegmentDownloadStatistics{ @@ -2109,7 +2024,7 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { } if len(downloading) > 0 { - if webDownloadClient != nil { + if d.webDownloadClient != nil { webTransfers += int32(len(downloading)) } @@ -2117,7 +2032,7 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { } if !stats.Completed { - logger.Debug("[snapshots] info", + d.logger.Debug("[snapshots] info", "len", len(torrents), "webTransfers", webTransfers, "torrent", torrentInfo, @@ -2140,7 +2055,7 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { if len(noMetadata) > 5 { noMetadata = append(noMetadata[:5], "...") } - logger.Info("[snapshots] no metadata yet", "files", amount, "list", strings.Join(noMetadata, ",")) + d.logger.Info("[snapshots] no metadata yet", "files", amount, "list", strings.Join(noMetadata, ",")) } var noDownloadProgress []string @@ -2158,17 +2073,17 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { zeroProgress = append(zeroProgress[:5], "...") } - logger.Info("[snapshots] no progress yet", "files", amount, "list", strings.Join(zeroProgress, ",")) + d.logger.Info("[snapshots] no progress yet", "files", amount, "list", strings.Join(zeroProgress, ",")) } if len(downloading) > 0 { amount := len(downloading) files := make([]string, 0, len(downloading)) - for file, info := range downloading { - files = append(files, fmt.Sprintf("%s (%.0f%%)", file, info.progress)) + for file, progress := range downloading { + files = append(files, fmt.Sprintf("%s (%.0f%%)", file, progress)) - if dp, ok := downloading[file]; ok { + if dp, ok := stats.downloadProgress[file]; ok { if time.Since(dp.time) > 30*time.Minute { noDownloadProgress = append(noDownloadProgress, file) } @@ -2176,16 +2091,16 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { } sort.Strings(files) - logger.Log(verbosity, "[snapshots] downloading", "files", amount, "list", strings.Join(files, ", ")) + d.logger.Log(d.verbosity, "[snapshots] downloading", "files", amount, "list", strings.Join(files, ", ")) } if time.Since(stats.lastTorrentStatus) > 5*time.Minute { stats.lastTorrentStatus = time.Now() if len(noDownloadProgress) > 0 { - progressStatus := getProgressStatus(torrentClient, noDownloadProgress) + progressStatus := getProgressStatus(d.torrentClient, noDownloadProgress) for file, status := range progressStatus { - logger.Debug(fmt.Sprintf("[snapshots] torrent status: %s\n %s", file, + d.logger.Debug(fmt.Sprintf("[snapshots] torrent status: %s\n %s", file, string(bytes.TrimRight(bytes.ReplaceAll(status, []byte("\n"), []byte("\n ")), "\n ")))) } } @@ -2215,17 +2130,7 @@ func (d *Downloader) ReCalcStats(interval time.Duration) { stats.PeersUnique = int32(len(peers)) stats.FilesTotal = int32(len(torrents)) + webTransfers - d.lock.Lock() d.stats = stats - - for file, info := range d.downloading { - if updated, ok := downloading[file]; ok { - info.time = updated.time - info.progress = updated.progress - } - } - - d.lock.Unlock() } type filterWriter struct { diff --git a/erigon-lib/downloader/downloadercfg/downloadercfg.go b/erigon-lib/downloader/downloadercfg/downloadercfg.go index 10f24cabe4d..7f3e5bfaef0 100644 --- a/erigon-lib/downloader/downloadercfg/downloadercfg.go +++ b/erigon-lib/downloader/downloadercfg/downloadercfg.go @@ -74,11 +74,6 @@ func Default() *torrent.ClientConfig { // *torrent.PeerConn: waiting for alloc limit reservation: reservation for 1802972 exceeds limiter max 1048576 torrentConfig.MaxAllocPeerRequestDataPerConn = 
int64(DefaultPieceSize) - // this limits the amount of unverified bytes - which will throttle the - // number of requests the torrent will handle - it acts as a brake on - // parallelism if set (default is 67,108,864) - torrentConfig.MaxUnverifiedBytes = 0 - // enable dht torrentConfig.NoDHT = true //torrentConfig.DisableTrackers = true diff --git a/erigon-lib/downloader/downloadercfg/logger.go b/erigon-lib/downloader/downloadercfg/logger.go index 7781f5d5d94..88eb5dcabfa 100644 --- a/erigon-lib/downloader/downloadercfg/logger.go +++ b/erigon-lib/downloader/downloadercfg/logger.go @@ -92,7 +92,7 @@ func (b adapterHandler) Handle(r lg.Record) { skip := strings.Contains(str, "EOF") || strings.Contains(str, "requested chunk too long") || strings.Contains(str, "banned ip") || - //strings.Contains(str, "banning webseed") || + strings.Contains(str, "banning webseed") || strings.Contains(str, "TrackerClient closed") || strings.Contains(str, "being sole dirtier of piece") || strings.Contains(str, "webrtc conn for unloaded torrent") || @@ -101,7 +101,7 @@ func (b adapterHandler) Handle(r lg.Record) { strings.Contains(str, "reservation cancelled") if skip { - log.Debug(str) + log.Trace(str) break } log.Warn(str) diff --git a/erigon-lib/downloader/webseed.go b/erigon-lib/downloader/webseed.go index e25eec414cc..c4080ec2c40 100644 --- a/erigon-lib/downloader/webseed.go +++ b/erigon-lib/downloader/webseed.go @@ -67,7 +67,7 @@ func (d *WebSeeds) getWebDownloadInfo(ctx context.Context, t *torrent.Torrent) ( headResponse.Body.Close() if headResponse.StatusCode != http.StatusOK { - d.logger.Trace("[snapshots.webseed] getWebDownloadInfo: HEAD request failed", + d.logger.Debug("[snapshots.webseed] getWebDownloadInfo: HEAD request failed", "webseed", webseed.String(), "name", t.Name(), "status", headResponse.Status) continue } @@ -93,10 +93,6 @@ func (d *WebSeeds) getWebDownloadInfo(ctx context.Context, t *torrent.Torrent) ( seedHashMismatches = append(seedHashMismatches, &seedHash{url: webseed}) } - if len(infos) == 0 { - d.logger.Trace("[snapshots.webseed] webseed info not found", "name", t.Name()) - } - return infos, seedHashMismatches, nil } diff --git a/erigon-lib/go.mod b/erigon-lib/go.mod index 23911a1afb3..e98345a3288 100644 --- a/erigon-lib/go.mod +++ b/erigon-lib/go.mod @@ -4,8 +4,8 @@ go 1.21 require ( github.com/erigontech/mdbx-go v0.38.0 - github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510125329-7b93b3d7e99f - github.com/ledgerwatch/interfaces v0.0.0-20240510032129-13f644ca2307 + github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240505022337-08dce201593b + github.com/ledgerwatch/interfaces v0.0.0-20240502103437-1e84e26799a6 github.com/ledgerwatch/log/v3 v3.9.0 github.com/ledgerwatch/secp256k1 v1.0.0 ) @@ -129,7 +129,7 @@ require ( github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/showwin/speedtest-go v1.7.5 + github.com/showwin/speedtest-go v1.6.12 github.com/sirupsen/logrus v1.9.3 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect @@ -148,7 +148,7 @@ require ( ) replace ( - github.com/anacrolix/torrent => github.com/erigontech/torrent v1.54.2-alpha-10 + github.com/anacrolix/torrent => github.com/erigontech/torrent v1.54.2-alpha-8 github.com/holiman/bloomfilter/v2 => github.com/AskAlexSharov/bloomfilter/v2 v2.0.8 github.com/tidwall/btree => github.com/AskAlexSharov/btree v1.6.2 ) diff --git 
a/erigon-lib/go.sum b/erigon-lib/go.sum index ea57eb86e67..f072d1bfc54 100644 --- a/erigon-lib/go.sum +++ b/erigon-lib/go.sum @@ -146,8 +146,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/erigontech/mdbx-go v0.38.0 h1:K64h6YHc2biN081DPEp/KP1TE+X0Jmxu8T+RJadNkXc= github.com/erigontech/mdbx-go v0.38.0/go.mod h1:FAMxbOgqOnRDx51j8HjuJZIgznbDwjX7LItd+/UWyA4= -github.com/erigontech/torrent v1.54.2-alpha-10 h1:MqEorLDG5n2jsNAsSC+TKuZUyExO/KfGumHxh7GHG3o= -github.com/erigontech/torrent v1.54.2-alpha-10/go.mod h1:nYNeuR4xPlEl4CturFD9/KRXBRJEcJGqjegDNWakwG4= +github.com/erigontech/torrent v1.54.2-alpha-8 h1:MQobu6sUZCFbmWpsB7GqAh0IWs7VAZ370POaVxlApIk= +github.com/erigontech/torrent v1.54.2-alpha-8/go.mod h1:nYNeuR4xPlEl4CturFD9/KRXBRJEcJGqjegDNWakwG4= github.com/frankban/quicktest v1.9.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= @@ -270,10 +270,10 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510125329-7b93b3d7e99f h1:vOUz9rYvrFWc84nuPUxReQj7OhU7QYWJCNXbH0NMPvI= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510125329-7b93b3d7e99f/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= -github.com/ledgerwatch/interfaces v0.0.0-20240510032129-13f644ca2307 h1:v2syJaHSCTSEnzwFUW4F6FL92ZAnKEoyBesnm2E/IEU= -github.com/ledgerwatch/interfaces v0.0.0-20240510032129-13f644ca2307/go.mod h1:ugQv1QllJzBny3cKZKxUrSnykkjkBgm27eQM6dnGAcc= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240505022337-08dce201593b h1:lfllTgrcwFzFXX7c/L4i/xAj/8noP/yHNSmC8dDi08s= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240505022337-08dce201593b/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= +github.com/ledgerwatch/interfaces v0.0.0-20240502103437-1e84e26799a6 h1:snFpr1kpUlT/ffEa29S9tGgu2uIaLJqA2wv9PuOlBvU= +github.com/ledgerwatch/interfaces v0.0.0-20240502103437-1e84e26799a6/go.mod h1:ugQv1QllJzBny3cKZKxUrSnykkjkBgm27eQM6dnGAcc= github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= github.com/ledgerwatch/log/v3 v3.9.0/go.mod h1:EiAY6upmI/6LkNhOVxb4eVsmsP11HZCnZ3PlJMjYiqE= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= @@ -418,8 +418,8 @@ github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFt github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/showwin/speedtest-go v1.7.5 h1:FQ3EdM2vnfw5BRCRzGCYe8aWu70rr21Az5ZFHiW9CdE= -github.com/showwin/speedtest-go v1.7.5/go.mod h1:uLgdWCNarXxlYsL2E5TOZpCIwpgSWnEANZp7gfHXHu0= +github.com/showwin/speedtest-go v1.6.12 h1:q+hWNn2cM35KkqtXGGbSmuJgd67gTP8+VlneY2hq9vU= +github.com/showwin/speedtest-go v1.6.12/go.mod h1:uLgdWCNarXxlYsL2E5TOZpCIwpgSWnEANZp7gfHXHu0= github.com/sirupsen/logrus 
v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= diff --git a/erigon-lib/gointerfaces/remoteproto/kv.pb.go b/erigon-lib/gointerfaces/remoteproto/kv.pb.go index b88e9200724..edd6463bf1d 100644 --- a/erigon-lib/gointerfaces/remoteproto/kv.pb.go +++ b/erigon-lib/gointerfaces/remoteproto/kv.pb.go @@ -1085,7 +1085,7 @@ func (x *DomainGetReply) GetOk() bool { return false } -type HistorySeekReq struct { +type HistoryGetReq struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -1096,8 +1096,8 @@ type HistorySeekReq struct { Ts uint64 `protobuf:"varint,4,opt,name=ts,proto3" json:"ts,omitempty"` } -func (x *HistorySeekReq) Reset() { - *x = HistorySeekReq{} +func (x *HistoryGetReq) Reset() { + *x = HistoryGetReq{} if protoimpl.UnsafeEnabled { mi := &file_remote_kv_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1105,13 +1105,13 @@ func (x *HistorySeekReq) Reset() { } } -func (x *HistorySeekReq) String() string { +func (x *HistoryGetReq) String() string { return protoimpl.X.MessageStringOf(x) } -func (*HistorySeekReq) ProtoMessage() {} +func (*HistoryGetReq) ProtoMessage() {} -func (x *HistorySeekReq) ProtoReflect() protoreflect.Message { +func (x *HistoryGetReq) ProtoReflect() protoreflect.Message { mi := &file_remote_kv_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1123,40 +1123,40 @@ func (x *HistorySeekReq) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use HistorySeekReq.ProtoReflect.Descriptor instead. -func (*HistorySeekReq) Descriptor() ([]byte, []int) { +// Deprecated: Use HistoryGetReq.ProtoReflect.Descriptor instead. 
+func (*HistoryGetReq) Descriptor() ([]byte, []int) { return file_remote_kv_proto_rawDescGZIP(), []int{12} } -func (x *HistorySeekReq) GetTxId() uint64 { +func (x *HistoryGetReq) GetTxId() uint64 { if x != nil { return x.TxId } return 0 } -func (x *HistorySeekReq) GetTable() string { +func (x *HistoryGetReq) GetTable() string { if x != nil { return x.Table } return "" } -func (x *HistorySeekReq) GetK() []byte { +func (x *HistoryGetReq) GetK() []byte { if x != nil { return x.K } return nil } -func (x *HistorySeekReq) GetTs() uint64 { +func (x *HistoryGetReq) GetTs() uint64 { if x != nil { return x.Ts } return 0 } -type HistorySeekReply struct { +type HistoryGetReply struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -1165,8 +1165,8 @@ type HistorySeekReply struct { Ok bool `protobuf:"varint,2,opt,name=ok,proto3" json:"ok,omitempty"` } -func (x *HistorySeekReply) Reset() { - *x = HistorySeekReply{} +func (x *HistoryGetReply) Reset() { + *x = HistoryGetReply{} if protoimpl.UnsafeEnabled { mi := &file_remote_kv_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1174,13 +1174,13 @@ func (x *HistorySeekReply) Reset() { } } -func (x *HistorySeekReply) String() string { +func (x *HistoryGetReply) String() string { return protoimpl.X.MessageStringOf(x) } -func (*HistorySeekReply) ProtoMessage() {} +func (*HistoryGetReply) ProtoMessage() {} -func (x *HistorySeekReply) ProtoReflect() protoreflect.Message { +func (x *HistoryGetReply) ProtoReflect() protoreflect.Message { mi := &file_remote_kv_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1192,19 +1192,19 @@ func (x *HistorySeekReply) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use HistorySeekReply.ProtoReflect.Descriptor instead. -func (*HistorySeekReply) Descriptor() ([]byte, []int) { +// Deprecated: Use HistoryGetReply.ProtoReflect.Descriptor instead. 
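This hunk renames the generated `HistorySeek` messages back to `HistoryGet`. A small sketch of constructing the reverted request type and reading it back through the generated accessors shown here; field numbers follow the protobuf tags (`tx_id=1`, `table=2`, `k=3`, `ts=4`), and the table name and key bytes are illustrative:

```go
package main

import (
	"fmt"

	"github.com/ledgerwatch/erigon-lib/gointerfaces/remoteproto"
)

func main() {
	req := &remoteproto.HistoryGetReq{
		TxId:  1,
		Table: "AccountsHistory", // illustrative table name
		K:     []byte{0xde, 0xad, 0xbe, 0xef},
		Ts:    42,
	}
	// The generated getters are nil-safe, mirroring the methods above.
	fmt.Printf("table=%s ts=%d key=%x\n", req.GetTable(), req.GetTs(), req.GetK())
}
```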
+func (*HistoryGetReply) Descriptor() ([]byte, []int) { return file_remote_kv_proto_rawDescGZIP(), []int{13} } -func (x *HistorySeekReply) GetV() []byte { +func (x *HistoryGetReply) GetV() []byte { if x != nil { return x.V } return nil } -func (x *HistorySeekReply) GetOk() bool { +func (x *HistoryGetReply) GetOk() bool { if x != nil { return x.Ok } @@ -1891,144 +1891,143 @@ var file_remote_kv_proto_rawDesc = []byte{ 0x74, 0x65, 0x73, 0x74, 0x22, 0x2e, 0x0a, 0x0e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x76, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x02, 0x6f, 0x6b, 0x22, 0x59, 0x0a, 0x0e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x53, - 0x65, 0x65, 0x6b, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x12, 0x0c, 0x0a, 0x01, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, - 0x0e, 0x0a, 0x02, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x22, - 0x30, 0x0a, 0x10, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, - 0x76, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, - 0x6b, 0x22, 0xeb, 0x01, 0x0a, 0x0d, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x0c, - 0x0a, 0x01, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x17, 0x0a, 0x07, - 0x66, 0x72, 0x6f, 0x6d, 0x5f, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x06, 0x66, - 0x72, 0x6f, 0x6d, 0x54, 0x73, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x5f, 0x74, 0x73, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x12, 0x52, 0x04, 0x74, 0x6f, 0x54, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, - 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, - 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, - 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, - 0x59, 0x0a, 0x0f, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, - 
0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xdf, 0x01, 0x0a, 0x0f, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x13, - 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, - 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, 0x6f, - 0x6d, 0x5f, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x06, 0x66, 0x72, 0x6f, 0x6d, - 0x54, 0x73, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x5f, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x12, 0x52, 0x04, 0x74, 0x6f, 0x54, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, - 0x5f, 0x61, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, - 0x72, 0x64, 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, - 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, - 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, - 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8a, 0x02, 0x0a, - 0x0e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, - 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, - 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x72, - 0x6f, 0x6d, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x66, 0x72, - 0x6f, 0x6d, 0x4b, 0x65, 0x79, 0x12, 0x15, 0x0a, 0x06, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x0a, 0x02, - 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, - 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x6c, 0x61, - 0x74, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, - 0x63, 0x65, 0x6e, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, - 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, - 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, - 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x5b, 0x0a, 0x05, 0x50, 0x61, 0x69, - 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, - 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x26, - 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x03, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, - 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x42, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x69, 0x73, 0x50, - 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x78, - 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6e, 0x65, 0x78, - 0x74, 0x4b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x4f, 0x0a, 0x0f, 0x49, 0x6e, - 0x64, 0x65, 0x78, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, - 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x54, 0x69, 0x6d, 0x65, - 0x53, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2a, 0x86, 0x02, 0x0a, 0x02, - 0x4f, 0x70, 0x12, 0x09, 0x0a, 0x05, 0x46, 0x49, 0x52, 0x53, 0x54, 0x10, 0x00, 0x12, 0x0d, 0x0a, - 0x09, 0x46, 0x49, 0x52, 0x53, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x53, 0x45, 0x45, 0x4b, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x42, - 0x4f, 0x54, 0x48, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x55, 0x52, 0x52, 0x45, 0x4e, 0x54, - 0x10, 0x04, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x41, 0x53, 0x54, 0x10, 0x06, 0x12, 0x0c, 0x0a, 0x08, - 0x4c, 0x41, 0x53, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x07, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x45, - 0x58, 0x54, 0x10, 0x08, 0x12, 0x0c, 0x0a, 0x08, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x44, 0x55, 0x50, - 0x10, 0x09, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x4e, 0x4f, 0x5f, 0x44, 0x55, - 0x50, 0x10, 0x0b, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x52, 0x45, 0x56, 0x10, 0x0c, 0x12, 0x0c, 0x0a, - 0x08, 0x50, 0x52, 0x45, 0x56, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x50, - 0x52, 0x45, 0x56, 0x5f, 0x4e, 0x4f, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0e, 0x12, 0x0e, 0x0a, 0x0a, - 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x0f, 0x12, 0x13, 0x0a, 0x0f, - 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, - 0x10, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, 0x1e, 0x12, 0x09, 0x0a, 0x05, 0x43, - 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x1f, 0x12, 0x11, 0x0a, 0x0d, 0x4f, 0x50, 0x45, 0x4e, 0x5f, 0x44, - 0x55, 0x50, 0x5f, 0x53, 0x4f, 0x52, 0x54, 0x10, 0x20, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4f, 0x55, - 0x4e, 0x54, 0x10, 0x21, 0x2a, 0x48, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, - 0x0a, 0x07, 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, - 0x50, 0x53, 0x45, 0x52, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x44, 0x45, 0x10, - 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x5f, 0x43, 0x4f, 0x44, 0x45, - 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x04, 0x2a, 0x24, - 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x46, - 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x4e, 0x57, 0x49, - 0x4e, 0x44, 0x10, 0x01, 0x32, 0xbd, 0x04, 0x0a, 0x02, 0x4b, 0x56, 0x12, 0x36, 0x0a, 0x07, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x26, 0x0a, 0x02, 0x54, 0x78, 0x12, 0x0e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x2e, 0x43, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x1a, 0x0c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x28, 0x01, 0x30, 0x01, 0x12, 0x46, 0x0a, 0x0c, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x1a, 0x2e, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x61, 0x74, 0x63, - 0x68, 0x30, 0x01, 0x12, 0x3d, 0x0a, 0x09, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, - 0x12, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x12, 0x28, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x10, 0x2e, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0d, 0x2e, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x39, 0x0a, 0x09, - 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x12, 0x14, 0x2e, 0x72, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x2e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x1a, - 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, - 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3f, 0x0a, 0x0b, 0x48, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x53, 0x65, 0x65, 0x6b, 0x12, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, - 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, 0x71, 0x1a, 0x18, - 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x53, - 0x65, 0x65, 0x6b, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0a, 0x49, 0x6e, 0x64, 0x65, - 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, - 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x36, 0x0a, 0x0c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, - 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, - 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x34, - 0x0a, 0x0b, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x16, 0x2e, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, - 0x61, 0x69, 0x72, 0x73, 0x42, 0x16, 0x5a, 0x14, 0x2e, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x3b, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x02, 0x6f, 0x6b, 0x22, 0x58, 0x0a, 0x0d, 
0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x47, + 0x65, 0x74, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x12, 0x0c, 0x0a, 0x01, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x0e, + 0x0a, 0x02, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x22, 0x2f, + 0x0a, 0x0f, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x0c, 0x0a, 0x01, 0x76, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x76, 0x12, + 0x0e, 0x0a, 0x02, 0x6f, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x6f, 0x6b, 0x22, + 0xeb, 0x01, 0x0a, 0x0d, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, + 0x71, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x0c, 0x0a, 0x01, + 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x6b, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, + 0x6f, 0x6d, 0x5f, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x06, 0x66, 0x72, 0x6f, + 0x6d, 0x54, 0x73, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x5f, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x12, 0x52, 0x04, 0x74, 0x6f, 0x54, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, + 0x72, 0x5f, 0x61, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x6f, 0x72, 0x64, 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x59, 0x0a, + 0x0f, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, + 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, + 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xdf, 0x01, 0x0a, 0x0f, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, 0x05, + 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, 0x49, + 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x66, 0x72, 0x6f, 0x6d, 0x5f, + 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x12, 0x52, 0x06, 0x66, 0x72, 0x6f, 0x6d, 0x54, 0x73, + 0x12, 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x5f, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x12, 0x52, + 0x04, 0x74, 0x6f, 0x54, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 
0x65, 0x72, 0x5f, 0x61, + 0x73, 0x63, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x72, 0x64, + 0x65, 0x72, 0x41, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1b, + 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8a, 0x02, 0x0a, 0x0e, 0x44, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x13, 0x0a, + 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x78, + 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x66, 0x72, 0x6f, 0x6d, + 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x66, 0x72, 0x6f, 0x6d, + 0x4b, 0x65, 0x79, 0x12, 0x15, 0x0a, 0x06, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x05, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x73, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, + 0x74, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x6c, 0x61, 0x74, 0x65, + 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x73, 0x63, 0x65, + 0x6e, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x41, + 0x73, 0x63, 0x65, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, + 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, + 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, + 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x5b, 0x0a, 0x05, 0x50, 0x61, 0x69, 0x72, 0x73, + 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x04, + 0x6b, 0x65, 0x79, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, + 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x42, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x69, 0x73, 0x50, 0x61, 0x67, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6e, 0x65, 0x78, 0x74, 0x4b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x4f, 0x0a, 0x0f, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6e, + 0x65, 0x78, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, + 
0x20, 0x01, 0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x74, + 0x61, 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x12, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2a, 0x86, 0x02, 0x0a, 0x02, 0x4f, 0x70, + 0x12, 0x09, 0x0a, 0x05, 0x46, 0x49, 0x52, 0x53, 0x54, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x46, + 0x49, 0x52, 0x53, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x45, + 0x45, 0x4b, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x45, 0x45, 0x4b, 0x5f, 0x42, 0x4f, 0x54, + 0x48, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x55, 0x52, 0x52, 0x45, 0x4e, 0x54, 0x10, 0x04, + 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x41, 0x53, 0x54, 0x10, 0x06, 0x12, 0x0c, 0x0a, 0x08, 0x4c, 0x41, + 0x53, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x07, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x45, 0x58, 0x54, + 0x10, 0x08, 0x12, 0x0c, 0x0a, 0x08, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x09, + 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x4e, 0x4f, 0x5f, 0x44, 0x55, 0x50, 0x10, + 0x0b, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x52, 0x45, 0x56, 0x10, 0x0c, 0x12, 0x0c, 0x0a, 0x08, 0x50, + 0x52, 0x45, 0x56, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x50, 0x52, 0x45, + 0x56, 0x5f, 0x4e, 0x4f, 0x5f, 0x44, 0x55, 0x50, 0x10, 0x0e, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x45, + 0x45, 0x4b, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x0f, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, + 0x45, 0x4b, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x10, 0x12, + 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10, 0x1e, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, + 0x53, 0x45, 0x10, 0x1f, 0x12, 0x11, 0x0a, 0x0d, 0x4f, 0x50, 0x45, 0x4e, 0x5f, 0x44, 0x55, 0x50, + 0x5f, 0x53, 0x4f, 0x52, 0x54, 0x10, 0x20, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4f, 0x55, 0x4e, 0x54, + 0x10, 0x21, 0x2a, 0x48, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, + 0x53, 0x54, 0x4f, 0x52, 0x41, 0x47, 0x45, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x53, + 0x45, 0x52, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x02, 0x12, + 0x0f, 0x0a, 0x0b, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x03, + 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x04, 0x2a, 0x24, 0x0a, 0x09, + 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x4f, 0x52, + 0x57, 0x41, 0x52, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x4e, 0x57, 0x49, 0x4e, 0x44, + 0x10, 0x01, 0x32, 0xba, 0x04, 0x0a, 0x02, 0x4b, 0x56, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x26, 0x0a, 0x02, 0x54, 0x78, 0x12, 0x0e, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2e, 0x43, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x1a, 0x0c, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2e, 0x50, 0x61, 0x69, 0x72, 0x28, 0x01, 0x30, 0x01, 0x12, 0x46, 0x0a, 0x0c, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x1a, 0x2e, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x65, 
0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x30, + 0x01, 0x12, 0x3d, 0x0a, 0x09, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x18, + 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x12, 0x28, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x10, 0x2e, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0d, 0x2e, 0x72, 0x65, + 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x39, 0x0a, 0x09, 0x44, 0x6f, + 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x12, 0x14, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x2e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, + 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x47, 0x65, 0x74, + 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0a, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, + 0x47, 0x65, 0x74, 0x12, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x72, 0x79, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x47, 0x65, 0x74, 0x52, 0x65, + 0x70, 0x6c, 0x79, 0x12, 0x3c, 0x0a, 0x0a, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x15, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x12, 0x36, 0x0a, 0x0c, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, + 0x65, 0x12, 0x17, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, + 0x72, 0x79, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x1a, 0x0d, 0x2e, 0x72, 0x65, 0x6d, + 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x12, 0x34, 0x0a, 0x0b, 0x44, 0x6f, 0x6d, + 0x61, 0x69, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x16, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, + 0x65, 0x2e, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, + 0x1a, 0x0d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x50, 0x61, 0x69, 0x72, 0x73, 0x42, + 0x16, 0x5a, 0x14, 0x2e, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x3b, 0x72, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2061,8 +2060,8 @@ var file_remote_kv_proto_goTypes = []interface{}{ (*RangeReq)(nil), // 12: remote.RangeReq (*DomainGetReq)(nil), // 13: remote.DomainGetReq (*DomainGetReply)(nil), // 14: remote.DomainGetReply - (*HistorySeekReq)(nil), // 15: remote.HistorySeekReq - (*HistorySeekReply)(nil), // 16: remote.HistorySeekReply + (*HistoryGetReq)(nil), // 15: remote.HistoryGetReq + (*HistoryGetReply)(nil), // 16: remote.HistoryGetReply (*IndexRangeReq)(nil), // 17: remote.IndexRangeReq (*IndexRangeReply)(nil), // 18: remote.IndexRangeReply (*HistoryRangeReq)(nil), // 19: remote.HistoryRangeReq @@ -2091,7 +2090,7 @@ var file_remote_kv_proto_depIdxs = []int32{ 10, // 12: remote.KV.Snapshots:input_type -> remote.SnapshotsRequest 12, // 13: remote.KV.Range:input_type -> remote.RangeReq 13, // 14: remote.KV.DomainGet:input_type 
-> remote.DomainGetReq - 15, // 15: remote.KV.HistorySeek:input_type -> remote.HistorySeekReq + 15, // 15: remote.KV.HistoryGet:input_type -> remote.HistoryGetReq 17, // 16: remote.KV.IndexRange:input_type -> remote.IndexRangeReq 19, // 17: remote.KV.HistoryRange:input_type -> remote.HistoryRangeReq 20, // 18: remote.KV.DomainRange:input_type -> remote.DomainRangeReq @@ -2101,7 +2100,7 @@ var file_remote_kv_proto_depIdxs = []int32{ 11, // 22: remote.KV.Snapshots:output_type -> remote.SnapshotsReply 21, // 23: remote.KV.Range:output_type -> remote.Pairs 14, // 24: remote.KV.DomainGet:output_type -> remote.DomainGetReply - 16, // 25: remote.KV.HistorySeek:output_type -> remote.HistorySeekReply + 16, // 25: remote.KV.HistoryGet:output_type -> remote.HistoryGetReply 18, // 26: remote.KV.IndexRange:output_type -> remote.IndexRangeReply 21, // 27: remote.KV.HistoryRange:output_type -> remote.Pairs 21, // 28: remote.KV.DomainRange:output_type -> remote.Pairs @@ -2263,7 +2262,7 @@ func file_remote_kv_proto_init() { } } file_remote_kv_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HistorySeekReq); i { + switch v := v.(*HistoryGetReq); i { case 0: return &v.state case 1: @@ -2275,7 +2274,7 @@ func file_remote_kv_proto_init() { } } file_remote_kv_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HistorySeekReply); i { + switch v := v.(*HistoryGetReply); i { case 0: return &v.state case 1: diff --git a/erigon-lib/gointerfaces/remoteproto/kv_client_mock.go b/erigon-lib/gointerfaces/remoteproto/kv_client_mock.go index 75176766b2f..73d1f3e9373 100644 --- a/erigon-lib/gointerfaces/remoteproto/kv_client_mock.go +++ b/erigon-lib/gointerfaces/remoteproto/kv_client_mock.go @@ -130,90 +130,90 @@ func (c *MockKVClientDomainRangeCall) DoAndReturn(f func(context.Context, *Domai return c } -// HistoryRange mocks base method. -func (m *MockKVClient) HistoryRange(arg0 context.Context, arg1 *HistoryRangeReq, arg2 ...grpc.CallOption) (*Pairs, error) { +// HistoryGet mocks base method. +func (m *MockKVClient) HistoryGet(arg0 context.Context, arg1 *HistoryGetReq, arg2 ...grpc.CallOption) (*HistoryGetReply, error) { m.ctrl.T.Helper() varargs := []any{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } - ret := m.ctrl.Call(m, "HistoryRange", varargs...) - ret0, _ := ret[0].(*Pairs) + ret := m.ctrl.Call(m, "HistoryGet", varargs...) + ret0, _ := ret[0].(*HistoryGetReply) ret1, _ := ret[1].(error) return ret0, ret1 } -// HistoryRange indicates an expected call of HistoryRange. -func (mr *MockKVClientMockRecorder) HistoryRange(arg0, arg1 any, arg2 ...any) *MockKVClientHistoryRangeCall { +// HistoryGet indicates an expected call of HistoryGet. +func (mr *MockKVClientMockRecorder) HistoryGet(arg0, arg1 any, arg2 ...any) *MockKVClientHistoryGetCall { mr.mock.ctrl.T.Helper() varargs := append([]any{arg0, arg1}, arg2...) - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HistoryRange", reflect.TypeOf((*MockKVClient)(nil).HistoryRange), varargs...) - return &MockKVClientHistoryRangeCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HistoryGet", reflect.TypeOf((*MockKVClient)(nil).HistoryGet), varargs...) 
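// Reviewer note (illustrative sketch, not part of the change): a test would
// drive this regenerated mock in the usual gomock style, e.g.:
//
//     m := NewMockKVClient(ctrl) // ctrl is a *gomock.Controller
//     m.EXPECT().
//         HistoryGet(gomock.Any(), gomock.Any()).
//         Return(&HistoryGetReply{V: []byte{0x01}, Ok: true}, nil)
//
// NewMockKVClient and ctrl are assumed from the rest of this generated file.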
+ return &MockKVClientHistoryGetCall{Call: call} } -// MockKVClientHistoryRangeCall wrap *gomock.Call -type MockKVClientHistoryRangeCall struct { +// MockKVClientHistoryGetCall wrap *gomock.Call +type MockKVClientHistoryGetCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockKVClientHistoryRangeCall) Return(arg0 *Pairs, arg1 error) *MockKVClientHistoryRangeCall { +func (c *MockKVClientHistoryGetCall) Return(arg0 *HistoryGetReply, arg1 error) *MockKVClientHistoryGetCall { c.Call = c.Call.Return(arg0, arg1) return c } // Do rewrite *gomock.Call.Do -func (c *MockKVClientHistoryRangeCall) Do(f func(context.Context, *HistoryRangeReq, ...grpc.CallOption) (*Pairs, error)) *MockKVClientHistoryRangeCall { +func (c *MockKVClientHistoryGetCall) Do(f func(context.Context, *HistoryGetReq, ...grpc.CallOption) (*HistoryGetReply, error)) *MockKVClientHistoryGetCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockKVClientHistoryRangeCall) DoAndReturn(f func(context.Context, *HistoryRangeReq, ...grpc.CallOption) (*Pairs, error)) *MockKVClientHistoryRangeCall { +func (c *MockKVClientHistoryGetCall) DoAndReturn(f func(context.Context, *HistoryGetReq, ...grpc.CallOption) (*HistoryGetReply, error)) *MockKVClientHistoryGetCall { c.Call = c.Call.DoAndReturn(f) return c } -// HistorySeek mocks base method. -func (m *MockKVClient) HistorySeek(arg0 context.Context, arg1 *HistorySeekReq, arg2 ...grpc.CallOption) (*HistorySeekReply, error) { +// HistoryRange mocks base method. +func (m *MockKVClient) HistoryRange(arg0 context.Context, arg1 *HistoryRangeReq, arg2 ...grpc.CallOption) (*Pairs, error) { m.ctrl.T.Helper() varargs := []any{arg0, arg1} for _, a := range arg2 { varargs = append(varargs, a) } - ret := m.ctrl.Call(m, "HistorySeek", varargs...) - ret0, _ := ret[0].(*HistorySeekReply) + ret := m.ctrl.Call(m, "HistoryRange", varargs...) + ret0, _ := ret[0].(*Pairs) ret1, _ := ret[1].(error) return ret0, ret1 } -// HistorySeek indicates an expected call of HistorySeek. -func (mr *MockKVClientMockRecorder) HistorySeek(arg0, arg1 any, arg2 ...any) *MockKVClientHistorySeekCall { +// HistoryRange indicates an expected call of HistoryRange. +func (mr *MockKVClientMockRecorder) HistoryRange(arg0, arg1 any, arg2 ...any) *MockKVClientHistoryRangeCall { mr.mock.ctrl.T.Helper() varargs := append([]any{arg0, arg1}, arg2...) - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HistorySeek", reflect.TypeOf((*MockKVClient)(nil).HistorySeek), varargs...) - return &MockKVClientHistorySeekCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HistoryRange", reflect.TypeOf((*MockKVClient)(nil).HistoryRange), varargs...) 
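// Reviewer note: the hunk alignment above is misleading at first glance.
// The generated file lists methods alphabetically, so inserting HistoryGet
// before HistoryRange makes the diff pair up as "HistoryRange -> HistoryGet"
// and "HistorySeek -> HistoryRange"; semantically it is still the single
// rename HistorySeek -> HistoryGet, with HistoryRange itself unchanged.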
+ return &MockKVClientHistoryRangeCall{Call: call} } -// MockKVClientHistorySeekCall wrap *gomock.Call -type MockKVClientHistorySeekCall struct { +// MockKVClientHistoryRangeCall wrap *gomock.Call +type MockKVClientHistoryRangeCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockKVClientHistorySeekCall) Return(arg0 *HistorySeekReply, arg1 error) *MockKVClientHistorySeekCall { +func (c *MockKVClientHistoryRangeCall) Return(arg0 *Pairs, arg1 error) *MockKVClientHistoryRangeCall { c.Call = c.Call.Return(arg0, arg1) return c } // Do rewrite *gomock.Call.Do -func (c *MockKVClientHistorySeekCall) Do(f func(context.Context, *HistorySeekReq, ...grpc.CallOption) (*HistorySeekReply, error)) *MockKVClientHistorySeekCall { +func (c *MockKVClientHistoryRangeCall) Do(f func(context.Context, *HistoryRangeReq, ...grpc.CallOption) (*Pairs, error)) *MockKVClientHistoryRangeCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockKVClientHistorySeekCall) DoAndReturn(f func(context.Context, *HistorySeekReq, ...grpc.CallOption) (*HistorySeekReply, error)) *MockKVClientHistorySeekCall { +func (c *MockKVClientHistoryRangeCall) DoAndReturn(f func(context.Context, *HistoryRangeReq, ...grpc.CallOption) (*Pairs, error)) *MockKVClientHistoryRangeCall { c.Call = c.Call.DoAndReturn(f) return c } diff --git a/erigon-lib/gointerfaces/remoteproto/kv_grpc.pb.go b/erigon-lib/gointerfaces/remoteproto/kv_grpc.pb.go index 5478e361d44..5bf34438ffd 100644 --- a/erigon-lib/gointerfaces/remoteproto/kv_grpc.pb.go +++ b/erigon-lib/gointerfaces/remoteproto/kv_grpc.pb.go @@ -27,7 +27,7 @@ const ( KV_Snapshots_FullMethodName = "/remote.KV/Snapshots" KV_Range_FullMethodName = "/remote.KV/Range" KV_DomainGet_FullMethodName = "/remote.KV/DomainGet" - KV_HistorySeek_FullMethodName = "/remote.KV/HistorySeek" + KV_HistoryGet_FullMethodName = "/remote.KV/HistoryGet" KV_IndexRange_FullMethodName = "/remote.KV/IndexRange" KV_HistoryRange_FullMethodName = "/remote.KV/HistoryRange" KV_DomainRange_FullMethodName = "/remote.KV/DomainRange" @@ -55,7 +55,7 @@ type KVClient interface { Range(ctx context.Context, in *RangeReq, opts ...grpc.CallOption) (*Pairs, error) // Temporal methods DomainGet(ctx context.Context, in *DomainGetReq, opts ...grpc.CallOption) (*DomainGetReply, error) - HistorySeek(ctx context.Context, in *HistorySeekReq, opts ...grpc.CallOption) (*HistorySeekReply, error) + HistoryGet(ctx context.Context, in *HistoryGetReq, opts ...grpc.CallOption) (*HistoryGetReply, error) IndexRange(ctx context.Context, in *IndexRangeReq, opts ...grpc.CallOption) (*IndexRangeReply, error) HistoryRange(ctx context.Context, in *HistoryRangeReq, opts ...grpc.CallOption) (*Pairs, error) DomainRange(ctx context.Context, in *DomainRangeReq, opts ...grpc.CallOption) (*Pairs, error) @@ -168,9 +168,9 @@ func (c *kVClient) DomainGet(ctx context.Context, in *DomainGetReq, opts ...grpc return out, nil } -func (c *kVClient) HistorySeek(ctx context.Context, in *HistorySeekReq, opts ...grpc.CallOption) (*HistorySeekReply, error) { - out := new(HistorySeekReply) - err := c.cc.Invoke(ctx, KV_HistorySeek_FullMethodName, in, out, opts...) +func (c *kVClient) HistoryGet(ctx context.Context, in *HistoryGetReq, opts ...grpc.CallOption) (*HistoryGetReply, error) { + out := new(HistoryGetReply) + err := c.cc.Invoke(ctx, KV_HistoryGet_FullMethodName, in, out, opts...) 
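// Reviewer note (illustrative, variable names assumed): after the rename a
// caller-side point lookup goes through this unary RPC as:
//
//     reply, err := kvClient.HistoryGet(ctx, &remote.HistoryGetReq{
//         TxId:  txID,                       // server-side tx handle from the Tx stream
//         Table: string(kv.AccountsHistory), // any kv.History table name
//         K:     key,
//         Ts:    txNum,
//     })
//     if err == nil && reply.Ok {
//         _ = reply.V // historical value of key as of txNum
//     }
//
// Field names follow the HistoryGetReq/HistoryGetReply messages re-emitted
// in kv.pb.go above.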
if err != nil { return nil, err } @@ -226,7 +226,7 @@ type KVServer interface { Range(context.Context, *RangeReq) (*Pairs, error) // Temporal methods DomainGet(context.Context, *DomainGetReq) (*DomainGetReply, error) - HistorySeek(context.Context, *HistorySeekReq) (*HistorySeekReply, error) + HistoryGet(context.Context, *HistoryGetReq) (*HistoryGetReply, error) IndexRange(context.Context, *IndexRangeReq) (*IndexRangeReply, error) HistoryRange(context.Context, *HistoryRangeReq) (*Pairs, error) DomainRange(context.Context, *DomainRangeReq) (*Pairs, error) @@ -255,8 +255,8 @@ func (UnimplementedKVServer) Range(context.Context, *RangeReq) (*Pairs, error) { func (UnimplementedKVServer) DomainGet(context.Context, *DomainGetReq) (*DomainGetReply, error) { return nil, status.Errorf(codes.Unimplemented, "method DomainGet not implemented") } -func (UnimplementedKVServer) HistorySeek(context.Context, *HistorySeekReq) (*HistorySeekReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method HistorySeek not implemented") +func (UnimplementedKVServer) HistoryGet(context.Context, *HistoryGetReq) (*HistoryGetReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method HistoryGet not implemented") } func (UnimplementedKVServer) IndexRange(context.Context, *IndexRangeReq) (*IndexRangeReply, error) { return nil, status.Errorf(codes.Unimplemented, "method IndexRange not implemented") @@ -399,20 +399,20 @@ func _KV_DomainGet_Handler(srv interface{}, ctx context.Context, dec func(interf return interceptor(ctx, in, info, handler) } -func _KV_HistorySeek_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HistorySeekReq) +func _KV_HistoryGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HistoryGetReq) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(KVServer).HistorySeek(ctx, in) + return srv.(KVServer).HistoryGet(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: KV_HistorySeek_FullMethodName, + FullMethod: KV_HistoryGet_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KVServer).HistorySeek(ctx, req.(*HistorySeekReq)) + return srv.(KVServer).HistoryGet(ctx, req.(*HistoryGetReq)) } return interceptor(ctx, in, info, handler) } @@ -495,8 +495,8 @@ var KV_ServiceDesc = grpc.ServiceDesc{ Handler: _KV_DomainGet_Handler, }, { - MethodName: "HistorySeek", - Handler: _KV_HistorySeek_Handler, + MethodName: "HistoryGet", + Handler: _KV_HistoryGet_Handler, }, { MethodName: "IndexRange", diff --git a/erigon-lib/kv/iter/iter_exact.go b/erigon-lib/kv/iter/iter_exact.go index 8261a39dc82..3eb3d8ecc9d 100644 --- a/erigon-lib/kv/iter/iter_exact.go +++ b/erigon-lib/kv/iter/iter_exact.go @@ -173,27 +173,48 @@ func (m *UnionKVIter) Close() { } type WrapKVSIter struct { - y KV + y KV + yHasNext bool + yNextK, yNextV []byte + err error } func WrapKVS(y KV) KVS { if y == nil { return EmptyKVS } - return &WrapKVSIter{y: y} + m := &WrapKVSIter{y: y} + m.advance() + return m } func (m *WrapKVSIter) HasNext() bool { - return m.y.HasNext() + return m.err != nil || m.yHasNext +} +func (m *WrapKVSIter) advance() { + if m.err != nil { + return + } + m.yHasNext = m.y.HasNext() + if m.yHasNext { + m.yNextK, m.yNextV, m.err = m.y.Next() + } } - func (m *WrapKVSIter) Next() ([]byte, []byte, uint64, error) { - 
k, v, err := m.y.Next() + if m.err != nil { + return nil, nil, 0, m.err + } + k, v, err := m.yNextK, m.yNextV, m.err + m.advance() return k, v, 0, err } +// func (m *WrapKVSIter) ToArray() (keys, values [][]byte, err error) { return ToArrayKV(m) } func (m *WrapKVSIter) Close() { - m.y.Close() + if m.y != nil { + m.y.Close() + m.y = nil + } } type WrapKVIter struct { @@ -216,8 +237,12 @@ func (m *WrapKVIter) Next() ([]byte, []byte, error) { return k, v, err } +// func (m *WrapKVIter) ToArray() (keys, values [][]byte, err error) { return ToArrayKV(m) } func (m *WrapKVIter) Close() { - m.x.Close() + if m.x != nil { + m.x.Close() + m.x = nil + } } // MergedKV - merge 2 kv.Pairs streams (without replacements, or "shadowing", diff --git a/erigon-lib/kv/kv_interface.go b/erigon-lib/kv/kv_interface.go index 1399d2a97d0..8770c759c75 100644 --- a/erigon-lib/kv/kv_interface.go +++ b/erigon-lib/kv/kv_interface.go @@ -449,7 +449,7 @@ type BucketMigrator interface { // Cursor - class for navigating through a database // CursorDupSort are inherit this class // -// If methods (like First/Next/seekInFiles) return error, then returned key SHOULD not be nil (can be []byte{} for example). +// If methods (like First/Next/Seek) return error, then returned key SHOULD not be nil (can be []byte{} for example). // Then looping code will look as: // c := kv.Cursor(bucketName) // @@ -545,7 +545,7 @@ type TemporalTx interface { Tx TemporalGetter DomainGetAsOf(name Domain, k, k2 []byte, ts uint64) (v []byte, ok bool, err error) - HistorySeek(name History, k []byte, ts uint64) (v []byte, ok bool, err error) + HistoryGet(name History, k []byte, ts uint64) (v []byte, ok bool, err error) // IndexRange - return iterator over range of inverted index for given key `k` // Asc semantic: [from, to) AND from > to @@ -564,12 +564,6 @@ type TemporalTx interface { type TemporalCommitment interface { ComputeCommitment(ctx context.Context, saveStateAfter, trace bool) (rootHash []byte, err error) } - -type TemporalRwTx interface { - RwTx - TemporalTx -} - type TemporalPutDel interface { // DomainPut // Optimizations: diff --git a/erigon-lib/kv/kvcache/cache_test.go b/erigon-lib/kv/kvcache/cache_test.go index 8055aa1e68a..d4fb9a79368 100644 --- a/erigon-lib/kv/kvcache/cache_test.go +++ b/erigon-lib/kv/kvcache/cache_test.go @@ -107,7 +107,7 @@ func TestEviction(t *testing.T) { c := New(cfg) dirs := datadir.New(t.TempDir()) - db, _ := temporaltest.NewTestDB(t, dirs) + _, db, _ := temporaltest.NewTestDB(t, dirs) k1, k2 := [20]byte{1}, [20]byte{2} var id uint64 @@ -167,7 +167,7 @@ func TestAPI(t *testing.T) { require := require.New(t) c := New(DefaultCoherentConfig) k1, k2 := [20]byte{1}, [20]byte{2} - db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + _, db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) get := func(key [20]byte, expectTxnID uint64) (res [1]chan []byte) { wg := sync.WaitGroup{} for i := 0; i < len(res); i++ { @@ -357,7 +357,7 @@ func TestCode(t *testing.T) { t.Skip("TODO: use state reader/writer instead of Put()") require, ctx := require.New(t), context.Background() c := New(DefaultCoherentConfig) - db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + _, db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) k1, k2 := [20]byte{1}, [20]byte{2} _ = db.Update(ctx, func(tx kv.RwTx) error { diff --git a/erigon-lib/kv/kvcache/dummy.go b/erigon-lib/kv/kvcache/dummy.go index 2ca48855c60..bb5b311c70f 100644 --- a/erigon-lib/kv/kvcache/dummy.go +++ b/erigon-lib/kv/kvcache/dummy.go @@ -30,7 +30,7 
@@ type DummyCache struct { var _ Cache = (*DummyCache)(nil) // compile-time interface check var _ CacheView = (*DummyView)(nil) // compile-time interface check -func NewDummy() *DummyCache { return &DummyCache{stateV3: true} } +func NewDummy(stateV3 bool) *DummyCache { return &DummyCache{stateV3: stateV3} } func (c *DummyCache) View(_ context.Context, tx kv.Tx) (CacheView, error) { return &DummyView{cache: c, tx: tx}, nil } diff --git a/erigon-lib/kv/kvcfg/accessors_config.go b/erigon-lib/kv/kvcfg/accessors_config.go index 5300277f317..5c68771e45d 100644 --- a/erigon-lib/kv/kvcfg/accessors_config.go +++ b/erigon-lib/kv/kvcfg/accessors_config.go @@ -24,6 +24,10 @@ import ( type ConfigKey []byte +var ( + HistoryV3 = ConfigKey("history.v3") +) + func (k ConfigKey) Enabled(tx kv.Tx) (bool, error) { return kv.GetBool(tx, kv.DatabaseInfo, k) } func (k ConfigKey) FromDB(db kv.RoDB) (enabled bool) { if err := db.View(context.Background(), func(tx kv.Tx) error { diff --git a/erigon-lib/kv/mdbx/kv_abstract_test.go b/erigon-lib/kv/mdbx/kv_abstract_test.go index 55b1a8b87e5..ec4451201b2 100644 --- a/erigon-lib/kv/mdbx/kv_abstract_test.go +++ b/erigon-lib/kv/mdbx/kv_abstract_test.go @@ -537,7 +537,7 @@ func testMultiCursor(t *testing.T, db kv.RwDB, bucket1, bucket2 string) { // } // // c3 := tx.Cursor(dbutils.ChaindataTables[0]) -// k, v, err := c3.seekInFiles([]byte{5}) +// k, v, err := c3.Seek([]byte{5}) // if err != nil { // return err // } diff --git a/erigon-lib/kv/mdbx/kv_mdbx.go b/erigon-lib/kv/mdbx/kv_mdbx.go index 652774439fd..d9ec0119400 100644 --- a/erigon-lib/kv/mdbx/kv_mdbx.go +++ b/erigon-lib/kv/mdbx/kv_mdbx.go @@ -1446,7 +1446,7 @@ func (c *MdbxCursor) Seek(seek []byte) (k, v []byte, err error) { if mdbx.IsNotFound(err) { return nil, nil, nil } - err = fmt.Errorf("failed MdbxKV cursor.seekInFiles(): %w, bucket: %s, key: %x", err, c.bucketName, seek) + err = fmt.Errorf("failed MdbxKV cursor.Seek(): %w, bucket: %s, key: %x", err, c.bucketName, seek) return []byte{}, nil, err } diff --git a/erigon-lib/kv/membatchwithdb/memory_mutation.go b/erigon-lib/kv/membatchwithdb/memory_mutation.go index ecfe85b92b8..50ba0f6e3cf 100644 --- a/erigon-lib/kv/membatchwithdb/memory_mutation.go +++ b/erigon-lib/kv/membatchwithdb/memory_mutation.go @@ -717,11 +717,11 @@ func (m *MemoryMutation) CHandle() unsafe.Pointer { } type hasAggCtx interface { - AggTx() interface{} + AggCtx() interface{} } -func (m *MemoryMutation) AggTx() interface{} { - return m.db.(hasAggCtx).AggTx() +func (m *MemoryMutation) AggCtx() interface{} { + return m.db.(hasAggCtx).AggCtx() } func (m *MemoryMutation) DomainGet(name kv.Domain, k, k2 []byte) (v []byte, step uint64, err error) { @@ -731,8 +731,8 @@ func (m *MemoryMutation) DomainGet(name kv.Domain, k, k2 []byte) (v []byte, step func (m *MemoryMutation) DomainGetAsOf(name kv.Domain, k, k2 []byte, ts uint64) (v []byte, ok bool, err error) { return m.db.(kv.TemporalTx).DomainGetAsOf(name, k, k2, ts) } -func (m *MemoryMutation) HistorySeek(name kv.History, k []byte, ts uint64) (v []byte, ok bool, err error) { - return m.db.(kv.TemporalTx).HistorySeek(name, k, ts) +func (m *MemoryMutation) HistoryGet(name kv.History, k []byte, ts uint64) (v []byte, ok bool, err error) { + return m.db.(kv.TemporalTx).HistoryGet(name, k, ts) } func (m *MemoryMutation) IndexRange(name kv.InvertedIdx, k []byte, fromTs, toTs int, asc order.By, limit int) (timestamps iter.U64, err error) { diff --git a/erigon-lib/kv/remotedb/kv_remote.go b/erigon-lib/kv/remotedb/kv_remote.go index 
0119ac85e14..dd4fdc48491 100644 --- a/erigon-lib/kv/remotedb/kv_remote.go +++ b/erigon-lib/kv/remotedb/kv_remote.go @@ -549,7 +549,7 @@ func (c *remoteCursor) Current() ([]byte, []byte, error) { return c.getCurrent() } -// Seek - doesn't start streaming (because much of code does only several .seekInFiles calls without reading sequence of data) +// Seek - doesn't start streaming (because much of code does only several .Seek calls without reading sequence of data) // .Next() - does request streaming (if configured by user) func (c *remoteCursor) Seek(seek []byte) ([]byte, []byte, error) { return c.setRange(seek) @@ -674,8 +674,8 @@ func (tx *tx) DomainRange(name kv.Domain, fromKey, toKey []byte, ts uint64, asc return reply.Keys, reply.Values, reply.NextPageToken, nil }), nil } -func (tx *tx) HistorySeek(name kv.History, k []byte, ts uint64) (v []byte, ok bool, err error) { - reply, err := tx.db.remoteKV.HistorySeek(tx.ctx, &remote.HistorySeekReq{TxId: tx.id, Table: string(name), K: k, Ts: ts}) +func (tx *tx) HistoryGet(name kv.History, k []byte, ts uint64) (v []byte, ok bool, err error) { + reply, err := tx.db.remoteKV.HistoryGet(tx.ctx, &remote.HistoryGetReq{TxId: tx.id, Table: string(name), K: k, Ts: ts}) if err != nil { return nil, false, err } diff --git a/erigon-lib/kv/remotedbserver/remotedbserver.go b/erigon-lib/kv/remotedbserver/remotedbserver.go index 1cd23b95fdd..f4908cd734b 100644 --- a/erigon-lib/kv/remotedbserver/remotedbserver.go +++ b/erigon-lib/kv/remotedbserver/remotedbserver.go @@ -62,7 +62,7 @@ const MaxTxTTL = 60 * time.Second // 5.0 - BlockTransaction table now has canonical ids (txs of non-canonical blocks moving to NonCanonicalTransaction table) // 5.1.0 - Added blockGasLimit to the StateChangeBatch // 6.0.0 - Blocks now have system-txs - in the begin/end of block -// 6.1.0 - Add methods Range, IndexRange, HistorySeek, HistoryRange +// 6.1.0 - Add methods Range, IndexRange, HistoryGet, HistoryRange // 6.2.0 - Add HistoryFiles to reply of Snapshots() method var KvServiceAPIVersion = &types.VersionReply{Major: 6, Minor: 2, Patch: 0} @@ -561,14 +561,14 @@ func (s *KvServer) DomainGet(_ context.Context, req *remote.DomainGetReq) (reply } return reply, nil } -func (s *KvServer) HistorySeek(_ context.Context, req *remote.HistorySeekReq) (reply *remote.HistorySeekReply, err error) { - reply = &remote.HistorySeekReply{} +func (s *KvServer) HistoryGet(_ context.Context, req *remote.HistoryGetReq) (reply *remote.HistoryGetReply, err error) { + reply = &remote.HistoryGetReply{} if err := s.with(req.TxId, func(tx kv.Tx) error { ttx, ok := tx.(kv.TemporalTx) if !ok { return fmt.Errorf("server DB doesn't implement kv.Temporal interface") } - reply.V, reply.Ok, err = ttx.HistorySeek(kv.History(req.Table), req.K, req.Ts) + reply.V, reply.Ok, err = ttx.HistoryGet(kv.History(req.Table), req.K, req.Ts) if err != nil { return err } diff --git a/erigon-lib/kv/tables.go b/erigon-lib/kv/tables.go index a68731692e1..1898168c0cf 100644 --- a/erigon-lib/kv/tables.go +++ b/erigon-lib/kv/tables.go @@ -126,12 +126,12 @@ AccountsHistory and StorageHistory - indices designed to serve next 2 type of re 2. get last shard of A - to append there new block numbers Task 1. 
is part of "get historical state" operation (see `core/state:GetAsOf`): -If `db.seekInFiles(A+bigEndian(X))` returns non-last shard - +If `db.Seek(A+bigEndian(X))` returns non-last shard - then get block number from shard value Y := RoaringBitmap(shard_value).GetGte(X) and with Y go to ChangeSets: db.Get(ChangeSets, Y+A) -If `db.seekInFiles(A+bigEndian(X))` returns last shard - +If `db.Seek(A+bigEndian(X))` returns last shard - then we go to PlainState: db.Get(PlainState, A) @@ -143,7 +143,7 @@ Format: - if shard is last - then key has suffix 8 bytes = 0xFF It allows: - - server task 1. by 1 db operation db.seekInFiles(A+bigEndian(X)) + - server task 1. by 1 db operation db.Seek(A+bigEndian(X)) - server task 2. by 1 db operation db.Get(A+0xFF) see also: docs/programmers_guide/db_walkthrough.MD#table-change-sets @@ -528,8 +528,6 @@ var ( PruneTxIndexType = []byte("pruneTxIndexType") PruneCallTraces = []byte("pruneCallTraces") PruneCallTracesType = []byte("pruneCallTracesType") - PruneBlocks = []byte("pruneBlocks") - PruneBlocksType = []byte("pruneBlocksType") DBSchemaVersionKey = []byte("dbVersion") diff --git a/erigon-lib/kv/temporal/kv_temporal.go b/erigon-lib/kv/temporal/kv_temporal.go index 67f026d1b56..03077f7dfab 100644 --- a/erigon-lib/kv/temporal/kv_temporal.go +++ b/erigon-lib/kv/temporal/kv_temporal.go @@ -2,10 +2,12 @@ package temporal import ( "context" + "fmt" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/iter" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/state" @@ -50,6 +52,9 @@ type DB struct { } func New(db kv.RwDB, agg *state.Aggregator) (*DB, error) { + if !kvcfg.HistoryV3.FromDB(db) { + panic("not supported") + } return &DB{RwDB: db, agg: agg}, nil } func (db *DB) Agg() *state.Aggregator { return db.agg } @@ -151,7 +156,7 @@ func (tx *Tx) ForceReopenAggCtx() { func (tx *Tx) WarmupDB(force bool) error { return tx.MdbxTx.WarmupDB(force) } func (tx *Tx) LockDBInRam() error { return tx.MdbxTx.LockDBInRam() } -func (tx *Tx) AggTx() interface{} { return tx.aggCtx } +func (tx *Tx) AggCtx() interface{} { return tx.aggCtx } func (tx *Tx) Agg() *state.Aggregator { return tx.db.agg } func (tx *Tx) Rollback() { tx.autoClose() @@ -178,12 +183,14 @@ func (tx *Tx) Commit() error { return mdbxTx.Commit() } -func (tx *Tx) DomainRange(name kv.Domain, fromKey, toKey []byte, asOfTs uint64, asc order.By, limit int) (iter.KV, error) { - it, err := tx.aggCtx.DomainRange(tx.MdbxTx, name, fromKey, toKey, asOfTs, asc, limit) +func (tx *Tx) DomainRange(name kv.Domain, fromKey, toKey []byte, asOfTs uint64, asc order.By, limit int) (it iter.KV, err error) { + it, err = tx.aggCtx.DomainRange(tx.MdbxTx, name, fromKey, toKey, asOfTs, asc, limit) if err != nil { return nil, err } - tx.resourcesToClose = append(tx.resourcesToClose, it) + if closer, ok := it.(kv.Closer); ok { + tx.resourcesToClose = append(tx.resourcesToClose, closer) + } return it, nil } @@ -204,8 +211,8 @@ func (tx *Tx) DomainGetAsOf(name kv.Domain, key, key2 []byte, ts uint64) (v []by return tx.aggCtx.DomainGetAsOf(tx.MdbxTx, name, key, ts) } -func (tx *Tx) HistorySeek(name kv.History, key []byte, ts uint64) (v []byte, ok bool, err error) { - return tx.aggCtx.HistorySeek(name, key, ts, tx.MdbxTx) +func (tx *Tx) HistoryGet(name kv.History, key []byte, ts uint64) (v []byte, ok bool, err error) { + return tx.aggCtx.HistoryGet(name, key, ts, 
tx.MdbxTx) } func (tx *Tx) IndexRange(name kv.InvertedIdx, k []byte, fromTs, toTs int, asc order.By, limit int) (timestamps iter.U64, err error) { @@ -213,15 +220,34 @@ func (tx *Tx) IndexRange(name kv.InvertedIdx, k []byte, fromTs, toTs int, asc or if err != nil { return nil, err } - tx.resourcesToClose = append(tx.resourcesToClose, timestamps) + if closer, ok := timestamps.(kv.Closer); ok { + tx.resourcesToClose = append(tx.resourcesToClose, closer) + } return timestamps, nil } -func (tx *Tx) HistoryRange(name kv.History, fromTs, toTs int, asc order.By, limit int) (iter.KV, error) { - it, err := tx.aggCtx.HistoryRange(name, fromTs, toTs, asc, limit, tx.MdbxTx) +func (tx *Tx) HistoryRange(name kv.History, fromTs, toTs int, asc order.By, limit int) (it iter.KV, err error) { + if asc == order.Desc { + panic("not implemented yet") + } + if limit >= 0 { + panic("not implemented yet") + } + switch name { + case kv.AccountsHistory: + it, err = tx.aggCtx.AccountHistoryRange(fromTs, toTs, asc, limit, tx) + case kv.StorageHistory: + it, err = tx.aggCtx.StorageHistoryRange(fromTs, toTs, asc, limit, tx) + case kv.CodeHistory: + it, err = tx.aggCtx.CodeHistoryRange(fromTs, toTs, asc, limit, tx) + default: + return nil, fmt.Errorf("unexpected history name: %s", name) + } if err != nil { return nil, err } - tx.resourcesToClose = append(tx.resourcesToClose, it) - return it, nil + if closer, ok := it.(kv.Closer); ok { + tx.resourcesToClose = append(tx.resourcesToClose, closer) + } + return it, err } diff --git a/erigon-lib/kv/temporal/temporaltest/kv_temporal_testdb.go b/erigon-lib/kv/temporal/temporaltest/kv_temporal_testdb.go index eb49c434a2d..4d5c9852086 100644 --- a/erigon-lib/kv/temporal/temporaltest/kv_temporal_testdb.go +++ b/erigon-lib/kv/temporal/temporaltest/kv_temporal_testdb.go @@ -7,6 +7,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/config3" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon-lib/kv/temporal" "github.com/ledgerwatch/erigon-lib/state" @@ -14,10 +15,11 @@ import ( ) // nolint:thelper -func NewTestDB(tb testing.TB, dirs datadir.Dirs) (db kv.RwDB, agg *state.Aggregator) { +func NewTestDB(tb testing.TB, dirs datadir.Dirs) (histV3 bool, db kv.RwDB, agg *state.Aggregator) { if tb != nil { tb.Helper() } + historyV3 := true logger := log.New() if tb != nil { @@ -25,8 +27,15 @@ func NewTestDB(tb testing.TB, dirs datadir.Dirs) (db kv.RwDB, agg *state.Aggrega } else { db = memdb.New(dirs.DataDir) } - var err error + err = db.UpdateNosync(context.Background(), func(tx kv.RwTx) error { + _, _ = kvcfg.HistoryV3.WriteOnce(tx, historyV3) + return nil + }) + if err != nil { + panic(err) + } + agg, err = state.NewAggregator(context.Background(), dirs, config3.HistoryV3AggregationStep, db, logger) if err != nil { panic(err) @@ -39,5 +48,5 @@ func NewTestDB(tb testing.TB, dirs datadir.Dirs) (db kv.RwDB, agg *state.Aggrega if err != nil { panic(err) } - return db, agg + return true, db, agg } diff --git a/erigon-lib/metrics/register.go b/erigon-lib/metrics/register.go index 4a2e68f55e4..2ac13a6b4ca 100644 --- a/erigon-lib/metrics/register.go +++ b/erigon-lib/metrics/register.go @@ -140,8 +140,8 @@ func GetOrCreateSummary(name string) Summary { // - foo{bar="baz",aaa="b"} // // The returned histogram is safe to use from concurrent goroutines. 
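// Reviewer note: dropping the buckets argument below (and the matching
// parameter in set.go) means histograms are now created with the prometheus
// client defaults: HistogramOpts.Buckets left unset falls back to
// prometheus.DefBuckets (0.005s .. 10s) instead of caller-chosen bounds.
// Illustrative call with the new signature (metric name assumed):
//
//     h := metrics.NewHistogram(`exec_seconds{stage="senders"}`)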
-func NewHistogram(name string, buckets []float64) Histogram { - h, err := defaultSet.NewHistogram(name, buckets) +func NewHistogram(name string) Histogram { + h, err := defaultSet.NewHistogram(name) if err != nil { panic(fmt.Errorf("could not create new histogram: %w", err)) } @@ -171,12 +171,3 @@ func GetOrCreateHistogram(name string) Histogram { return &histogram{h} } - -func GetOrCreateHistogramWithBuckets(name string) Histogram { - h, err := defaultSet.GetOrCreateHistogram(name) - if err != nil { - panic(fmt.Errorf("could not get or create new histogram: %w", err)) - } - - return &histogram{h} -} diff --git a/erigon-lib/metrics/set.go b/erigon-lib/metrics/set.go index ad4b164c239..2b0418fd2bd 100644 --- a/erigon-lib/metrics/set.go +++ b/erigon-lib/metrics/set.go @@ -78,8 +78,8 @@ func (s *Set) Collect(ch chan<- prometheus.Metric) { // - foo{bar="baz",aaa="b"} // // The returned histogram is safe to use from concurrent goroutines. -func (s *Set) NewHistogram(name string, buckets []float64, help ...string) (prometheus.Histogram, error) { - h, err := newHistogram(name, buckets, help...) +func (s *Set) NewHistogram(name string, help ...string) (prometheus.Histogram, error) { + h, err := newHistogram(name, help...) if err != nil { return nil, err } @@ -88,7 +88,7 @@ func (s *Set) NewHistogram(name string, buckets []float64, help ...string) (prom return h, nil } -func newHistogram(name string, buckets []float64, help ...string) (prometheus.Histogram, error) { +func newHistogram(name string, help ...string) (prometheus.Histogram, error) { name, labels, err := parseMetric(name) if err != nil { return nil, err @@ -97,7 +97,6 @@ func newHistogram(name string, buckets []float64, help ...string) (prometheus.Hi return prometheus.NewHistogram(prometheus.HistogramOpts{ Name: name, ConstLabels: labels, - Buckets: buckets, Help: strings.Join(help, " "), }), nil } @@ -120,7 +119,7 @@ func (s *Set) GetOrCreateHistogram(name string, help ...string) (prometheus.Hist nm := s.m[name] s.mu.Unlock() if nm == nil { - metric, err := newHistogram(name, nil, help...) + metric, err := newHistogram(name, help...) 
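// Reviewer note: with the bucket slice gone, this lazy get-or-create path
// and the eager NewHistogram in register.go now build identical histograms;
// the only remaining difference is the mutex-guarded map lookup for reuse.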
if err != nil { return nil, fmt.Errorf("invalid metric name %q: %w", name, err) } diff --git a/erigon-lib/recsplit/recsplit.go b/erigon-lib/recsplit/recsplit.go index 4e18cdff9a0..659dc9d2f12 100644 --- a/erigon-lib/recsplit/recsplit.go +++ b/erigon-lib/recsplit/recsplit.go @@ -136,8 +136,6 @@ type RecSplitArgs struct { EtlBufLimit datasize.ByteSize Salt *uint32 // Hash seed (salt) for the hash function used for allocating the initial buckets - need to be generated randomly LeafSize uint16 - - NoFsync bool // fsync is enabled by default, but tests can manually disable } // NewRecSplit creates a new RecSplit instance with given number of keys and given bucket size @@ -209,9 +207,6 @@ func NewRecSplit(args RecSplitArgs, logger log.Logger) (*RecSplit, error) { } rs.startSeed = args.StartSeed rs.count = make([]uint16, rs.secondaryAggrBound) - if args.NoFsync { - rs.DisableFsync() - } return rs, nil } diff --git a/erigon-lib/state/aggregator.go b/erigon-lib/state/aggregator.go index cce320a8374..380d2caf91e 100644 --- a/erigon-lib/state/aggregator.go +++ b/erigon-lib/state/aggregator.go @@ -49,9 +49,14 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/iter" "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" + "github.com/ledgerwatch/erigon-lib/metrics" "github.com/ledgerwatch/erigon-lib/seg" ) +var ( + mxPruneTookAgg = metrics.GetOrCreateSummary(`prune_seconds{type="state"}`) +) + type Aggregator struct { db kv.RoDB d [kv.DomainLen]*Domain @@ -181,19 +186,19 @@ func NewAggregator(ctx context.Context, dirs datadir.Dirs, aggregationStep uint6 // return nil, err //} idxCfg := iiCfg{salt: salt, dirs: dirs, db: db} - if a.logAddrs, err = NewInvertedIndex(idxCfg, aggregationStep, "logaddrs", kv.TblLogAddressKeys, kv.TblLogAddressIdx, nil, logger); err != nil { + if a.logAddrs, err = NewInvertedIndex(idxCfg, aggregationStep, "logaddrs", kv.TblLogAddressKeys, kv.TblLogAddressIdx, false, nil, logger); err != nil { return nil, err } idxCfg = iiCfg{salt: salt, dirs: dirs, db: db} - if a.logTopics, err = NewInvertedIndex(idxCfg, aggregationStep, "logtopics", kv.TblLogTopicsKeys, kv.TblLogTopicsIdx, nil, logger); err != nil { + if a.logTopics, err = NewInvertedIndex(idxCfg, aggregationStep, "logtopics", kv.TblLogTopicsKeys, kv.TblLogTopicsIdx, false, nil, logger); err != nil { return nil, err } idxCfg = iiCfg{salt: salt, dirs: dirs, db: db} - if a.tracesFrom, err = NewInvertedIndex(idxCfg, aggregationStep, "tracesfrom", kv.TblTracesFromKeys, kv.TblTracesFromIdx, nil, logger); err != nil { + if a.tracesFrom, err = NewInvertedIndex(idxCfg, aggregationStep, "tracesfrom", kv.TblTracesFromKeys, kv.TblTracesFromIdx, false, nil, logger); err != nil { return nil, err } idxCfg = iiCfg{salt: salt, dirs: dirs, db: db} - if a.tracesTo, err = NewInvertedIndex(idxCfg, aggregationStep, "tracesto", kv.TblTracesToKeys, kv.TblTracesToIdx, nil, logger); err != nil { + if a.tracesTo, err = NewInvertedIndex(idxCfg, aggregationStep, "tracesto", kv.TblTracesToKeys, kv.TblTracesToIdx, false, nil, logger); err != nil { return nil, err } a.KeepStepsInDB(1) @@ -796,99 +801,6 @@ func (ac *AggregatorRoTx) CanUnwindBeforeBlockNum(blockNum uint64, tx kv.Tx) (ui return blockNumWithCommitment, true, nil } -func (ac *AggregatorRoTx) PruneSmallBatchesDb(ctx context.Context, timeout time.Duration, db kv.RwDB) (haveMore bool, err error) { - // On tip-of-chain timeout is about `3sec` - // On tip of chain: must be real-time - prune by small batches and prioritize exact-`timeout` - // Not on tip of chain: 
must be aggressive (prune as much as possible) by bigger batches - - furiousPrune := timeout > 5*time.Hour - aggressivePrune := !furiousPrune && timeout >= 1*time.Minute - - var pruneLimit uint64 = 1_000 - var withWarmup bool = false //nolint - if furiousPrune { - pruneLimit = 1_000_000 - /* disabling this feature for now - seems it doesn't cancel even after prune finished - // start from a bit high limit to give time for warmup - // will disable warmup after first iteration and will adjust pruneLimit based on `time` - withWarmup = true - */ - } - - started := time.Now() - localTimeout := time.NewTicker(timeout) - defer localTimeout.Stop() - logPeriod := 30 * time.Second - logEvery := time.NewTicker(logPeriod) - defer logEvery.Stop() - aggLogEvery := time.NewTicker(600 * time.Second) // to hide specific domain/idx logging - defer aggLogEvery.Stop() - - fullStat := newAggregatorPruneStat() - innerCtx := context.Background() - goExit := false - - for { - err = db.Update(innerCtx, func(tx kv.RwTx) error { - iterationStarted := time.Now() - // `context.Background()` is important here! - // it allows keep DB consistent - prune all keys-related data or noting - // can't interrupt by ctrl+c and leave dirt in DB - stat, err := ac.Prune(innerCtx, tx, pruneLimit, withWarmup, aggLogEvery) - if err != nil { - ac.a.logger.Warn("[snapshots] PruneSmallBatches failed", "err", err) - return err - } - if stat == nil { - if fstat := fullStat.String(); fstat != "" { - ac.a.logger.Info("[snapshots] PruneSmallBatches finished", "took", time.Since(started).String(), "stat", fstat) - } - goExit = true - return nil - } - fullStat.Accumulate(stat) - - withWarmup = false // warmup once is enough - - if aggressivePrune { - took := time.Since(iterationStarted) - if took < 2*time.Second { - pruneLimit *= 10 - } - if took > logPeriod { - pruneLimit /= 10 - } - } - - select { - case <-logEvery.C: - ac.a.logger.Info("[snapshots] pruning state", - "until commit", time.Until(started.Add(timeout)).String(), - "pruneLimit", pruneLimit, - "aggregatedStep", (ac.minimaxTxNumInDomainFiles(false)-1)/ac.a.StepSize(), - "stepsRangeInDB", ac.a.StepsRangeInDBAsStr(tx), - "pruned", fullStat.String(), - ) - default: - } - return nil - }) - if err != nil { - return false, err - } - select { - case <-localTimeout.C: //must be first to improve responsivness - return true, nil - case <-ctx.Done(): - return false, ctx.Err() - default: - } - if goExit { - return false, nil - } - } -} - // PruneSmallBatches is not cancellable, it's over when it's over or failed. 
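// Illustrative sketch, not part of this diff: the adaptive batch sizing that
// the removed PruneSmallBatchesDb applied between iterations. It grows the
// limit tenfold when an iteration finishes quickly and shrinks it when one
// outlasts the log period, so prune iterations stay responsive:
//
//	if took := time.Since(iterationStarted); took < 2*time.Second {
//		pruneLimit *= 10
//	} else if took > logPeriod {
//		pruneLimit /= 10
//	}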
// It fills whole timeout with pruning by small batches (of 100 keys) and making some progress func (ac *AggregatorRoTx) PruneSmallBatches(ctx context.Context, timeout time.Duration, tx kv.RwTx) (haveMore bool, err error) { @@ -1691,10 +1603,10 @@ func (ac *AggregatorRoTx) IndexRange(name kv.InvertedIdx, k []byte, fromTs, toTs // -- range end -func (ac *AggregatorRoTx) HistorySeek(name kv.History, key []byte, ts uint64, tx kv.Tx) (v []byte, ok bool, err error) { +func (ac *AggregatorRoTx) HistoryGet(name kv.History, key []byte, ts uint64, tx kv.Tx) (v []byte, ok bool, err error) { switch name { case kv.AccountsHistory: - v, ok, err = ac.d[kv.AccountsDomain].ht.HistorySeek(key, ts, tx) + v, ok, err = ac.d[kv.AccountsDomain].ht.GetNoStateWithRecent(key, ts, tx) if err != nil { return nil, false, err } @@ -1703,34 +1615,36 @@ func (ac *AggregatorRoTx) HistorySeek(name kv.History, key []byte, ts uint64, tx } return v, true, nil case kv.StorageHistory: - return ac.d[kv.StorageDomain].ht.HistorySeek(key, ts, tx) + return ac.d[kv.StorageDomain].ht.GetNoStateWithRecent(key, ts, tx) case kv.CodeHistory: - return ac.d[kv.CodeDomain].ht.HistorySeek(key, ts, tx) + return ac.d[kv.CodeDomain].ht.GetNoStateWithRecent(key, ts, tx) case kv.CommitmentHistory: - return ac.d[kv.CommitmentDomain].ht.HistorySeek(key, ts, tx) + return ac.d[kv.CommitmentDomain].ht.GetNoStateWithRecent(key, ts, tx) //case kv.GasUsedHistory: - // return ac.d[kv.GasUsedDomain].ht.HistorySeek(key, ts, tx) + // return ac.d[kv.GasUsedDomain].ht.GetNoStateWithRecent(key, ts, tx) default: panic(fmt.Sprintf("unexpected: %s", name)) } } -func (ac *AggregatorRoTx) HistoryRange(name kv.History, fromTs, toTs int, asc order.By, limit int, tx kv.Tx) (it iter.KV, err error) { - //TODO: aggTx to store array of histories - var domainName kv.Domain +func (ac *AggregatorRoTx) AccountHistoryRange(startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.KV, error) { + hr, err := ac.d[kv.AccountsDomain].ht.HistoryRange(startTxNum, endTxNum, asc, limit, tx) + if err != nil { + return nil, err + } + return iter.WrapKV(hr), nil +} - switch name { - case kv.AccountsHistory: - domainName = kv.AccountsDomain - case kv.StorageHistory: - domainName = kv.StorageDomain - case kv.CodeHistory: - domainName = kv.CodeDomain - default: - return nil, fmt.Errorf("unexpected history name: %s", name) +func (ac *AggregatorRoTx) StorageHistoryRange(startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.KV, error) { + hr, err := ac.d[kv.StorageDomain].ht.HistoryRange(startTxNum, endTxNum, asc, limit, tx) + if err != nil { + return nil, err } + return iter.WrapKV(hr), nil +} - hr, err := ac.d[domainName].ht.HistoryRange(fromTs, toTs, asc, limit, tx) +func (ac *AggregatorRoTx) CodeHistoryRange(startTxNum, endTxNum int, asc order.By, limit int, tx kv.Tx) (iter.KV, error) { + hr, err := ac.d[kv.CodeDomain].ht.HistoryRange(startTxNum, endTxNum, asc, limit, tx) if err != nil { return nil, err } @@ -1763,17 +1677,17 @@ type AggregatorRoTx struct { } func (a *Aggregator) BeginFilesRo() *AggregatorRoTx { + a.visibleFilesLock.RLock() ac := &AggregatorRoTx{ - a: a, + a: a, + logAddrs: a.logAddrs.BeginFilesRo(), + logTopics: a.logTopics.BeginFilesRo(), + tracesFrom: a.tracesFrom.BeginFilesRo(), + tracesTo: a.tracesTo.BeginFilesRo(), + id: a.ctxAutoIncrement.Add(1), _leakID: a.leakDetector.Add(), } - - a.visibleFilesLock.RLock() - ac.logAddrs = a.logAddrs.BeginFilesRo() - ac.logTopics = a.logTopics.BeginFilesRo() - ac.tracesFrom = a.tracesFrom.BeginFilesRo() - 
ac.tracesTo = a.tracesTo.BeginFilesRo() for id, d := range a.d { ac.d[id] = d.BeginFilesRo() } diff --git a/erigon-lib/state/aggregator_bench_test.go b/erigon-lib/state/aggregator_bench_test.go index bf9c7fa15c6..7d641c14a2a 100644 --- a/erigon-lib/state/aggregator_bench_test.go +++ b/erigon-lib/state/aggregator_bench_test.go @@ -44,7 +44,7 @@ type txWithCtx struct { } func WrapTxWithCtx(tx kv.Tx, ctx *AggregatorRoTx) *txWithCtx { return &txWithCtx{Tx: tx, ac: ctx} } -func (tx *txWithCtx) AggTx() interface{} { return tx.ac } +func (tx *txWithCtx) AggCtx() interface{} { return tx.ac } func BenchmarkAggregator_Processing(b *testing.B) { ctx, cancel := context.WithCancel(context.Background()) diff --git a/erigon-lib/state/aggregator_files.go b/erigon-lib/state/aggregator_files.go index ad5f8dbdb93..c8ad7dc545e 100644 --- a/erigon-lib/state/aggregator_files.go +++ b/erigon-lib/state/aggregator_files.go @@ -23,6 +23,48 @@ import ( "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/metrics" +) + +// StepsInBiggestFile - files of this size are completely frozen/immutable. +// files of smaller size are also immutable, but can be removed after merge to bigger files. +const StepsInBiggestFile = 32 + +var ( + //LatestStateReadWarm = metrics.GetOrCreateSummary(`latest_state_read{type="warm",found="yes"}`) //nolint + //LatestStateReadWarmNotFound = metrics.GetOrCreateSummary(`latest_state_read{type="warm",found="no"}`) //nolint + //LatestStateReadGrind = metrics.GetOrCreateSummary(`latest_state_read{type="grind",found="yes"}`) //nolint + //LatestStateReadGrindNotFound = metrics.GetOrCreateSummary(`latest_state_read{type="grind",found="no"}`) //nolint + //LatestStateReadCold = metrics.GetOrCreateSummary(`latest_state_read{type="cold",found="yes"}`) //nolint + //LatestStateReadColdNotFound = metrics.GetOrCreateSummary(`latest_state_read{type="cold",found="no"}`) //nolint + mxPrunableDAcc = metrics.GetOrCreateGauge(`domain_prunable{type="domain",table="account"}`) + mxPrunableDSto = metrics.GetOrCreateGauge(`domain_prunable{type="domain",table="storage"}`) + mxPrunableDCode = metrics.GetOrCreateGauge(`domain_prunable{type="domain",table="code"}`) + mxPrunableDComm = metrics.GetOrCreateGauge(`domain_prunable{type="domain",table="commitment"}`) + mxPrunableHAcc = metrics.GetOrCreateGauge(`domain_prunable{type="history",table="account"}`) + mxPrunableHSto = metrics.GetOrCreateGauge(`domain_prunable{type="history",table="storage"}`) + mxPrunableHCode = metrics.GetOrCreateGauge(`domain_prunable{type="history",table="code"}`) + mxPrunableHComm = metrics.GetOrCreateGauge(`domain_prunable{type="history",table="commitment"}`) + + mxRunningMerges = metrics.GetOrCreateGauge("domain_running_merges") + mxRunningFilesBuilding = metrics.GetOrCreateGauge("domain_running_files_building") + mxCollateTook = metrics.GetOrCreateHistogram(`domain_collate_took{type="domain"}`) + mxCollateTookHistory = metrics.GetOrCreateHistogram(`domain_collate_took{type="history"}`) + mxCollateTookIndex = metrics.GetOrCreateHistogram(`domain_collate_took{type="index"}`) + mxPruneTookDomain = metrics.GetOrCreateHistogram(`domain_prune_took{type="domain"}`) + mxPruneTookHistory = metrics.GetOrCreateHistogram(`domain_prune_took{type="history"}`) + mxPruneTookIndex = metrics.GetOrCreateHistogram(`domain_prune_took{type="index"}`) + mxPruneInProgress = metrics.GetOrCreateGauge("domain_pruning_progress") + mxCollationSize = 
metrics.GetOrCreateGauge("domain_collation_size") + mxCollationSizeHist = metrics.GetOrCreateGauge("domain_collation_hist_size") + mxPruneSizeDomain = metrics.GetOrCreateCounter(`domain_prune_size{type="domain"}`) + mxPruneSizeHistory = metrics.GetOrCreateCounter(`domain_prune_size{type="history"}`) + mxPruneSizeIndex = metrics.GetOrCreateCounter(`domain_prune_size{type="index"}`) + mxBuildTook = metrics.GetOrCreateSummary("domain_build_files_took") + mxStepTook = metrics.GetOrCreateHistogram("domain_step_took") + mxFlushTook = metrics.GetOrCreateSummary("domain_flush_took") + mxCommitmentRunning = metrics.GetOrCreateGauge("domain_running_commitment") + mxCommitmentTook = metrics.GetOrCreateSummary("domain_commitment_took") ) type SelectedStaticFilesV3 struct { @@ -65,6 +107,7 @@ func (ac *AggregatorRoTx) staticFilesInRange(r RangesV3) (sf SelectedStaticFiles for id := range ac.d { if r.d[id].any() { sf.d[id], sf.dIdx[id], sf.dHist[id], sf.dI[id] = ac.d[id].staticFilesInRange(r.d[id]) + } } if r.logAddrs { diff --git a/erigon-lib/state/bps_tree.go b/erigon-lib/state/bps_tree.go index 53f4f8c85b2..b8502e488de 100644 --- a/erigon-lib/state/bps_tree.go +++ b/erigon-lib/state/bps_tree.go @@ -4,7 +4,6 @@ import ( "bytes" "errors" "fmt" - "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/recsplit/eliasfano32" ) @@ -13,7 +12,7 @@ import ( type indexSeeker interface { WarmUp(g ArchiveGetter) error Get(g ArchiveGetter, key []byte) (k []byte, found bool, di uint64, err error) - //seekInFiles(g ArchiveGetter, key []byte) (indexSeekerIterator, error) + //Seek(g ArchiveGetter, key []byte) (indexSeekerIterator, error) Seek(g ArchiveGetter, seek []byte) (k []byte, di uint64, found bool, err error) } diff --git a/erigon-lib/state/btree_index.go b/erigon-lib/state/btree_index.go index da1029b7d8b..3739d246233 100644 --- a/erigon-lib/state/btree_index.go +++ b/erigon-lib/state/btree_index.go @@ -7,7 +7,6 @@ import ( "encoding/binary" "errors" "fmt" - "github.com/ledgerwatch/erigon-lib/common" "math" "os" "path" @@ -21,6 +20,7 @@ import ( "github.com/ledgerwatch/log/v3" "github.com/spaolacci/murmur3" + "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/background" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/etl" @@ -626,7 +626,26 @@ func (btw *BtIndexWriter) AddKey(key []byte, offset uint64) error { // loadFuncBucket is required to satisfy the type etl.LoadFunc type, to use with collector.Load func (btw *BtIndexWriter) loadFuncBucket(k, v []byte, _ etl.CurrentTableReader, _ etl.LoadNextFunc) error { + // k is the BigEndian encoding of the bucket number, and the v is the key that is assigned into that bucket + //if uint64(len(btw.vals)) >= btw.batchSizeLimit { + // if err := btw.drainBatch(); err != nil { + // return err + // } + //} + + // if _, err := btw.indexW.Write(k); err != nil { + // return err + // } + //if _, err := btw.indexW.Write(v); err != nil { + // return err + //} + //copy(btw.numBuf[8-btw.bytesPerRec:], v) + //btw.ef.AddOffset(binary.BigEndian.Uint64(btw.numBuf[:])) + btw.ef.AddOffset(binary.BigEndian.Uint64(v)) + + //btw.keys = append(btw.keys, binary.BigEndian.Uint64(k), binary.BigEndian.Uint64(k[8:])) + //btw.vals = append(btw.vals, binary.BigEndian.Uint64(v)) return nil } @@ -715,7 +734,7 @@ type BtIndex struct { filePath string } -// Decompressor should be managed by caller (could be closed after index is built). 
When index is built, external getter should be passed to seekInFiles function +// Decompressor should be managed by caller (could be closed after index is built). When index is built, external getter should be passed to Seek function func CreateBtreeIndexWithDecompressor(indexPath string, M uint64, decompressor *seg.Decompressor, compressed FileCompression, seed uint32, ps *background.ProgressSet, tmpdir string, logger log.Logger, noFsync bool) (*BtIndex, error) { err := BuildBtreeIndexWithDecompressor(indexPath, decompressor, compressed, ps, tmpdir, seed, logger, noFsync) if err != nil { @@ -896,12 +915,12 @@ func (b *BtIndex) keyCmp(k []byte, di uint64, g ArchiveGetter) (int, []byte, err // Key and value is valid until cursor.Next is called func (b *BtIndex) newCursor(ctx context.Context, k, v []byte, d uint64, g ArchiveGetter) *Cursor { return &Cursor{ + btt: b, ctx: ctx, getter: g, key: common.Copy(k), value: common.Copy(v), d: d, - btt: b, } } @@ -1003,7 +1022,7 @@ func (b *BtIndex) Seek(g ArchiveGetter, x []byte) (*Cursor, error) { } // defer func() { - // fmt.Printf("[Bindex][%s] seekInFiles '%x' -> '%x' di=%d\n", b.FileName(), x, cursor.Value(), cursor.d) + // fmt.Printf("[Bindex][%s] Seek '%x' -> '%x' di=%d\n", b.FileName(), x, cursor.Value(), cursor.d) // }() var ( k []byte diff --git a/erigon-lib/state/domain.go b/erigon-lib/state/domain.go index 9d5cfbf5b65..2239b990405 100644 --- a/erigon-lib/state/domain.go +++ b/erigon-lib/state/domain.go @@ -456,8 +456,8 @@ func (dt *DomainRoTx) newWriter(tmpdir string, discard bool) *domainBufferedWrit aux: make([]byte, 0, 128), keysTable: dt.d.keysTable, valsTable: dt.d.valsTable, - keys: etl.NewCollector("flush "+dt.d.keysTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), dt.d.logger), - values: etl.NewCollector("flush "+dt.d.valsTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), dt.d.logger), + keys: etl.NewCollector(dt.d.keysTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), dt.d.logger), + values: etl.NewCollector(dt.d.valsTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), dt.d.logger), h: dt.ht.newWriter(tmpdir, discardHistory), } @@ -631,7 +631,6 @@ type DomainRoTx struct { keyBuf [60]byte // 52b key and 8b for inverted step valBuf [128]byte - comBuf []byte keysC kv.CursorDupSort valsC kv.Cursor @@ -819,7 +818,7 @@ func (d *Domain) collate(ctx context.Context, step, txFrom, txTo uint64, roTx kv }() coll.valuesPath = d.kvFilePath(step, step+1) - if coll.valuesComp, err = seg.NewCompressor(ctx, "collate domain "+d.filenameBase, coll.valuesPath, d.dirs.Tmp, seg.MinPatternScore, d.compressWorkers, log.LvlTrace, d.logger); err != nil { + if coll.valuesComp, err = seg.NewCompressor(ctx, "collate values", coll.valuesPath, d.dirs.Tmp, seg.MinPatternScore, d.compressWorkers, log.LvlTrace, d.logger); err != nil { return Collation{}, fmt.Errorf("create %s values compressor: %w", d.filenameBase, err) } comp := NewArchiveWriter(coll.valuesComp, d.compression) @@ -1006,9 +1005,8 @@ func (d *Domain) buildMapIdx(ctx context.Context, fromStep, toStep uint64, data TmpDir: d.dirs.Tmp, IndexFile: idxPath, Salt: d.salt, - NoFsync: d.noFsync, } - return buildIndex(ctx, data, d.compression, idxPath, false, cfg, ps, d.logger) + return buildIndex(ctx, data, d.compression, idxPath, false, cfg, ps, d.logger, d.noFsync) } func (d *Domain) missedBtreeIdxFiles() (l []*filesItem) { @@ -1102,7 +1100,16 @@ func (d *Domain) BuildMissedIndices(ctx context.Context, g *errgroup.Group, ps * } } -func buildIndex(ctx context.Context, d *seg.Decompressor, 
compressed FileCompression, idxPath string, values bool, cfg recsplit.RecSplitArgs, ps *background.ProgressSet, logger log.Logger) error { +func buildIndexFilterThenOpen(ctx context.Context, d *seg.Decompressor, compressed FileCompression, idxPath, tmpdir string, salt *uint32, ps *background.ProgressSet, logger log.Logger, noFsync bool) (*ExistenceFilter, error) { + if err := buildIdxFilter(ctx, d, compressed, idxPath, salt, ps, logger, noFsync); err != nil { + return nil, err + } + if !dir.FileExist(idxPath) { + return nil, nil + } + return OpenExistenceFilter(idxPath) +} +func buildIndex(ctx context.Context, d *seg.Decompressor, compressed FileCompression, idxPath string, values bool, cfg recsplit.RecSplitArgs, ps *background.ProgressSet, logger log.Logger, noFsync bool) error { _, fileName := filepath.Split(idxPath) count := d.Count() if !values { @@ -1122,10 +1129,13 @@ func buildIndex(ctx context.Context, d *seg.Decompressor, compressed FileCompres } defer rs.Close() rs.LogLvl(log.LvlTrace) + if noFsync { + rs.DisableFsync() + } + word := make([]byte, 0, 256) var keyPos, valPos uint64 for { - word := make([]byte, 0, 256) if err := ctx.Err(); err != nil { return err } @@ -1182,11 +1192,6 @@ func (dt *DomainRoTx) Unwind(ctx context.Context, rwTx kv.RwTx, step, txNumUnwin if err != nil { return fmt.Errorf("historyRange %s: %w", dt.ht.h.filenameBase, err) } - sf := time.Now() - defer mxUnwindTook.ObserveDuration(sf) - mxRunningUnwind.Inc() - defer mxRunningUnwind.Dec() - defer histRng.Close() seen := make(map[string]struct{}) restored := dt.NewWriter() @@ -1199,13 +1204,11 @@ func (dt *DomainRoTx) Unwind(ctx context.Context, rwTx kv.RwTx, step, txNumUnwin ic, err := dt.ht.IdxRange(k, int(txNumUnwindTo)-1, 0, order.Desc, -1, rwTx) if err != nil { - ic.Close() return err } if ic.HasNext() { nextTxn, err := ic.Next() if err != nil { - ic.Close() return err } restored.SetTxNum(nextTxn) // todo what if we actually had to decrease current step to provide correct update? @@ -1214,10 +1217,12 @@ func (dt *DomainRoTx) Unwind(ctx context.Context, rwTx kv.RwTx, step, txNumUnwin } //fmt.Printf("[%s] unwinding %x ->'%x'\n", dt.d.filenameBase, k, v) if err := restored.addValue(k, nil, v); err != nil { - ic.Close() return err } - ic.Close() + type closable interface { + Close() + } + ic.(closable).Close() seen[string(k)] = struct{}{} } @@ -1355,7 +1360,7 @@ func (dt *DomainRoTx) getFromFiles(filekey []byte) (v []byte, found bool, fileSt // GetAsOf does not always require usage of roTx. If it is possible to determine // historical value based only on static files, roTx will not be used. 
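// Illustrative read path, not part of this diff, mirroring the body below:
// consult history first; if history holds no record at txNum the key has not
// changed since then, so the latest value is authoritative:
//
//	v, hOk, err := dt.ht.GetNoStateWithRecent(key, txNum, roTx)
//	if err != nil {
//		return nil, err
//	}
//	if hOk {
//		return v, nil
//	}
//	v, _, _, err = dt.GetLatest(key, nil, roTx)
//	return v, err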
func (dt *DomainRoTx) GetAsOf(key []byte, txNum uint64, roTx kv.Tx) ([]byte, error) { - v, hOk, err := dt.ht.HistorySeek(key, txNum, roTx) + v, hOk, err := dt.ht.GetNoStateWithRecent(key, txNum, roTx) if err != nil { return nil, err } @@ -1441,7 +1446,7 @@ func (dt *DomainRoTx) valsCursor(tx kv.Tx) (c kv.Cursor, err error) { } dt.valsC, err = tx.Cursor(dt.d.valsTable) if err != nil { - return nil, fmt.Errorf("valsCursor: %w", err) + return nil, err } return dt.valsC, nil } @@ -1452,7 +1457,7 @@ func (dt *DomainRoTx) keysCursor(tx kv.Tx) (c kv.CursorDupSort, err error) { } dt.keysC, err = tx.CursorDupSort(dt.d.keysTable) if err != nil { - return nil, fmt.Errorf("keysCursor: %w", err) + return nil, err } return dt.keysC, nil } @@ -1523,7 +1528,7 @@ func (dt *DomainRoTx) GetLatest(key1, key2 []byte, roTx kv.Tx) ([]byte, uint64, v, foundStep, found, err = dt.getLatestFromDb(key, roTx) if err != nil { - return nil, 0, false, fmt.Errorf("getLatestFromDb: %w", err) + return nil, 0, false, err } if found { return v, foundStep, true, nil @@ -1531,7 +1536,7 @@ func (dt *DomainRoTx) GetLatest(key1, key2 []byte, roTx kv.Tx) ([]byte, uint64, v, foundInFile, _, endTxNum, err := dt.getFromFiles(key) if err != nil { - return nil, 0, false, fmt.Errorf("getFromFiles: %w", err) + return nil, 0, false, err } return v, endTxNum / dt.d.aggregationStep, foundInFile, nil } @@ -1684,11 +1689,11 @@ func (dt *DomainRoTx) DomainRange(tx kv.Tx, fromKey, toKey []byte, ts uint64, as if !asc { panic("implement me") } - //histStateIt, err := tx.aggTx.AccountHistoricalStateRange(asOfTs, fromKey, toKey, limit, tx.MdbxTx) + //histStateIt, err := tx.aggCtx.AccountHistoricalStateRange(asOfTs, fromKey, toKey, limit, tx.MdbxTx) //if err != nil { // return nil, err //} - //lastestStateIt, err := tx.aggTx.DomainRangeLatest(tx.MdbxTx, kv.AccountDomain, fromKey, toKey, limit) + //lastestStateIt, err := tx.aggCtx.DomainRangeLatest(tx.MdbxTx, kv.AccountDomain, fromKey, toKey, limit) //if err != nil { // return nil, err //} @@ -2038,7 +2043,6 @@ func (hi *DomainLatestIterFile) init(dc *DomainRoTx) error { } for i, item := range dc.files { - // todo release btcursor when iter over/make it truly stateless btCursor, err := dc.statelessBtree(i).Seek(dc.statelessGetter(i), hi.from) if err != nil { return err diff --git a/erigon-lib/state/domain_committed.go b/erigon-lib/state/domain_committed.go index 2374b99ef28..eac2e22b636 100644 --- a/erigon-lib/state/domain_committed.go +++ b/erigon-lib/state/domain_committed.go @@ -20,15 +20,200 @@ import ( "bytes" "encoding/binary" "fmt" + "slices" "strings" + "github.com/google/btree" + "github.com/ledgerwatch/erigon-lib/recsplit" + "github.com/ledgerwatch/erigon-lib/types" + "golang.org/x/crypto/sha3" + "github.com/ledgerwatch/erigon-lib/commitment" + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/cryptozerocopy" "github.com/ledgerwatch/erigon-lib/common/length" - "github.com/ledgerwatch/erigon-lib/recsplit" ) +// Defines how to evaluate commitments +type CommitmentMode uint + +const ( + CommitmentModeDisabled CommitmentMode = 0 + CommitmentModeDirect CommitmentMode = 1 + CommitmentModeUpdate CommitmentMode = 2 +) + +func (m CommitmentMode) String() string { + switch m { + case CommitmentModeDisabled: + return "disabled" + case CommitmentModeDirect: + return "direct" + case CommitmentModeUpdate: + return "update" + default: + return "unknown" + } +} + +func ParseCommitmentMode(s string) CommitmentMode { + var mode CommitmentMode + switch s { + case "off": + 
mode = CommitmentModeDisabled + case "update": + mode = CommitmentModeUpdate + default: + mode = CommitmentModeDirect + } + return mode +} + type ValueMerger func(prev, current []byte) (merged []byte, err error) +type UpdateTree struct { + tree *btree.BTreeG[*commitmentItem] + keccak cryptozerocopy.KeccakState + keys map[string]struct{} + mode CommitmentMode +} + +func NewUpdateTree(m CommitmentMode) *UpdateTree { + return &UpdateTree{ + tree: btree.NewG[*commitmentItem](64, commitmentItemLessPlain), + keccak: sha3.NewLegacyKeccak256().(cryptozerocopy.KeccakState), + keys: map[string]struct{}{}, + mode: m, + } +} + +func (t *UpdateTree) get(key []byte) (*commitmentItem, bool) { + c := &commitmentItem{plainKey: key, update: commitment.Update{CodeHashOrStorage: commitment.EmptyCodeHashArray}} + el, ok := t.tree.Get(c) + if ok { + return el, true + } + c.plainKey = common.Copy(c.plainKey) + return c, false +} + +// TouchPlainKey marks plainKey as updated and applies different fn for different key types +// (different behaviour for Code, Account and Storage key modifications). +func (t *UpdateTree) TouchPlainKey(key string, val []byte, fn func(c *commitmentItem, val []byte)) { + switch t.mode { + case CommitmentModeUpdate: + item, _ := t.get([]byte(key)) + fn(item, val) + t.tree.ReplaceOrInsert(item) + case CommitmentModeDirect: + t.keys[key] = struct{}{} + default: + } +} + +func (t *UpdateTree) Size() uint64 { + return uint64(len(t.keys)) +} + +func (t *UpdateTree) TouchAccount(c *commitmentItem, val []byte) { + if len(val) == 0 { + c.update.Flags = commitment.DeleteUpdate + return + } + if c.update.Flags&commitment.DeleteUpdate != 0 { + c.update.Flags ^= commitment.DeleteUpdate + } + nonce, balance, chash := types.DecodeAccountBytesV3(val) + if c.update.Nonce != nonce { + c.update.Nonce = nonce + c.update.Flags |= commitment.NonceUpdate + } + if !c.update.Balance.Eq(balance) { + c.update.Balance.Set(balance) + c.update.Flags |= commitment.BalanceUpdate + } + if !bytes.Equal(chash, c.update.CodeHashOrStorage[:]) { + if len(chash) == 0 { + c.update.ValLength = length.Hash + copy(c.update.CodeHashOrStorage[:], commitment.EmptyCodeHash) + } else { + copy(c.update.CodeHashOrStorage[:], chash) + c.update.ValLength = length.Hash + c.update.Flags |= commitment.CodeUpdate + } + } +} + +func (t *UpdateTree) UpdatePrefix(prefix, val []byte, fn func(c *commitmentItem, val []byte)) { + t.tree.AscendGreaterOrEqual(&commitmentItem{}, func(item *commitmentItem) bool { + if !bytes.HasPrefix(item.plainKey, prefix) { + return false + } + fn(item, val) + return true + }) +} + +func (t *UpdateTree) TouchStorage(c *commitmentItem, val []byte) { + c.update.ValLength = len(val) + if len(val) == 0 { + c.update.Flags = commitment.DeleteUpdate + } else { + c.update.Flags |= commitment.StorageUpdate + copy(c.update.CodeHashOrStorage[:], val) + } +} + +func (t *UpdateTree) TouchCode(c *commitmentItem, val []byte) { + t.keccak.Reset() + t.keccak.Write(val) + t.keccak.Read(c.update.CodeHashOrStorage[:]) + if c.update.Flags == commitment.DeleteUpdate && len(val) == 0 { + c.update.Flags = commitment.DeleteUpdate + c.update.ValLength = 0 + return + } + c.update.ValLength = length.Hash + if len(val) != 0 { + c.update.Flags |= commitment.CodeUpdate + } +} + +// Returns list of both plain and hashed keys. If .mode is CommitmentModeUpdate, updates are also returned. +// No ordering guarantees are provided.
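// Usage sketch for the UpdateTree above, not part of this diff. In
// CommitmentModeDirect only the touched keys are recorded (fn is ignored) and
// List returns them sorted with a nil updates slice; in CommitmentModeUpdate
// fn is applied per key and both slices come back. addr and accVal below are
// caller-provided placeholders:
//
//	t := NewUpdateTree(CommitmentModeDirect)
//	t.TouchPlainKey(string(addr), accVal, t.TouchAccount)
//	keys, updates := t.List(true) // updates == nil in direct mode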
+func (t *UpdateTree) List(clear bool) ([][]byte, []commitment.Update) { + switch t.mode { + case CommitmentModeDirect: + plainKeys := make([][]byte, len(t.keys)) + i := 0 + for key := range t.keys { + plainKeys[i] = []byte(key) + i++ + } + slices.SortFunc(plainKeys, bytes.Compare) + if clear { + t.keys = make(map[string]struct{}, len(t.keys)/8) + } + + return plainKeys, nil + case CommitmentModeUpdate: + plainKeys := make([][]byte, t.tree.Len()) + updates := make([]commitment.Update, t.tree.Len()) + i := 0 + t.tree.Ascend(func(item *commitmentItem) bool { + plainKeys[i], updates[i] = item.plainKey, item.update + i++ + return true + }) + if clear { + t.tree.Clear(true) + } + return plainKeys, updates + default: + return nil, nil + } +} + type commitmentState struct { txNum uint64 blockNum uint64 @@ -83,19 +268,27 @@ func encodeShorterKey(buf []byte, offset uint64) []byte { return binary.AppendUvarint(buf, offset) } +type commitmentItem struct { + plainKey []byte + update commitment.Update +} + +func commitmentItemLessPlain(i, j *commitmentItem) bool { + return bytes.Compare(i.plainKey, j.plainKey) < 0 +} + // Finds shorter replacement for full key in given file item. filesItem -- result of merging of multiple files. // If item is nil, or shorter key was not found, or anything else goes wrong, nil key and false returned. -func (dt *DomainRoTx) findShortenedKey(fullKey []byte, itemGetter ArchiveGetter, item *filesItem) (shortened []byte, found bool) { +func (dt *DomainRoTx) findShortenedKey(fullKey []byte, item *filesItem) (shortened []byte, found bool) { if item == nil { return nil, false } + if !strings.Contains(item.decompressor.FileName(), dt.d.filenameBase) { panic(fmt.Sprintf("findShortenedKeyEasier of %s called with merged file %s", dt.d.filenameBase, item.decompressor.FileName())) } - if /*assert.Enable && */ itemGetter.FileName() != item.decompressor.FileName() { - panic(fmt.Sprintf("findShortenedKey of %s itemGetter (%s) is different to item.decompressor (%s)", - dt.d.filenameBase, itemGetter.FileName(), item.decompressor.FileName())) - } + + g := NewArchiveGetter(item.decompressor.MakeGetter(), dt.d.compression) //if idxList&withExistence != 0 { // hi, _ := dt.ht.iit.hashKey(fullKey) @@ -113,14 +306,14 @@ func (dt *DomainRoTx) findShortenedKey(fullKey []byte, itemGetter ArchiveGetter, return nil, false } - itemGetter.Reset(offset) - if !itemGetter.HasNext() { + g.Reset(offset) + if !g.HasNext() { dt.d.logger.Warn("commitment branch key replacement seek failed", "key", fmt.Sprintf("%x", fullKey), "idx", "hash", "file", item.decompressor.FileName()) return nil, false } - k, _ := itemGetter.Next(nil) + k, _ := g.Next(nil) if !bytes.Equal(fullKey, k) { dt.d.logger.Warn("commitment branch key replacement seek invalid key", "key", fmt.Sprintf("%x", fullKey), "idx", "hash", "file", item.decompressor.FileName()) @@ -130,7 +323,7 @@ func (dt *DomainRoTx) findShortenedKey(fullKey []byte, itemGetter ArchiveGetter, return encodeShorterKey(nil, offset), true } if dt.d.indexList&withBTree != 0 { - cur, err := item.bindex.Seek(itemGetter, fullKey) + cur, err := item.bindex.Seek(g, fullKey) if err != nil { dt.d.logger.Warn("commitment branch key replacement seek failed", "key", fmt.Sprintf("%x", fullKey), "idx", "bt", "err", err, "file", item.decompressor.FileName()) @@ -141,9 +334,9 @@ func (dt *DomainRoTx) findShortenedKey(fullKey []byte, itemGetter ArchiveGetter, } offset := cur.offsetInFile() - if uint64(itemGetter.Size()) <= offset { + if uint64(g.Size()) <= offset { 
dt.d.logger.Warn("commitment branch key replacement seek gone too far", - "key", fmt.Sprintf("%x", fullKey), "offset", offset, "size", itemGetter.Size(), "file", item.decompressor.FileName()) + "key", fmt.Sprintf("%x", fullKey), "offset", offset, "size", g.Size(), "file", item.decompressor.FileName()) return nil, false } return encodeShorterKey(nil, offset), true @@ -151,7 +344,12 @@ func (dt *DomainRoTx) findShortenedKey(fullKey []byte, itemGetter ArchiveGetter, return nil, false } -func (dt *DomainRoTx) lookupFileByItsRange(txFrom uint64, txTo uint64) *filesItem { +// searches in given list of files for a key or searches in domain files if list is empty +func (dt *DomainRoTx) lookupByShortenedKey(shortKey []byte, txFrom uint64, txTo uint64) (fullKey []byte, found bool) { + if len(shortKey) < 1 { + return nil, false + } + var item *filesItem for _, f := range dt.files { if f.startTxNum == txFrom && f.endTxNum == txTo { @@ -180,43 +378,47 @@ func (dt *DomainRoTx) lookupFileByItsRange(txFrom uint64, txTo uint64) *filesIte for _, f := range dt.files { visibleFiles += fmt.Sprintf("%d-%d;", f.startTxNum/dt.d.aggregationStep, f.endTxNum/dt.d.aggregationStep) } - dt.d.logger.Warn("lookupFileByItsRange: file not found", + dt.d.logger.Warn("lookupByShortenedKey file not found", "stepFrom", txFrom/dt.d.aggregationStep, "stepTo", txTo/dt.d.aggregationStep, + "shortened", fmt.Sprintf("%x", shortKey), "domain", dt.d.keysTable, "files", fileStepsss, "_visibleFiles", visibleFiles, "visibleFilesCount", len(dt.files), "filesCount", dt.d.dirtyFiles.Len()) - return nil - } - return item -} - -// searches in given list of files for a key or searches in domain files if list is empty -func (dt *DomainRoTx) lookupByShortenedKey(shortKey []byte, getter ArchiveGetter) (fullKey []byte, found bool) { - if len(shortKey) < 1 { return nil, false } + offset := decodeShorterKey(shortKey) defer func() { if r := recover(); r != nil { dt.d.logger.Crit("lookupByShortenedKey panics", "err", r, "domain", dt.d.keysTable, - "offset", offset, "short", fmt.Sprintf("%x", shortKey), - "cleanFilesCount", len(dt.files), "dirtyFilesCount", dt.d.dirtyFiles.Len(), - "file", getter.FileName()) + "short", fmt.Sprintf("%x", shortKey), + "stepFrom", txFrom/dt.d.aggregationStep, "stepTo", txTo/dt.d.aggregationStep, "offset", offset, + "visibleFilesCount", len(dt.files), "filesCount", dt.d.dirtyFiles.Len(), + "fileFound", item != nil) } }() - //getter := NewArchiveGetter(item.decompressor.MakeGetter(), dt.d.compression) - getter.Reset(offset) - if !getter.HasNext() || uint64(getter.Size()) <= offset { - dt.d.logger.Warn("lookupByShortenedKey failed", "short", shortKey, "offset", offset, "file", getter.FileName()) + g := NewArchiveGetter(item.decompressor.MakeGetter(), dt.d.compression) + g.Reset(offset) + if !g.HasNext() || uint64(g.Size()) <= offset { + dt.d.logger.Warn("lookupByShortenedKey failed", + "stepFrom", txFrom/dt.d.aggregationStep, "stepTo", txTo/dt.d.aggregationStep, "offset", offset, + "size", g.Size(), "short", shortKey, "file", item.decompressor.FileName()) return nil, false } - fullKey, _ = getter.Next(nil) + fullKey, _ = g.Next(nil) + // dt.d.logger.Debug(fmt.Sprintf("lookupByShortenedKey [%x]=>{%x}", shortKey, fullKey), + // "stepFrom", stepFrom, "stepTo", stepTo, "offset", offset, "file", item.decompressor.FileName()) return fullKey, true } +//func (dc *DomainRoTx) SqueezeExistingCommitmentFile() { +// dc.commitmentValTransformDomain() +// +//} + // commitmentValTransform parses the value of the commitment record to 
extract references // to accounts and storage items, then looks them up in the new, merged files, and replaces them with // the updated references @@ -234,90 +436,69 @@ func (dt *DomainRoTx) commitmentValTransformDomain(accounts, storage *DomainRoTx if !dt.d.replaceKeysInValues || len(valBuf) == 0 { return valBuf, nil } - si := storage.lookupFileByItsRange(keyFromTxNum, keyEndTxNum) - if si == nil { - return nil, fmt.Errorf("storage file not found for %d-%d", keyFromTxNum, keyEndTxNum) - } - ai := accounts.lookupFileByItsRange(keyFromTxNum, keyEndTxNum) - if ai == nil { - return nil, fmt.Errorf("account file not found for %d-%d", keyFromTxNum, keyEndTxNum) - } - if si.decompressor == nil || ai.decompressor == nil { - return nil, fmt.Errorf("decompressor is nil for existing storage or account") - } - if mergedStorage == nil || mergedAccount == nil { - return nil, fmt.Errorf("mergedStorage or mergedAccount is nil") - } + return commitment.BranchData(valBuf). + ReplacePlainKeys(nil, func(key []byte, isStorage bool) ([]byte, error) { + var found bool + var buf []byte + if isStorage { + if len(key) == length.Addr+length.Hash { + // Non-optimised key originating from a database record + buf = append(buf[:0], key...) + } else { + // Optimised key referencing a state file record (file number and offset within the file) + buf, found = storage.lookupByShortenedKey(key, keyFromTxNum, keyEndTxNum) + if !found { + dt.d.logger.Crit("valTransform: lost storage full key", + "shortened", fmt.Sprintf("%x", key), + "merging", stoMerged, + "valBuf", fmt.Sprintf("l=%d %x", len(valBuf), valBuf), + ) + return nil, fmt.Errorf("lookup lost storage full key %x", key) + } + } - sig := NewArchiveGetter(si.decompressor.MakeGetter(), storage.d.compression) - aig := NewArchiveGetter(ai.decompressor.MakeGetter(), accounts.d.compression) - ms := NewArchiveGetter(mergedStorage.decompressor.MakeGetter(), storage.d.compression) - ma := NewArchiveGetter(mergedAccount.decompressor.MakeGetter(), storage.d.compression) + shortened, found := storage.findShortenedKey(buf, mergedStorage) + if !found { + if len(buf) == length.Addr+length.Hash { + return buf, nil // if plain key is lost, we can save original fullkey + } + // if shortened key lost, we can't continue + dt.d.logger.Crit("valTransform: replacement for full storage key was not found", + "step", fmt.Sprintf("%d-%d", keyFromTxNum/dt.d.aggregationStep, keyEndTxNum/dt.d.aggregationStep), + "shortened", fmt.Sprintf("%x", shortened), "toReplace", fmt.Sprintf("%x", buf)) + + return nil, fmt.Errorf("replacement not found for storage %x", buf) + } + return shortened, nil + } - replacer := func(key []byte, isStorage bool) ([]byte, error) { - var found bool - auxBuf := dt.keyBuf[:0] - if isStorage { - if len(key) == length.Addr+length.Hash { + if len(key) == length.Addr { // Non-optimised key originating from a database record - auxBuf = append(auxBuf[:0], key...) + buf = append(buf[:0], key...) 
} else { - // Optimised key referencing a state file record (file number and offset within the file) - auxBuf, found = storage.lookupByShortenedKey(key, sig) + buf, found = accounts.lookupByShortenedKey(key, keyFromTxNum, keyEndTxNum) if !found { - dt.d.logger.Crit("valTransform: lost storage full key", + dt.d.logger.Crit("valTransform: lost account full key", "shortened", fmt.Sprintf("%x", key), - "merging", stoMerged, + "merging", accMerged, "valBuf", fmt.Sprintf("l=%d %x", len(valBuf), valBuf), ) - return nil, fmt.Errorf("lookup lost storage full key %x", key) + return nil, fmt.Errorf("lookup account full key: %x", key) } } - shortened, found := storage.findShortenedKey(auxBuf, ms, mergedStorage) + shortened, found := accounts.findShortenedKey(buf, mergedAccount) if !found { - if len(auxBuf) == length.Addr+length.Hash { - return auxBuf, nil // if plain key is lost, we can save original fullkey + if len(buf) == length.Addr { + return buf, nil // if plain key is lost, we can save original fullkey } - // if shortened key lost, we can't continue - dt.d.logger.Crit("valTransform: replacement for full storage key was not found", + dt.d.logger.Crit("valTransform: replacement for full account key was not found", "step", fmt.Sprintf("%d-%d", keyFromTxNum/dt.d.aggregationStep, keyEndTxNum/dt.d.aggregationStep), - "shortened", fmt.Sprintf("%x", shortened), "toReplace", fmt.Sprintf("%x", auxBuf)) - - return nil, fmt.Errorf("replacement not found for storage %x", auxBuf) + "shortened", fmt.Sprintf("%x", shortened), "toReplace", fmt.Sprintf("%x", buf)) + return nil, fmt.Errorf("replacement not found for account %x", buf) } return shortened, nil - } - - if len(key) == length.Addr { - // Non-optimised key originating from a database record - auxBuf = append(auxBuf[:0], key...) 
- } else { - auxBuf, found = accounts.lookupByShortenedKey(key, aig) - if !found { - dt.d.logger.Crit("valTransform: lost account full key", - "shortened", fmt.Sprintf("%x", key), - "merging", accMerged, - "valBuf", fmt.Sprintf("l=%d %x", len(valBuf), valBuf), - ) - return nil, fmt.Errorf("lookup account full key: %x", key) - } - } - - shortened, found := accounts.findShortenedKey(auxBuf, ma, mergedAccount) - if !found { - if len(auxBuf) == length.Addr { - return auxBuf, nil // if plain key is lost, we can save original fullkey - } - dt.d.logger.Crit("valTransform: replacement for full account key was not found", - "step", fmt.Sprintf("%d-%d", keyFromTxNum/dt.d.aggregationStep, keyEndTxNum/dt.d.aggregationStep), - "shortened", fmt.Sprintf("%x", shortened), "toReplace", fmt.Sprintf("%x", auxBuf)) - return nil, fmt.Errorf("replacement not found for account %x", auxBuf) - } - return shortened, nil - } - - return commitment.BranchData(valBuf).ReplacePlainKeys(dt.comBuf[:0], replacer) + }) } } diff --git a/erigon-lib/state/domain_shared.go b/erigon-lib/state/domain_shared.go index 72971ba8a2b..27461fcce18 100644 --- a/erigon-lib/state/domain_shared.go +++ b/erigon-lib/state/domain_shared.go @@ -13,9 +13,6 @@ import ( "time" "unsafe" - "github.com/ledgerwatch/erigon-lib/common/cryptozerocopy" - "golang.org/x/crypto/sha3" - btree2 "github.com/tidwall/btree" "github.com/ledgerwatch/erigon-lib/commitment" @@ -57,7 +54,7 @@ func (l *KvList) Swap(i, j int) { type SharedDomains struct { noFlush int - aggTx *AggregatorRoTx + aggCtx *AggregatorRoTx sdCtx *SharedDomainsCommitmentContext roTx kv.Tx logger log.Logger @@ -79,30 +76,40 @@ type SharedDomains struct { tracesToWriter *invertedIndexBufferedWriter } -type HasAggTx interface { - AggTx() interface{} +type HasAggCtx interface { + AggCtx() interface{} } func NewSharedDomains(tx kv.Tx, logger log.Logger) (*SharedDomains, error) { - sd := &SharedDomains{ - logger: logger, - storage: btree2.NewMap[string, []byte](128), - //trace: true, + var ac *AggregatorRoTx + if casted, ok := tx.(HasAggCtx); ok { + ac = casted.AggCtx().(*AggregatorRoTx) + } else { + return nil, fmt.Errorf("type %T need AggCtx method", tx) + } + if tx == nil { + return nil, fmt.Errorf("tx is nil") } - sd.SetTx(tx) - sd.logAddrsWriter = sd.aggTx.logAddrs.NewWriter() - sd.logTopicsWriter = sd.aggTx.logTopics.NewWriter() - sd.tracesFromWriter = sd.aggTx.tracesFrom.NewWriter() - sd.tracesToWriter = sd.aggTx.tracesTo.NewWriter() + sd := &SharedDomains{ + logger: logger, + aggCtx: ac, + roTx: tx, + //trace: true, + logAddrsWriter: ac.logAddrs.NewWriter(), + logTopicsWriter: ac.logTopics.NewWriter(), + tracesFromWriter: ac.tracesFrom.NewWriter(), + tracesToWriter: ac.tracesTo.NewWriter(), - for id, d := range sd.aggTx.d { + storage: btree2.NewMap[string, []byte](128), + } + for id, d := range ac.d { sd.domains[id] = map[string][]byte{} sd.dWriter[id] = d.NewWriter() } sd.SetTxNum(0) - sd.sdCtx = NewSharedDomainsCommitmentContext(sd, commitment.ModeDirect, commitment.VariantHexPatriciaTrie) + sd.sdCtx = NewSharedDomainsCommitmentContext(sd, CommitmentModeDirect, commitment.VariantHexPatriciaTrie) if _, err := sd.SeekCommitment(context.Background(), tx); err != nil { return nil, fmt.Errorf("SeekCommitment: %w", err) @@ -110,39 +117,37 @@ func NewSharedDomains(tx kv.Tx, logger log.Logger) (*SharedDomains, error) { return sd, nil } -func (sd *SharedDomains) AggTx() interface{} { return sd.aggTx } +func (sd *SharedDomains) AggCtx() interface{} { return sd.aggCtx } -// aggregator context should 
call aggTx.Unwind before this one. +// aggregator context should call aggCtx.Unwind before this one. func (sd *SharedDomains) Unwind(ctx context.Context, rwTx kv.RwTx, blockUnwindTo, txUnwindTo uint64) error { - step := txUnwindTo / sd.aggTx.a.StepSize() + step := txUnwindTo / sd.aggCtx.a.StepSize() logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() - sd.aggTx.a.logger.Info("aggregator unwind", "step", step, - "txUnwindTo", txUnwindTo, "stepsRangeInDB", sd.aggTx.a.StepsRangeInDBAsStr(rwTx)) - //fmt.Printf("aggregator unwind step %d txUnwindTo %d stepsRangeInDB %s\n", step, txUnwindTo, sd.aggTx.a.StepsRangeInDBAsStr(rwTx)) - sf := time.Now() - defer mxUnwindSharedTook.ObserveDuration(sf) + sd.aggCtx.a.logger.Info("aggregator unwind", "step", step, + "txUnwindTo", txUnwindTo, "stepsRangeInDB", sd.aggCtx.a.StepsRangeInDBAsStr(rwTx)) + //fmt.Printf("aggregator unwind step %d txUnwindTo %d stepsRangeInDB %s\n", step, txUnwindTo, sd.aggCtx.a.StepsRangeInDBAsStr(rwTx)) if err := sd.Flush(ctx, rwTx); err != nil { return err } withWarmup := false - for _, d := range sd.aggTx.d { + for _, d := range sd.aggCtx.d { if err := d.Unwind(ctx, rwTx, step, txUnwindTo); err != nil { return err } } - if _, err := sd.aggTx.logAddrs.Prune(ctx, rwTx, txUnwindTo, math.MaxUint64, math.MaxUint64, logEvery, true, withWarmup, nil); err != nil { + if _, err := sd.aggCtx.logAddrs.Prune(ctx, rwTx, txUnwindTo, math.MaxUint64, math.MaxUint64, logEvery, true, withWarmup, nil); err != nil { return err } - if _, err := sd.aggTx.logTopics.Prune(ctx, rwTx, txUnwindTo, math.MaxUint64, math.MaxUint64, logEvery, true, withWarmup, nil); err != nil { + if _, err := sd.aggCtx.logTopics.Prune(ctx, rwTx, txUnwindTo, math.MaxUint64, math.MaxUint64, logEvery, true, withWarmup, nil); err != nil { return err } - if _, err := sd.aggTx.tracesFrom.Prune(ctx, rwTx, txUnwindTo, math.MaxUint64, math.MaxUint64, logEvery, true, withWarmup, nil); err != nil { + if _, err := sd.aggCtx.tracesFrom.Prune(ctx, rwTx, txUnwindTo, math.MaxUint64, math.MaxUint64, logEvery, true, withWarmup, nil); err != nil { return err } - if _, err := sd.aggTx.tracesTo.Prune(ctx, rwTx, txUnwindTo, math.MaxUint64, math.MaxUint64, logEvery, true, withWarmup, nil); err != nil { + if _, err := sd.aggCtx.tracesTo.Prune(ctx, rwTx, txUnwindTo, math.MaxUint64, math.MaxUint64, logEvery, true, withWarmup, nil); err != nil { return err } @@ -153,31 +158,29 @@ func (sd *SharedDomains) Unwind(ctx context.Context, rwTx kv.RwTx, blockUnwindTo } func (sd *SharedDomains) rebuildCommitment(ctx context.Context, roTx kv.Tx, blockNum uint64) ([]byte, error) { - it, err := sd.aggTx.HistoryRange(kv.AccountsHistory, int(sd.TxNum()), math.MaxInt64, order.Asc, -1, roTx) + it, err := sd.aggCtx.AccountHistoryRange(int(sd.TxNum()), math.MaxInt64, order.Asc, -1, roTx) if err != nil { return nil, err } - defer it.Close() for it.HasNext() { k, _, err := it.Next() if err != nil { return nil, err } - sd.sdCtx.TouchKey(kv.AccountsDomain, string(k), nil) + sd.sdCtx.TouchPlainKey(string(k), nil, sd.sdCtx.TouchAccount) } - it, err = sd.aggTx.HistoryRange(kv.StorageHistory, int(sd.TxNum()), math.MaxInt64, order.Asc, -1, roTx) + it, err = sd.aggCtx.StorageHistoryRange(int(sd.TxNum()), math.MaxInt64, order.Asc, -1, roTx) if err != nil { return nil, err } - defer it.Close() for it.HasNext() { k, _, err := it.Next() if err != nil { return nil, err } - sd.sdCtx.TouchKey(kv.StorageDomain, string(k), nil) + sd.sdCtx.TouchPlainKey(string(k), nil, sd.sdCtx.TouchStorage) } sd.sdCtx.Reset() @@ -186,7 
+189,7 @@ func (sd *SharedDomains) rebuildCommitment(ctx context.Context, roTx kv.Tx, bloc // SeekCommitment lookups latest available commitment and sets it as current func (sd *SharedDomains) SeekCommitment(ctx context.Context, tx kv.Tx) (txsFromBlockBeginning uint64, err error) { - bn, txn, ok, err := sd.sdCtx.SeekCommitment(tx, sd.aggTx.d[kv.CommitmentDomain], 0, math.MaxUint64) + bn, txn, ok, err := sd.sdCtx.SeekCommitment(tx, sd.aggCtx.d[kv.CommitmentDomain], 0, math.MaxUint64) if err != nil { return 0, err } @@ -243,7 +246,7 @@ func (sd *SharedDomains) SeekCommitment(ctx context.Context, tx kv.Tx) (txsFromB func (sd *SharedDomains) ClearRam(resetCommitment bool) { //sd.muMaps.Lock() //defer sd.muMaps.Unlock() - for i := range sd.domains { + for i, _ := range sd.domains { sd.domains[i] = map[string][]byte{} } if resetCommitment { @@ -302,7 +305,7 @@ func (sd *SharedDomains) LatestCommitment(prefix []byte) ([]byte, uint64, error) // sd cache values as is (without transformation) so safe to return return v, 0, nil } - v, step, found, err := sd.aggTx.d[kv.CommitmentDomain].getLatestFromDb(prefix, sd.roTx) + v, step, found, err := sd.aggCtx.d[kv.CommitmentDomain].getLatestFromDb(prefix, sd.roTx) if err != nil { return nil, 0, fmt.Errorf("commitment prefix %x read error: %w", prefix, err) } @@ -313,12 +316,12 @@ func (sd *SharedDomains) LatestCommitment(prefix []byte) ([]byte, uint64, error) // GetfromFiles doesn't provide same semantics as getLatestFromDB - it returns start/end tx // of file where the value is stored (not exact step when kv has been set) - v, _, startTx, endTx, err := sd.aggTx.d[kv.CommitmentDomain].getFromFiles(prefix) + v, _, startTx, endTx, err := sd.aggCtx.d[kv.CommitmentDomain].getFromFiles(prefix) if err != nil { return nil, 0, fmt.Errorf("commitment prefix %x read error: %w", prefix, err) } - if !sd.aggTx.a.commitmentValuesTransform || bytes.Equal(prefix, keyCommitmentState) { + if !sd.aggCtx.a.commitmentValuesTransform || bytes.Equal(prefix, keyCommitmentState) { return v, endTx, nil } @@ -327,42 +330,35 @@ func (sd *SharedDomains) LatestCommitment(prefix []byte) ([]byte, uint64, error) if err != nil { return nil, 0, err } - return rv, endTx / sd.aggTx.a.StepSize(), nil + return rv, endTx / sd.aggCtx.a.StepSize(), nil } // replaceShortenedKeysInBranch replaces shortened keys in the branch with full keys func (sd *SharedDomains) replaceShortenedKeysInBranch(prefix []byte, branch commitment.BranchData, fStartTxNum uint64, fEndTxNum uint64) (commitment.BranchData, error) { - if !sd.aggTx.d[kv.CommitmentDomain].d.replaceKeysInValues && sd.aggTx.a.commitmentValuesTransform { + if !sd.aggCtx.d[kv.CommitmentDomain].d.replaceKeysInValues && sd.aggCtx.a.commitmentValuesTransform { panic("domain.replaceKeysInValues is disabled, but agg.commitmentValuesTransform is enabled") } - if !sd.aggTx.a.commitmentValuesTransform || + if !sd.aggCtx.a.commitmentValuesTransform || len(branch) == 0 || - sd.aggTx.minimaxTxNumInDomainFiles(false) == 0 || + sd.aggCtx.minimaxTxNumInDomainFiles(false) == 0 || bytes.Equal(prefix, keyCommitmentState) { return branch, nil // do not transform, return as is } - sto := sd.aggTx.d[kv.StorageDomain] - acc := sd.aggTx.d[kv.AccountsDomain] - storageItem := sto.lookupFileByItsRange(fStartTxNum, fEndTxNum) - accountItem := acc.lookupFileByItsRange(fStartTxNum, fEndTxNum) - storageGetter := NewArchiveGetter(storageItem.decompressor.MakeGetter(), sto.d.compression) - accountGetter := NewArchiveGetter(accountItem.decompressor.MakeGetter(), acc.d.compression) 
- - aux := make([]byte, 0, 256) - return branch.ReplacePlainKeys(aux, func(key []byte, isStorage bool) ([]byte, error) { + return branch.ReplacePlainKeys(nil, func(key []byte, isStorage bool) ([]byte, error) { if isStorage { if len(key) == length.Addr+length.Hash { return nil, nil // save storage key as is } // Optimised key referencing a state file record (file number and offset within the file) - storagePlainKey, found := sto.lookupByShortenedKey(key, storageGetter) + storagePlainKey, found := sd.aggCtx.d[kv.StorageDomain].lookupByShortenedKey(key, fStartTxNum, fEndTxNum) if !found { - s0, s1 := fStartTxNum/sd.aggTx.a.StepSize(), fEndTxNum/sd.aggTx.a.StepSize() + s0, s1 := fStartTxNum/sd.aggCtx.a.StepSize(), fEndTxNum/sd.aggCtx.a.StepSize() + oft := decodeShorterKey(key) sd.logger.Crit("replace back lost storage full key", "shortened", fmt.Sprintf("%x", key), - "decoded", fmt.Sprintf("step %d-%d; offt %d", s0, s1, decodeShorterKey(key))) + "decoded", fmt.Sprintf("step %d-%d; offt %d", s0, s1, oft)) return nil, fmt.Errorf("replace back lost storage full key: %x", key) } return storagePlainKey, nil @@ -372,11 +368,12 @@ func (sd *SharedDomains) replaceShortenedKeysInBranch(prefix []byte, branch comm return nil, nil // save account key as is } - apkBuf, found := acc.lookupByShortenedKey(key, accountGetter) + apkBuf, found := sd.aggCtx.d[kv.AccountsDomain].lookupByShortenedKey(key, fStartTxNum, fEndTxNum) if !found { - s0, s1 := fStartTxNum/sd.aggTx.a.StepSize(), fEndTxNum/sd.aggTx.a.StepSize() + oft := decodeShorterKey(key) + s0, s1 := fStartTxNum/sd.aggCtx.a.StepSize(), fEndTxNum/sd.aggCtx.a.StepSize() sd.logger.Crit("replace back lost account full key", "shortened", fmt.Sprintf("%x", key), - "decoded", fmt.Sprintf("step %d-%d; offt %d", s0, s1, decodeShorterKey(key))) + "decoded", fmt.Sprintf("step %d-%d; offt %d", s0, s1, oft)) return nil, fmt.Errorf("replace back lost account full key: %x", key) } return apkBuf, nil @@ -437,14 +434,14 @@ func (sd *SharedDomains) ReadsValid(readLists map[string]*KvList) bool { func (sd *SharedDomains) updateAccountData(addr []byte, account, prevAccount []byte, prevStep uint64) error { addrS := string(addr) - sd.sdCtx.TouchKey(kv.AccountsDomain, addrS, account) + sd.sdCtx.TouchPlainKey(addrS, account, sd.sdCtx.TouchAccount) sd.put(kv.AccountsDomain, addrS, account) return sd.dWriter[kv.AccountsDomain].PutWithPrev(addr, nil, account, prevAccount, prevStep) } func (sd *SharedDomains) updateAccountCode(addr, code, prevCode []byte, prevStep uint64) error { addrS := string(addr) - sd.sdCtx.TouchKey(kv.CodeDomain, addrS, code) + sd.sdCtx.TouchPlainKey(addrS, code, sd.sdCtx.TouchCode) sd.put(kv.CodeDomain, addrS, code) if len(code) == 0 { return sd.dWriter[kv.CodeDomain].DeleteWithPrev(addr, nil, prevCode, prevStep) @@ -468,7 +465,7 @@ func (sd *SharedDomains) deleteAccount(addr, prev []byte, prevStep uint64) error return err } - sd.sdCtx.TouchKey(kv.AccountsDomain, addrS, nil) + sd.sdCtx.TouchPlainKey(addrS, nil, sd.sdCtx.TouchAccount) sd.put(kv.AccountsDomain, addrS, nil) if err := sd.dWriter[kv.AccountsDomain].DeleteWithPrev(addr, nil, prev, prevStep); err != nil { return err @@ -484,7 +481,7 @@ func (sd *SharedDomains) writeAccountStorage(addr, loc []byte, value, preVal []b composite = append(append(composite, addr...), loc...) 
} compositeS := string(composite) - sd.sdCtx.TouchKey(kv.StorageDomain, compositeS, value) + sd.sdCtx.TouchPlainKey(compositeS, value, sd.sdCtx.TouchStorage) sd.put(kv.StorageDomain, compositeS, value) return sd.dWriter[kv.StorageDomain].PutWithPrev(composite, nil, value, preVal, prevStep) } @@ -495,7 +492,7 @@ func (sd *SharedDomains) delAccountStorage(addr, loc []byte, preVal []byte, prev composite = append(append(composite, addr...), loc...) } compositeS := string(composite) - sd.sdCtx.TouchKey(kv.StorageDomain, compositeS, nil) + sd.sdCtx.TouchPlainKey(compositeS, nil, sd.sdCtx.TouchStorage) sd.put(kv.StorageDomain, compositeS, nil) return sd.dWriter[kv.StorageDomain].DeleteWithPrev(composite, nil, preVal, prevStep) } @@ -516,24 +513,8 @@ func (sd *SharedDomains) IndexAdd(table kv.InvertedIdx, key []byte) (err error) return err } -func (sd *SharedDomains) SetTx(tx kv.Tx) { - if tx == nil { - panic(fmt.Errorf("tx is nil")) - } - sd.roTx = tx - - casted, ok := tx.(HasAggTx) - if !ok { - panic(fmt.Errorf("type %T need AggTx method", tx)) - } - - sd.aggTx = casted.AggTx().(*AggregatorRoTx) - if sd.aggTx == nil { - panic(fmt.Errorf("aggtx is nil")) - } -} - -func (sd *SharedDomains) StepSize() uint64 { return sd.aggTx.a.StepSize() } +func (sd *SharedDomains) SetTx(tx kv.RwTx) { sd.roTx = tx } +func (sd *SharedDomains) StepSize() uint64 { return sd.aggCtx.a.StepSize() } // SetTxNum sets txNum for all domains as well as common txNum for all domains // Requires for sd.rwTx because of commitment evaluation in shared domains if aggregationStep is reached @@ -604,7 +585,7 @@ func (sd *SharedDomains) IterateStoragePrefix(prefix []byte, it func(k []byte, v } roTx := sd.roTx - keysCursor, err := roTx.CursorDupSort(sd.aggTx.a.d[kv.StorageDomain].keysTable) + keysCursor, err := roTx.CursorDupSort(sd.aggCtx.a.d[kv.StorageDomain].keysTable) if err != nil { return err } @@ -622,13 +603,13 @@ func (sd *SharedDomains) IterateStoragePrefix(prefix []byte, it func(k []byte, v keySuffix := make([]byte, len(k)+8) copy(keySuffix, k) copy(keySuffix[len(k):], v) - if v, err = roTx.GetOne(sd.aggTx.a.d[kv.StorageDomain].valsTable, keySuffix); err != nil { + if v, err = roTx.GetOne(sd.aggCtx.a.d[kv.StorageDomain].valsTable, keySuffix); err != nil { return err } heap.Push(cpPtr, &CursorItem{t: DB_CURSOR, key: common.Copy(k), val: common.Copy(v), step: step, c: keysCursor, endTxNum: endTxNum, reverse: true}) } - sctx := sd.aggTx.d[kv.StorageDomain] + sctx := sd.aggCtx.d[kv.StorageDomain] for i, item := range sctx.files { cursor, err := item.src.bindex.Seek(sctx.statelessGetter(i), prefix) if err != nil { @@ -702,7 +683,7 @@ func (sd *SharedDomains) IterateStoragePrefix(prefix []byte, it func(k []byte, v keySuffix := make([]byte, len(k)+8) copy(keySuffix, k) copy(keySuffix[len(k):], v) - if v, err = roTx.GetOne(sd.aggTx.a.d[kv.StorageDomain].valsTable, keySuffix); err != nil { + if v, err = roTx.GetOne(sd.aggCtx.a.d[kv.StorageDomain].valsTable, keySuffix); err != nil { return err } ci1.val = common.Copy(v) @@ -722,7 +703,7 @@ func (sd *SharedDomains) IterateStoragePrefix(prefix []byte, it func(k []byte, v func (sd *SharedDomains) Close() { sd.SetBlockNum(0) - if sd.aggTx != nil { + if sd.aggCtx != nil { sd.SetTxNum(0) //sd.walLock.Lock() @@ -737,7 +718,8 @@ func (sd *SharedDomains) Close() { } if sd.sdCtx != nil { - sd.sdCtx.Close() + sd.sdCtx.updates.keys = nil + sd.sdCtx.updates.tree.Clear(true) } } @@ -754,7 +736,7 @@ func (sd *SharedDomains) Flush(ctx context.Context, tx kv.RwTx) error { } if sd.trace { _, f, l, _ 
:= runtime.Caller(1) - fmt.Printf("[SD aggTx=%d] FLUSHING at tx %d [%x], caller %s:%d\n", sd.aggTx.id, sd.TxNum(), fh, filepath.Base(f), l) + fmt.Printf("[SD aggCtx=%d] FLUSHING at tx %d [%x], caller %s:%d\n", sd.aggCtx.id, sd.TxNum(), fh, filepath.Base(f), l) } for _, d := range sd.dWriter { if d != nil { @@ -776,7 +758,7 @@ func (sd *SharedDomains) Flush(ctx context.Context, tx kv.RwTx) error { return err } if dbg.PruneOnFlushTimeout != 0 { - _, err = sd.aggTx.PruneSmallBatches(ctx, dbg.PruneOnFlushTimeout, tx) + _, err = sd.aggCtx.PruneSmallBatches(ctx, dbg.PruneOnFlushTimeout, tx) if err != nil { return err } @@ -807,7 +789,7 @@ func (sd *SharedDomains) DomainGet(domain kv.Domain, k, k2 []byte) (v []byte, st if v, ok := sd.get(domain, k); ok { return v, 0, nil } - v, step, _, err = sd.aggTx.GetLatest(domain, k, nil, sd.roTx) + v, step, _, err = sd.aggCtx.GetLatest(domain, k, nil, sd.roTx) if err != nil { return nil, 0, fmt.Errorf("storage %x read error: %w", k, err) } @@ -919,44 +901,39 @@ func (sd *SharedDomains) Tx() kv.Tx { return sd.roTx } type SharedDomainsCommitmentContext struct { sd *SharedDomains discard bool - mode commitment.Mode - branches map[string]cachedBranch - keccak cryptozerocopy.KeccakState - updates *commitment.UpdateTree + updates *UpdateTree + mode CommitmentMode + branchCache map[string]cachedBranch patriciaTrie commitment.Trie justRestored atomic.Bool } -func NewSharedDomainsCommitmentContext(sd *SharedDomains, mode commitment.Mode, trieVariant commitment.TrieVariant) *SharedDomainsCommitmentContext { +func NewSharedDomainsCommitmentContext(sd *SharedDomains, mode CommitmentMode, trieVariant commitment.TrieVariant) *SharedDomainsCommitmentContext { ctx := &SharedDomainsCommitmentContext{ - sd: sd, - mode: mode, - discard: dbg.DiscardCommitment(), - branches: make(map[string]cachedBranch), - keccak: sha3.NewLegacyKeccak256().(cryptozerocopy.KeccakState), + sd: sd, + mode: mode, + updates: NewUpdateTree(mode), + discard: dbg.DiscardCommitment(), + patriciaTrie: commitment.InitializeTrie(trieVariant), + branchCache: make(map[string]cachedBranch), } - ctx.patriciaTrie, ctx.updates = commitment.InitializeTrieAndUpdateTree(trieVariant, mode, sd.aggTx.a.tmpdir) ctx.patriciaTrie.ResetContext(ctx) return ctx } -func (sdc *SharedDomainsCommitmentContext) Close() { - sdc.updates.Close() -} - type cachedBranch struct { data []byte step uint64 } -// ResetBranchCache should be called after each commitment computation +// Cache should ResetBranchCache after each commitment computation func (sdc *SharedDomainsCommitmentContext) ResetBranchCache() { - clear(sdc.branches) + sdc.branchCache = make(map[string]cachedBranch) } func (sdc *SharedDomainsCommitmentContext) GetBranch(pref []byte) ([]byte, uint64, error) { - cached, ok := sdc.branches[string(pref)] + cached, ok := sdc.branchCache[string(pref)] if ok { // cached value is already transformed/clean to read. 
// Cache should ResetBranchCache after each commitment computation @@ -970,13 +947,12 @@ func (sdc *SharedDomainsCommitmentContext) GetBranch(pref []byte) ([]byte, uint6 if sdc.sd.trace { fmt.Printf("[SDC] GetBranch: %x: %x\n", pref, v) } - // Trie reads prefix during unfold and after everything is ready reads it again to Merge update, if any, so - // cache branch until ResetBranchCache called - sdc.branches[string(pref)] = cachedBranch{data: v, step: step} - if len(v) == 0 { return nil, 0, nil } + // Trie reads prefix during unfold and after everything is ready reads it again to Merge update, if any, so + // cache branch until ResetBranchCache called + sdc.branchCache[string(pref)] = cachedBranch{data: v, step: step} return v, step, nil } @@ -984,7 +960,7 @@ func (sdc *SharedDomainsCommitmentContext) PutBranch(prefix []byte, data []byte, if sdc.sd.trace { fmt.Printf("[SDC] PutBranch: %x: %x\n", prefix, data) } - sdc.branches[string(prefix)] = cachedBranch{data: data, step: prevStep} + sdc.branchCache[string(prefix)] = cachedBranch{data: data, step: prevStep} return sdc.sd.updateCommitmentData(prefix, data, prevData, prevStep) } @@ -1002,10 +978,7 @@ func (sdc *SharedDomainsCommitmentContext) GetAccount(plainKey []byte, cell *com if len(chash) > 0 { copy(cell.CodeHash[:], chash) } - } - if bytes.Equal(cell.CodeHash[:], commitment.EmptyCodeHash) { - cell.Delete = len(encAccount) == 0 - return nil + //fmt.Printf("GetAccount: %x: n=%d b=%d ch=%x\n", plainKey, nonce, balance, chash) } code, _, err := sdc.sd.DomainGet(kv.CodeDomain, plainKey, nil) @@ -1013,9 +986,10 @@ func (sdc *SharedDomainsCommitmentContext) GetAccount(plainKey []byte, cell *com return fmt.Errorf("GetAccount: failed to read latest code: %w", err) } if len(code) > 0 { - sdc.keccak.Reset() - sdc.keccak.Write(code) - sdc.keccak.Read(cell.CodeHash[:]) + //fmt.Printf("GetAccount: code %x - %x\n", plainKey, code) + sdc.updates.keccak.Reset() + sdc.updates.keccak.Write(code) + sdc.updates.keccak.Read(cell.CodeHash[:]) } else { cell.CodeHash = commitment.EmptyCodeHashArray } @@ -1029,6 +1003,9 @@ func (sdc *SharedDomainsCommitmentContext) GetStorage(plainKey []byte, cell *com if err != nil { return err } + //if sdc.sd.trace { + // fmt.Printf("[SDC] GetStorage: %x - %x\n", plainKey, enc) + //} cell.StorageLen = len(enc) copy(cell.Storage[:], enc) cell.Delete = cell.StorageLen == 0 @@ -1042,49 +1019,57 @@ func (sdc *SharedDomainsCommitmentContext) Reset() { } func (sdc *SharedDomainsCommitmentContext) TempDir() string { - return sdc.sd.aggTx.a.dirs.Tmp + return sdc.sd.aggCtx.a.dirs.Tmp } -func (sdc *SharedDomainsCommitmentContext) KeysCount() uint64 { - return sdc.updates.Size() -} +//func (ctx *SharedDomainsCommitmentContext) Hasher() hash.Hash { return ctx.updates.keccak } +// +//func (ctx *SharedDomainsCommitmentContext) SetCommitmentMode(m CommitmentMode) { ctx.mode = m } +// // TouchPlainKey marks plainKey as updated and applies different fn for different key types // (different behaviour for Code, Account and Storage key modifications). 
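// NOTE (editor sketch): the doc comment above belongs to the TouchPlainKey hunk that
// follows, which swaps the kv.Domain switch for caller-supplied touch callbacks. A
// minimal, self-contained illustration of that dispatch style — commitmentItem and
// UpdateTree here are stand-ins for the erigon-lib types, not the real ones:

package main

import "fmt"

// commitmentItem is a stand-in for the real update-tree entry.
type commitmentItem struct {
	plainKey []byte
	payload  []byte
}

// UpdateTree is a stand-in keyed by plain key.
type UpdateTree struct{ keys map[string]*commitmentItem }

func NewUpdateTree() *UpdateTree { return &UpdateTree{keys: map[string]*commitmentItem{}} }

// TouchPlainKey marks plainKey as updated and applies fn for type-specific handling:
// callers pass TouchAccount/TouchStorage/TouchCode explicitly instead of the tree
// switching on a domain enum.
func (t *UpdateTree) TouchPlainKey(key string, val []byte, fn func(c *commitmentItem, val []byte)) {
	c, ok := t.keys[key]
	if !ok {
		c = &commitmentItem{plainKey: []byte(key)}
		t.keys[key] = c
	}
	fn(c, val)
}

func (t *UpdateTree) TouchAccount(c *commitmentItem, val []byte) { c.payload = append([]byte("acc:"), val...) }
func (t *UpdateTree) TouchStorage(c *commitmentItem, val []byte) { c.payload = append([]byte("sto:"), val...) }
func (t *UpdateTree) TouchCode(c *commitmentItem, val []byte)    { c.payload = append([]byte("code:"), val...) }

func main() {
	ut := NewUpdateTree()
	ut.TouchPlainKey("0x01", []byte{0xaa}, ut.TouchAccount)
	fmt.Printf("%s\n", ut.keys["0x01"].payload) // acc:<0xaa>
}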
-func (sdc *SharedDomainsCommitmentContext) TouchKey(d kv.Domain, key string, val []byte) { +func (sdc *SharedDomainsCommitmentContext) TouchPlainKey(key string, val []byte, fn func(c *commitmentItem, val []byte)) { if sdc.discard { return } - ks := []byte(key) - switch d { - case kv.AccountsDomain: - sdc.updates.TouchPlainKey(ks, val, sdc.updates.TouchAccount) - case kv.CodeDomain: - sdc.updates.TouchPlainKey(ks, val, sdc.updates.TouchCode) - case kv.StorageDomain: - sdc.updates.TouchPlainKey(ks, val, sdc.updates.TouchStorage) - default: - panic(fmt.Errorf("TouchKey: unknown domain %s", d)) - } + sdc.updates.TouchPlainKey(key, val, fn) +} + +func (sdc *SharedDomainsCommitmentContext) KeysCount() uint64 { + return sdc.updates.Size() +} + +func (sdc *SharedDomainsCommitmentContext) TouchAccount(c *commitmentItem, val []byte) { + sdc.updates.TouchAccount(c, val) +} + +func (sdc *SharedDomainsCommitmentContext) TouchStorage(c *commitmentItem, val []byte) { + sdc.updates.TouchStorage(c, val) +} + +func (sdc *SharedDomainsCommitmentContext) TouchCode(c *commitmentItem, val []byte) { + sdc.updates.TouchCode(c, val) } // Evaluates commitment for processed state. -func (sdc *SharedDomainsCommitmentContext) ComputeCommitment(ctx context.Context, saveState bool, blockNum uint64, logPrefix string) (rootHash []byte, err error) { +func (sdc *SharedDomainsCommitmentContext) ComputeCommitment(ctext context.Context, saveState bool, blockNum uint64, logPrefix string) (rootHash []byte, err error) { defer sdc.ResetBranchCache() if dbg.DiscardCommitment() { sdc.updates.List(true) return nil, nil } - mxCommitmentRunning.Inc() defer mxCommitmentRunning.Dec() defer func(s time.Time) { mxCommitmentTook.ObserveDuration(s) }(time.Now()) - updateCount := sdc.updates.Size() + touchedKeys, updates := sdc.updates.List(true) if sdc.sd.trace { - defer sdc.sd.logger.Trace("ComputeCommitment", "block", blockNum, "keys", updateCount, "mode", sdc.mode) + defer func() { + fmt.Printf("[SDC] rootHash %x block %d keys %d mode %s\n", rootHash, blockNum, len(touchedKeys), sdc.mode) + }() } - if updateCount == 0 { + if len(touchedKeys) == 0 { rootHash, err = sdc.patriciaTrie.RootHash() return rootHash, err } @@ -1094,18 +1079,17 @@ func (sdc *SharedDomainsCommitmentContext) ComputeCommitment(ctx context.Context sdc.Reset() switch sdc.mode { - case commitment.ModeDirect: - rootHash, err = sdc.patriciaTrie.ProcessTree(ctx, sdc.updates, logPrefix) + case CommitmentModeDirect: + rootHash, err = sdc.patriciaTrie.ProcessKeys(ctext, touchedKeys, logPrefix) if err != nil { return nil, err } - case commitment.ModeUpdate: - touchedKeys, updates := sdc.updates.List(true) - rootHash, err = sdc.patriciaTrie.ProcessUpdates(ctx, touchedKeys, updates) + case CommitmentModeUpdate: + rootHash, err = sdc.patriciaTrie.ProcessUpdates(ctext, touchedKeys, updates) if err != nil { return nil, err } - case commitment.ModeDisabled: + case CommitmentModeDisabled: return nil, nil default: return nil, fmt.Errorf("invalid commitment mode: %s", sdc.mode) @@ -1122,7 +1106,7 @@ func (sdc *SharedDomainsCommitmentContext) ComputeCommitment(ctx context.Context } func (sdc *SharedDomainsCommitmentContext) storeCommitmentState(blockNum uint64, rh []byte) error { - if sdc.sd.aggTx == nil { + if sdc.sd.aggCtx == nil { return fmt.Errorf("store commitment state: AggregatorContext is not initialized") } encodedState, err := sdc.encodeCommitmentState(blockNum, sdc.sd.txNum) @@ -1175,7 +1159,7 @@ func (sdc *SharedDomainsCommitmentContext) encodeCommitmentState(blockNum, txNum 
var keyCommitmentState = []byte("state") func (sd *SharedDomains) LatestCommitmentState(tx kv.Tx, sinceTx, untilTx uint64) (blockNum, txNum uint64, state []byte, err error) { - return sd.sdCtx.LatestCommitmentState(tx, sd.aggTx.d[kv.CommitmentDomain], sinceTx, untilTx) + return sd.sdCtx.LatestCommitmentState(tx, sd.aggCtx.d[kv.CommitmentDomain], sinceTx, untilTx) } func _decodeTxBlockNums(v []byte) (txNum, blockNum uint64) { @@ -1223,7 +1207,7 @@ func (sdc *SharedDomainsCommitmentContext) LatestCommitmentState(tx kv.Tx, cd *D } txn, _ := _decodeTxBlockNums(value) - //fmt.Printf("[commitment] seekInFiles found committed txn %d block %d\n", txn, bn) + //fmt.Printf("[commitment] Seek found committed txn %d block %d\n", txn, bn) if txn >= sinceTx && txn <= untilTx { state = value } diff --git a/erigon-lib/state/domain_shared_bench_test.go b/erigon-lib/state/domain_shared_bench_test.go index e4890f8f1ae..ed44bf58a57 100644 --- a/erigon-lib/state/domain_shared_bench_test.go +++ b/erigon-lib/state/domain_shared_bench_test.go @@ -85,11 +85,12 @@ func Benchmark_SharedDomains_GetLatest(t *testing.B) { require.NoError(t, err) } } - + //}) + //t.Run("GetHistory", func(t *testing.B) { for ik := 0; ik < t.N; ik++ { for i := 0; i < len(keys); i++ { ts := uint64(rnd.Intn(int(maxTx))) - v, ok, err := ac2.HistorySeek(kv.AccountsHistory, keys[i], ts, rwTx) + v, ok, err := ac2.HistoryGet(kv.AccountsHistory, keys[i], ts, rwTx) require.True(t, ok) require.NotNil(t, v) @@ -97,47 +98,6 @@ func Benchmark_SharedDomains_GetLatest(t *testing.B) { require.NoError(t, err) } } -} - -func BenchmarkSharedDomains_ComputeCommitment(b *testing.B) { - b.StopTimer() - - stepSize := uint64(100) - db, agg := testDbAndAggregatorBench(b, stepSize) - - ctx := context.Background() - rwTx, err := db.BeginRw(ctx) - require.NoError(b, err) - defer rwTx.Rollback() - - ac := agg.BeginFilesRo() - defer ac.Close() - - domains, err := NewSharedDomains(WrapTxWithCtx(rwTx, ac), log.New()) - require.NoError(b, err) - defer domains.Close() - - maxTx := stepSize * 17 - data := generateTestDataForDomainCommitment(b, length.Addr, length.Addr+length.Hash, maxTx, 15, 100) - require.NotNil(b, data) - - for domName, d := range data { - fom := kv.AccountsDomain - if domName == "storage" { - fom = kv.StorageDomain - } - for key, upd := range d { - for _, u := range upd { - domains.SetTxNum(u.txNum) - err := domains.DomainPut(fom, []byte(key), nil, u.value, nil, 0) - require.NoError(b, err) - } - } - } + //}) - b.StartTimer() - for i := 0; i < b.N; i++ { - _, err := domains.ComputeCommitment(ctx, true, domains.BlockNum(), "") - require.NoError(b, err) - } } diff --git a/erigon-lib/state/domain_shared_test.go b/erigon-lib/state/domain_shared_test.go index 51fb46ae3bf..5e26a655c96 100644 --- a/erigon-lib/state/domain_shared_test.go +++ b/erigon-lib/state/domain_shared_test.go @@ -4,14 +4,15 @@ import ( "context" "encoding/binary" "fmt" + "math/rand" + "testing" + "time" + "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/require" - "math/rand" - "testing" - "time" "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/types" @@ -424,11 +425,10 @@ func TestSharedDomain_StorageIter(t *testing.T) { ac.Close() ac = agg.BeginFilesRo() - //err = db.Update(ctx, func(tx kv.RwTx) error { - // _, err = ac.PruneSmallBatches(ctx, 1*time.Minute, tx) - // return err - //}) - _, err = ac.PruneSmallBatchesDb(ctx, 
1*time.Minute, db) + err = db.Update(ctx, func(tx kv.RwTx) error { + _, err = ac.PruneSmallBatches(ctx, 1*time.Minute, tx) + return err + }) require.NoError(t, err) ac.Close() diff --git a/erigon-lib/state/domain_test.go b/erigon-lib/state/domain_test.go index a9d9393b7bf..43643643890 100644 --- a/erigon-lib/state/domain_test.go +++ b/erigon-lib/state/domain_test.go @@ -1435,10 +1435,8 @@ func generateTestDataForDomainCommitment(tb testing.TB, keySize1, keySize2, tota key1 := generateRandomKey(r, keySize1) accs[key1] = generateAccountUpdates(r, totalTx, keyTxsLimit) key2 := key1 + generateRandomKey(r, keySize2-keySize1) - stor[key2] = generateArbitraryValueUpdates(r, totalTx, keyTxsLimit, 32) + stor[key2] = generateStorageUpdates(r, totalTx, keyTxsLimit) } - doms["accounts"] = accs - doms["storage"] = stor return doms } @@ -1496,15 +1494,14 @@ func generateAccountUpdates(r *rand.Rand, totalTx, keyTxsLimit uint64) []upd { return updates } -func generateArbitraryValueUpdates(r *rand.Rand, totalTx, keyTxsLimit, maxSize uint64) []upd { +func generateStorageUpdates(r *rand.Rand, totalTx, keyTxsLimit uint64) []upd { updates := make([]upd, 0) usedTxNums := make(map[uint64]bool) - //maxStorageSize := 24 * (1 << 10) // limit on contract code for i := uint64(0); i < keyTxsLimit; i++ { txNum := generateRandomTxNum(r, totalTx, usedTxNums) - value := make([]byte, r.Intn(int(maxSize))) + value := make([]byte, r.Intn(24*(1<<10))) r.Read(value) updates = append(updates, upd{txNum: txNum, value: value}) @@ -1621,6 +1618,7 @@ func TestDomain_CanPruneAfterAggregation(t *testing.T) { d.historyLargeValues = false d.History.compression = CompressKeys | CompressVals d.compression = CompressKeys | CompressVals + d.withExistenceIndex = true dc := d.BeginFilesRo() defer dc.Close() @@ -2512,9 +2510,7 @@ func TestDomainContext_findShortenedKey(t *testing.T) { lastFile := findFile(st, en) require.NotNilf(t, lastFile, "%d-%d", st/dc.d.aggregationStep, en/dc.d.aggregationStep) - lf := NewArchiveGetter(lastFile.decompressor.MakeGetter(), d.compression) - - shortenedKey, found := dc.findShortenedKey([]byte(key), lf, lastFile) + shortenedKey, found := dc.findShortenedKey([]byte(key), lastFile) require.Truef(t, found, "key %d/%d %x file %d %d %s", ki, len(data), []byte(key), lastFile.startTxNum, lastFile.endTxNum, lastFile.decompressor.FileName()) require.NotNil(t, shortenedKey) ki++ diff --git a/erigon-lib/state/history.go b/erigon-lib/state/history.go index 62383165a33..9c7f77a147a 100644 --- a/erigon-lib/state/history.go +++ b/erigon-lib/state/history.go @@ -123,7 +123,7 @@ func NewHistory(cfg histCfg, aggregationStep uint64, filenameBase, indexKeysTabl } h._visibleFiles = []ctxItem{} var err error - h.InvertedIndex, err = NewInvertedIndex(cfg.iiCfg, aggregationStep, filenameBase, indexKeysTable, indexTable, func(fromStep, toStep uint64) bool { return dir.FileExist(h.vFilePath(fromStep, toStep)) }, logger) + h.InvertedIndex, err = NewInvertedIndex(cfg.iiCfg, aggregationStep, filenameBase, indexKeysTable, indexTable, cfg.withExistenceIndex, func(fromStep, toStep uint64) bool { return dir.FileExist(h.vFilePath(fromStep, toStep)) }, logger) if err != nil { return nil, fmt.Errorf("NewHistory: %s, %w", filenameBase, err) } @@ -337,13 +337,15 @@ func (h *History) buildVI(ctx context.Context, historyIdxPath string, hist, efHi TmpDir: h.dirs.Tmp, IndexFile: historyIdxPath, Salt: h.salt, - NoFsync: h.noFsync, }, h.logger) if err != nil { return "", fmt.Errorf("create recsplit: %w", err) } defer rs.Close() rs.LogLvl(log.LvlTrace) 
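// NOTE (editor sketch): the added lines just below move the fsync toggle out of the
// RecSplitArgs struct and onto the already-built writer. A tiny stand-in showing the
// same build-then-configure pattern — IndexBuilder is hypothetical, not the recsplit
// API:

package main

import "fmt"

type IndexBuilder struct{ fsync bool }

func NewIndexBuilder() *IndexBuilder  { return &IndexBuilder{fsync: true} }
func (b *IndexBuilder) DisableFsync() { b.fsync = false }
func (b *IndexBuilder) Build()        { fmt.Println("built, fsync =", b.fsync) }

func main() {
	rs := NewIndexBuilder()
	noFsync := true // e.g. in tests, where durability is irrelevant and fsync is slow
	if noFsync {
		rs.DisableFsync()
	}
	rs.Build()
}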
+ if h.noFsync { + rs.DisableFsync() + } var historyKey []byte var txKey [8]byte @@ -513,7 +515,7 @@ func (ht *HistoryRoTx) newWriter(tmpdir string, discard bool) *historyBufferedWr historyKey: make([]byte, 128), largeValues: ht.h.historyLargeValues, historyValsTable: ht.h.historyValsTable, - historyVals: etl.NewCollector("flush "+ht.h.historyValsTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), ht.h.logger), + historyVals: etl.NewCollector(ht.h.historyValsTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), ht.h.logger), ii: ht.iit.newWriter(tmpdir, discard), } @@ -583,7 +585,7 @@ func (h *History) collate(ctx context.Context, step, txFrom, txTo uint64, roTx k } }() - comp, err := seg.NewCompressor(ctx, "collate hist "+h.filenameBase, historyPath, h.dirs.Tmp, seg.MinPatternScore, h.compressWorkers, log.LvlTrace, h.logger) + comp, err := seg.NewCompressor(ctx, "collate history", historyPath, h.dirs.Tmp, seg.MinPatternScore, h.compressWorkers, log.LvlTrace, h.logger) if err != nil { return HistoryCollation{}, fmt.Errorf("create %s history compressor: %w", h.filenameBase, err) } @@ -596,7 +598,7 @@ func (h *History) collate(ctx context.Context, step, txFrom, txTo uint64, roTx k defer keysCursor.Close() binary.BigEndian.PutUint64(txKey[:], txFrom) - collector := etl.NewCollector("collate hist "+h.filenameBase, h.iiCfg.dirs.Tmp, etl.NewSortableBuffer(CollateETLRAM), h.logger) + collector := etl.NewCollector(h.historyValsTable, h.iiCfg.dirs.Tmp, etl.NewSortableBuffer(CollateETLRAM), h.logger) defer collector.Close() for txnmb, k, err := keysCursor.Seek(txKey[:]); err == nil && txnmb != nil; txnmb, k, err = keysCursor.Next() { @@ -634,7 +636,7 @@ func (h *History) collate(ctx context.Context, step, txFrom, txTo uint64, roTx k defer cd.Close() } - efComp, err := seg.NewCompressor(ctx, "collate idx "+h.filenameBase, efHistoryPath, h.dirs.Tmp, seg.MinPatternScore, h.compressWorkers, log.LvlTrace, h.logger) + efComp, err := seg.NewCompressor(ctx, "ef history", efHistoryPath, h.dirs.Tmp, seg.MinPatternScore, h.compressWorkers, log.LvlTrace, h.logger) if err != nil { return HistoryCollation{}, fmt.Errorf("create %s ef history compressor: %w", h.filenameBase, err) } @@ -849,6 +851,12 @@ func (h *History) buildFiles(ctx context.Context, step uint64, collation History return HistoryFiles{}, err } } + if h.InvertedIndex.withExistenceIndex { + existenceIdxPath := h.efExistenceIdxFilePath(step, step+1) + if efExistence, err = buildIndexFilterThenOpen(ctx, efHistoryDecomp, h.compression, existenceIdxPath, h.dirs.Tmp, h.salt, ps, h.logger, h.noFsync); err != nil { + return HistoryFiles{}, fmt.Errorf("build %s .ef history idx: %w", h.filenameBase, err) + } + } historyDecomp, err = seg.NewDecompressor(collation.historyPath) if err != nil { @@ -1149,10 +1157,10 @@ func (ht *HistoryRoTx) getFile(txNum uint64) (it ctxItem, ok bool) { return it, false } -func (ht *HistoryRoTx) historySeekInFiles(key []byte, txNum uint64) ([]byte, bool, error) { +func (ht *HistoryRoTx) GetNoState(key []byte, txNum uint64) ([]byte, bool, error) { // Files list of II and History is different // it means II can't return index of file, but can return TxNum which History will use to find own file - ok, histTxNum := ht.iit.seekInFiles(key, txNum) + ok, histTxNum := ht.iit.Seek(key, txNum) if !ok { return nil, false, nil } @@ -1179,7 +1187,7 @@ func (ht *HistoryRoTx) historySeekInFiles(key []byte, txNum uint64) ([]byte, boo } func (hs *HistoryStep) GetNoState(key []byte, txNum uint64) ([]byte, bool, uint64) { - 
//fmt.Printf("historySeekInFiles [%x] %d\n", key, txNum) + //fmt.Printf("GetNoState [%x] %d\n", key, txNum) if hs.indexFile.reader.Empty() { return nil, false, txNum } @@ -1244,10 +1252,10 @@ func (ht *HistoryRoTx) encodeTs(txNum uint64) []byte { return ht._bufTs } -// HistorySeek searches history for a value of specified key before txNum +// GetNoStateWithRecent searches history for a value of specified key before txNum // second return value is true if the value is found in the history (even if it is nil) -func (ht *HistoryRoTx) HistorySeek(key []byte, txNum uint64, roTx kv.Tx) ([]byte, bool, error) { - v, ok, err := ht.historySeekInFiles(key, txNum) +func (ht *HistoryRoTx) GetNoStateWithRecent(key []byte, txNum uint64, roTx kv.Tx) ([]byte, bool, error) { + v, ok, err := ht.GetNoState(key, txNum) if err != nil { return nil, ok, err } @@ -1255,7 +1263,7 @@ func (ht *HistoryRoTx) HistorySeek(key []byte, txNum uint64, roTx kv.Tx) ([]byte return v, true, nil } - return ht.historySeekInDB(key, txNum, roTx) + return ht.getNoStateFromDB(key, txNum, roTx) } func (ht *HistoryRoTx) valsCursor(tx kv.Tx) (c kv.Cursor, err error) { @@ -1279,7 +1287,7 @@ func (ht *HistoryRoTx) valsCursorDup(tx kv.Tx) (c kv.CursorDupSort, err error) { return ht.valsCDup, nil } -func (ht *HistoryRoTx) historySeekInDB(key []byte, txNum uint64, tx kv.Tx) ([]byte, bool, error) { +func (ht *HistoryRoTx) getNoStateFromDB(key []byte, txNum uint64, tx kv.Tx) ([]byte, bool, error) { if ht.h.historyLargeValues { c, err := ht.valsCursor(tx) if err != nil { @@ -1393,7 +1401,7 @@ func (hi *StateAsOfIterF) advanceInFiles() error { } } - if hi.from != nil && bytes.Compare(key, hi.from) < 0 { //TODO: replace by seekInFiles() + if hi.from != nil && bytes.Compare(key, hi.from) < 0 { //TODO: replace by Seek() continue } @@ -1590,7 +1598,7 @@ func (ht *HistoryRoTx) iterateChangedFrozen(fromTxNum, toTxNum int, asc order.By return iter.EmptyKV, nil } - if fromTxNum >= 0 && ht.iit.lastTxNumInFiles() <= uint64(fromTxNum) { + if fromTxNum >= 0 && ht.iit.files[len(ht.iit.files)-1].endTxNum <= uint64(fromTxNum) { return iter.EmptyKV, nil } @@ -1627,7 +1635,7 @@ func (ht *HistoryRoTx) iterateChangedRecent(fromTxNum, toTxNum int, asc order.By if asc == order.Desc { panic("not supported yet") } - rangeIsInFiles := toTxNum >= 0 && len(ht.iit.files) > 0 && ht.iit.lastTxNumInFiles() >= uint64(toTxNum) + rangeIsInFiles := toTxNum >= 0 && len(ht.iit.files) > 0 && ht.iit.files[len(ht.iit.files)-1].endTxNum >= uint64(toTxNum) if rangeIsInFiles { return iter.EmptyKVS, nil } @@ -1662,74 +1670,6 @@ func (ht *HistoryRoTx) HistoryRange(fromTxNum, toTxNum int, asc order.By, limit return iter.MergeKVS(itOnDB, itOnFiles, limit), nil } -func (ht *HistoryRoTx) idxRangeRecent(key []byte, startTxNum, endTxNum int, asc order.By, limit int, roTx kv.Tx) (iter.U64, error) { - var dbIt iter.U64 - if ht.h.historyLargeValues { - from := make([]byte, len(key)+8) - copy(from, key) - var fromTxNum uint64 - if startTxNum >= 0 { - fromTxNum = uint64(startTxNum) - } - binary.BigEndian.PutUint64(from[len(key):], fromTxNum) - to := common.Copy(from) - toTxNum := uint64(math.MaxUint64) - if endTxNum >= 0 { - toTxNum = uint64(endTxNum) - } - binary.BigEndian.PutUint64(to[len(key):], toTxNum) - var it iter.KV - var err error - if asc { - it, err = roTx.RangeAscend(ht.h.historyValsTable, from, to, limit) - } else { - it, err = roTx.RangeDescend(ht.h.historyValsTable, from, to, limit) - } - if err != nil { - return nil, err - } - dbIt = iter.TransformKV2U64(it, func(k, v []byte) 
(uint64, error) { - if len(k) < 8 { - return 0, fmt.Errorf("unexpected large key length %d", len(k)) - } - return binary.BigEndian.Uint64(k[len(k)-8:]), nil - }) - } else { - var from, to []byte - if startTxNum >= 0 { - from = make([]byte, 8) - binary.BigEndian.PutUint64(from, uint64(startTxNum)) - } - if endTxNum >= 0 { - to = make([]byte, 8) - binary.BigEndian.PutUint64(to, uint64(endTxNum)) - } - it, err := roTx.RangeDupSort(ht.h.historyValsTable, key, from, to, asc, limit) - if err != nil { - return nil, err - } - dbIt = iter.TransformKV2U64(it, func(k, v []byte) (uint64, error) { - if len(v) < 8 { - return 0, fmt.Errorf("unexpected small value length %d", len(v)) - } - return binary.BigEndian.Uint64(v), nil - }) - } - - return dbIt, nil -} -func (ht *HistoryRoTx) IdxRange(key []byte, startTxNum, endTxNum int, asc order.By, limit int, roTx kv.Tx) (iter.U64, error) { - frozenIt, err := ht.iit.iterateRangeFrozen(key, startTxNum, endTxNum, asc, limit) - if err != nil { - return nil, err - } - recentIt, err := ht.idxRangeRecent(key, startTxNum, endTxNum, asc, limit, roTx) - if err != nil { - return nil, err - } - return iter.Union[uint64](frozenIt, recentIt, asc, limit), nil -} - type HistoryChangesIterFiles struct { hc *HistoryRoTx nextVal []byte @@ -2070,3 +2010,71 @@ func (hs *HistoryStep) Clone() *HistoryStep { }, } } + +func (ht *HistoryRoTx) idxRangeRecent(key []byte, startTxNum, endTxNum int, asc order.By, limit int, roTx kv.Tx) (iter.U64, error) { + var dbIt iter.U64 + if ht.h.historyLargeValues { + from := make([]byte, len(key)+8) + copy(from, key) + var fromTxNum uint64 + if startTxNum >= 0 { + fromTxNum = uint64(startTxNum) + } + binary.BigEndian.PutUint64(from[len(key):], fromTxNum) + to := common.Copy(from) + toTxNum := uint64(math.MaxUint64) + if endTxNum >= 0 { + toTxNum = uint64(endTxNum) + } + binary.BigEndian.PutUint64(to[len(key):], toTxNum) + var it iter.KV + var err error + if asc { + it, err = roTx.RangeAscend(ht.h.historyValsTable, from, to, limit) + } else { + it, err = roTx.RangeDescend(ht.h.historyValsTable, from, to, limit) + } + if err != nil { + return nil, err + } + dbIt = iter.TransformKV2U64(it, func(k, v []byte) (uint64, error) { + if len(k) < 8 { + return 0, fmt.Errorf("unexpected large key length %d", len(k)) + } + return binary.BigEndian.Uint64(k[len(k)-8:]), nil + }) + } else { + var from, to []byte + if startTxNum >= 0 { + from = make([]byte, 8) + binary.BigEndian.PutUint64(from, uint64(startTxNum)) + } + if endTxNum >= 0 { + to = make([]byte, 8) + binary.BigEndian.PutUint64(to, uint64(endTxNum)) + } + it, err := roTx.RangeDupSort(ht.h.historyValsTable, key, from, to, asc, limit) + if err != nil { + return nil, err + } + dbIt = iter.TransformKV2U64(it, func(k, v []byte) (uint64, error) { + if len(v) < 8 { + return 0, fmt.Errorf("unexpected small value length %d", len(v)) + } + return binary.BigEndian.Uint64(v), nil + }) + } + + return dbIt, nil +} +func (ht *HistoryRoTx) IdxRange(key []byte, startTxNum, endTxNum int, asc order.By, limit int, roTx kv.Tx) (iter.U64, error) { + frozenIt, err := ht.iit.iterateRangeFrozen(key, startTxNum, endTxNum, asc, limit) + if err != nil { + return nil, err + } + recentIt, err := ht.idxRangeRecent(key, startTxNum, endTxNum, asc, limit, roTx) + if err != nil { + return nil, err + } + return iter.Union[uint64](frozenIt, recentIt, asc, limit), nil +} diff --git a/erigon-lib/state/history_test.go b/erigon-lib/state/history_test.go index 00f936b9073..a7b03759f9e 100644 --- a/erigon-lib/state/history_test.go +++ 
b/erigon-lib/state/history_test.go @@ -622,7 +622,7 @@ func checkHistoryHistory(t *testing.T, h *History, txs uint64) { binary.BigEndian.PutUint64(k[:], keyNum) binary.BigEndian.PutUint64(v[:], valNum) k[0], v[0] = 0x01, 0xff - val, ok, err := hc.historySeekInFiles(k[:], txNum+1) + val, ok, err := hc.GetNoState(k[:], txNum+1) //require.Equal(t, ok, txNum < 976) if ok { require.NoError(t, err, label) @@ -1088,15 +1088,15 @@ func TestIterateChanged2(t *testing.T) { require.NoError(err) defer tx.Rollback() - v, ok, err := hc.HistorySeek(hexutility.MustDecodeHex("0100000000000001"), 900, tx) + v, ok, err := hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 900, tx) require.NoError(err) require.True(ok) require.Equal(hexutility.MustDecodeHex("ff00000000000383"), v) - v, ok, err = hc.HistorySeek(hexutility.MustDecodeHex("0100000000000001"), 0, tx) + v, ok, err = hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 0, tx) require.NoError(err) require.True(ok) require.Equal([]byte{}, v) - v, ok, err = hc.HistorySeek(hexutility.MustDecodeHex("0100000000000001"), 1000, tx) + v, ok, err = hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 1000, tx) require.NoError(err) require.True(ok) require.Equal(hexutility.MustDecodeHex("ff000000000003e7"), v) @@ -1142,15 +1142,15 @@ func TestIterateChanged2(t *testing.T) { require.NoError(err) defer tx.Rollback() - v, ok, err := hc.HistorySeek(hexutility.MustDecodeHex("0100000000000001"), 900, tx) + v, ok, err := hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 900, tx) require.NoError(err) require.True(ok) require.Equal(hexutility.MustDecodeHex("ff00000000000383"), v) - v, ok, err = hc.HistorySeek(hexutility.MustDecodeHex("0100000000000001"), 0, tx) + v, ok, err = hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 0, tx) require.NoError(err) require.True(ok) require.Equal([]byte{}, v) - v, ok, err = hc.HistorySeek(hexutility.MustDecodeHex("0100000000000001"), 1000, tx) + v, ok, err = hc.GetNoStateWithRecent(hexutility.MustDecodeHex("0100000000000001"), 1000, tx) require.NoError(err) require.True(ok) require.Equal(hexutility.MustDecodeHex("ff000000000003e7"), v) diff --git a/erigon-lib/state/inverted_index.go b/erigon-lib/state/inverted_index.go index 4d8c20c8211..781121116e9 100644 --- a/erigon-lib/state/inverted_index.go +++ b/erigon-lib/state/inverted_index.go @@ -83,6 +83,8 @@ type InvertedIndex struct { //TODO: re-visit this check - maybe we don't need it. It's abot kill in the middle of merge integrityCheck func(fromStep, toStep uint64) bool + withExistenceIndex bool + // fields for history write logger log.Logger @@ -99,29 +101,36 @@ type iiCfg struct { db kv.RoDB // global db pointer. mostly for background warmup. 
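// NOTE (editor sketch): the inverted-index hunks around this point re-introduce an
// optional existence filter (.efei files): at build time every key of a file is
// murmur3-hashed with the directory salt into a membership filter, and Seek consults
// it to skip files that certainly do not contain the key. A self-contained stand-in —
// map-backed, so unlike the real probabilistic ExistenceFilter it has no false
// positives:

package main

import (
	"fmt"

	"github.com/spaolacci/murmur3"
)

type existenceFilter struct{ hashes map[uint64]struct{} }

func buildFilter(keys [][]byte, salt uint32) *existenceFilter {
	f := &existenceFilter{hashes: map[uint64]struct{}{}}
	hasher := murmur3.New128WithSeed(salt)
	for _, k := range keys {
		hasher.Reset()
		hasher.Write(k) //nolint:errcheck
		hi, _ := hasher.Sum128()
		f.hashes[hi] = struct{}{} // real filter: idxFilter.AddHash(hi)
	}
	return f
}

// ContainsHash mirrors the check Seek performs before touching a file's index.
func (f *existenceFilter) ContainsHash(hi uint64) bool { _, ok := f.hashes[hi]; return ok }

func main() {
	f := buildFilter([][]byte{[]byte("key-in-file")}, 1)
	h := murmur3.New128WithSeed(1)
	h.Write([]byte("key-absent")) //nolint:errcheck
	hi, _ := h.Sum128()
	fmt.Println(f.ContainsHash(hi)) // false => this file can be skipped entirely
}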
} -func NewInvertedIndex(cfg iiCfg, aggregationStep uint64, filenameBase, indexKeysTable, indexTable string, integrityCheck func(fromStep uint64, toStep uint64) bool, logger log.Logger) (*InvertedIndex, error) { +func NewInvertedIndex(cfg iiCfg, aggregationStep uint64, filenameBase, indexKeysTable, indexTable string, withExistenceIndex bool, integrityCheck func(fromStep uint64, toStep uint64) bool, logger log.Logger) (*InvertedIndex, error) { if cfg.dirs.SnapDomain == "" { panic("empty `dirs` varialbe") } ii := InvertedIndex{ - iiCfg: cfg, - dirtyFiles: btree2.NewBTreeGOptions[*filesItem](filesItemLess, btree2.Options{Degree: 128, NoLocks: false}), - aggregationStep: aggregationStep, - filenameBase: filenameBase, - indexKeysTable: indexKeysTable, - indexTable: indexTable, - compressWorkers: 1, - integrityCheck: integrityCheck, - logger: logger, - compression: CompressNone, + iiCfg: cfg, + dirtyFiles: btree2.NewBTreeGOptions[*filesItem](filesItemLess, btree2.Options{Degree: 128, NoLocks: false}), + aggregationStep: aggregationStep, + filenameBase: filenameBase, + indexKeysTable: indexKeysTable, + indexTable: indexTable, + compressWorkers: 1, + integrityCheck: integrityCheck, + withExistenceIndex: withExistenceIndex, + logger: logger, + compression: CompressNone, } ii.indexList = withHashMap + if ii.withExistenceIndex { + ii.indexList |= withExistence + } ii._visibleFiles = []ctxItem{} return &ii, nil } +func (ii *InvertedIndex) efExistenceIdxFilePath(fromStep, toStep uint64) string { + return filepath.Join(ii.dirs.SnapAccessors, fmt.Sprintf("v1-%s.%d-%d.efei", ii.filenameBase, fromStep, toStep)) +} func (ii *InvertedIndex) efAccessorFilePath(fromStep, toStep uint64) string { return filepath.Join(ii.dirs.SnapAccessors, fmt.Sprintf("v1-%s.%d-%d.efi", ii.filenameBase, fromStep, toStep)) } @@ -242,6 +251,18 @@ func (ii *InvertedIndex) missedIdxFiles() (l []*filesItem) { }) return l } +func (ii *InvertedIndex) missedExistenceFilterFiles() (l []*filesItem) { + ii.dirtyFiles.Walk(func(items []*filesItem) bool { + for _, item := range items { + fromStep, toStep := item.startTxNum/ii.aggregationStep, item.endTxNum/ii.aggregationStep + if !dir.FileExist(ii.efExistenceIdxFilePath(fromStep, toStep)) { + l = append(l, item) + } + } + return true + }) + return l +} func (ii *InvertedIndex) buildEfi(ctx context.Context, item *filesItem, ps *background.ProgressSet) (err error) { if item.decompressor == nil { @@ -250,6 +271,56 @@ func (ii *InvertedIndex) buildEfi(ctx context.Context, item *filesItem, ps *back fromStep, toStep := item.startTxNum/ii.aggregationStep, item.endTxNum/ii.aggregationStep return ii.buildMapIdx(ctx, fromStep, toStep, item.decompressor, ps) } +func (ii *InvertedIndex) buildExistenceFilter(ctx context.Context, item *filesItem, ps *background.ProgressSet) (err error) { + if item.decompressor == nil { + return fmt.Errorf("buildExistenceFilter: passed item with nil decompressor %s %d-%d", ii.filenameBase, item.startTxNum/ii.aggregationStep, item.endTxNum/ii.aggregationStep) + } + if !ii.withExistenceIndex { + return nil + } + fromStep, toStep := item.startTxNum/ii.aggregationStep, item.endTxNum/ii.aggregationStep + idxPath := ii.efExistenceIdxFilePath(fromStep, toStep) + return buildIdxFilter(ctx, item.decompressor, ii.compression, idxPath, ii.salt, ps, ii.logger, ii.noFsync) +} + +func buildIdxFilter(ctx context.Context, d *seg.Decompressor, compressed FileCompression, idxPath string, salt *uint32, ps *background.ProgressSet, logger log.Logger, noFsync bool) error { + g := 
NewArchiveGetter(d.MakeGetter(), compressed) + _, fileName := filepath.Split(idxPath) + count := d.Count() / 2 + + p := ps.AddNew(fileName, uint64(count)) + defer ps.Delete(p) + defer d.EnableReadAhead().DisableReadAhead() + + idxFilter, err := NewExistenceFilter(uint64(count), idxPath) + if err != nil { + return err + } + if noFsync { + idxFilter.DisableFsync() + } + hasher := murmur3.New128WithSeed(*salt) + + key := make([]byte, 0, 256) + g.Reset(0) + for g.HasNext() { + key, _ = g.Next(key[:0]) + hasher.Reset() + hasher.Write(key) //nolint:errcheck + hi, _ := hasher.Sum128() + idxFilter.AddHash(hi) + + // Skip value + g.Skip() + + p.Processed.Add(1) + } + if err := idxFilter.Build(); err != nil { + return err + } + + return nil +} // BuildMissedIndices - produce .efi/.vi/.kvi from .ef/.v/.kv func (ii *InvertedIndex) BuildMissedIndices(ctx context.Context, g *errgroup.Group, ps *background.ProgressSet) { @@ -260,6 +331,12 @@ func (ii *InvertedIndex) BuildMissedIndices(ctx context.Context, g *errgroup.Gro }) } + for _, item := range ii.missedExistenceFilterFiles() { + item := item + g.Go(func() error { + return ii.buildExistenceFilter(ctx, item, ps) + }) + } } func (ii *InvertedIndex) openFiles() error { @@ -306,6 +383,16 @@ func (ii *InvertedIndex) openFiles() error { } } } + if item.existence == nil && ii.withExistenceIndex { + fPath := ii.efExistenceIdxFilePath(fromStep, toStep) + if dir.FileExist(fPath) { + if item.existence, err = OpenExistenceFilter(fPath); err != nil { + _, fName := filepath.Split(fPath) + ii.logger.Warn("[agg] InvertedIndex.openFiles", "err", err, "f", fName) + // don't interrupt on error. other files may be good + } + } + } } return true @@ -427,8 +514,8 @@ func (iit *InvertedIndexRoTx) newWriter(tmpdir string, discard bool) *invertedIn indexKeysTable: iit.ii.indexKeysTable, indexTable: iit.ii.indexTable, // etl collector doesn't fsync: means if have enough ram, all files produced by all collectors will be in ram - indexKeys: etl.NewCollector("flush "+iit.ii.indexKeysTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), iit.ii.logger), - index: etl.NewCollector("flush "+iit.ii.indexTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), iit.ii.logger), + indexKeys: etl.NewCollector(iit.ii.indexKeysTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), iit.ii.logger), + index: etl.NewCollector(iit.ii.indexTable, tmpdir, etl.NewSortableBuffer(WALCollectorRAM), iit.ii.logger), } w.indexKeys.LogLvl(log.LvlTrace) w.index.LogLvl(log.LvlTrace) @@ -533,13 +620,18 @@ func (iit *InvertedIndexRoTx) statelessIdxReader(i int) *recsplit.IndexReader { return r } -func (iit *InvertedIndexRoTx) seekInFiles(key []byte, txNum uint64) (found bool, equalOrHigherTxNum uint64) { +func (iit *InvertedIndexRoTx) Seek(key []byte, txNum uint64) (found bool, equalOrHigherTxNum uint64) { hi, lo := iit.hashKey(key) for i := 0; i < len(iit.files); i++ { if iit.files[i].endTxNum <= txNum { continue } + if iit.ii.withExistenceIndex && iit.files[i].src.existence != nil { + if !iit.files[i].src.existence.ContainsHash(hi) { + continue + } + } offset, ok := iit.statelessIdxReader(i).TwoLayerLookupByHash(hi, lo) if !ok { continue @@ -561,11 +653,6 @@ func (iit *InvertedIndexRoTx) seekInFiles(key []byte, txNum uint64) (found bool, return false, 0 } -// it is assumed files are always sorted -func (iit *InvertedIndexRoTx) lastTxNumInFiles() uint64 { - return iit.files[len(iit.files)-1].endTxNum -} - // IdxRange - return range of txNums for given `key` // is to be used in public API, therefore it relies 
on read-only transaction // so that iteration can be done even when the inverted index is being updated. @@ -585,12 +672,12 @@ func (iit *InvertedIndexRoTx) IdxRange(key []byte, startTxNum, endTxNum int, asc func (iit *InvertedIndexRoTx) recentIterateRange(key []byte, startTxNum, endTxNum int, asc order.By, limit int, roTx kv.Tx) (iter.U64, error) { //optimization: return empty pre-allocated iterator if range is frozen if asc { - isFrozenRange := len(iit.files) > 0 && endTxNum >= 0 && iit.lastTxNumInFiles() >= uint64(endTxNum) + isFrozenRange := len(iit.files) > 0 && endTxNum >= 0 && iit.files[len(iit.files)-1].endTxNum >= uint64(endTxNum) if isFrozenRange { return iter.EmptyU64, nil } } else { - isFrozenRange := len(iit.files) > 0 && startTxNum >= 0 && iit.lastTxNumInFiles() >= uint64(startTxNum) + isFrozenRange := len(iit.files) > 0 && startTxNum >= 0 && iit.files[len(iit.files)-1].endTxNum >= uint64(startTxNum) if isFrozenRange { return iter.EmptyU64, nil } @@ -759,10 +846,6 @@ func (iit *InvertedIndexRoTx) Prune(ctx context.Context, rwTx kv.RwTx, txFrom, t defer cleanup() } - if limit == 0 { - limit = math.MaxUint64 - } - ii := iit.ii //defer func() { // ii.logger.Error("[snapshots] prune index", @@ -773,38 +856,53 @@ func (iit *InvertedIndexRoTx) Prune(ctx context.Context, rwTx kv.RwTx, txFrom, t // "tx until limit", limit) //}() + // do not collect and sort keys if it's History index + var indexWithHistoryValues bool + { + itc, err := rwTx.CursorDupSort(ii.indexTable) + if err != nil { + return nil, err + } + idxValuesCount, err := itc.Count() + itc.Close() + if err != nil { + return nil, err + } + indexWithHistoryValues = idxValuesCount == 0 && fn != nil + } + keysCursor, err := rwTx.RwCursorDupSort(ii.indexKeysTable) if err != nil { return stat, fmt.Errorf("create %s keys cursor: %w", ii.filenameBase, err) } defer keysCursor.Close() - keysCursorForDel, err := rwTx.RwCursorDupSort(ii.indexKeysTable) - if err != nil { - return stat, fmt.Errorf("create %s keys cursor: %w", ii.filenameBase, err) - } - defer keysCursorForDel.Close() - idxC, err := rwTx.RwCursorDupSort(ii.indexTable) + + var txKey [8]byte + binary.BigEndian.PutUint64(txKey[:], txFrom) + k, v, err := keysCursor.Seek(txKey[:]) if err != nil { return nil, err } - defer idxC.Close() - idxValuesCount, err := idxC.Count() - if err != nil { - return nil, err + if k == nil { + return nil, nil } - indexWithValues := idxValuesCount != 0 || fn != nil - collector := etl.NewCollector("prune idx "+ii.filenameBase, ii.dirs.Tmp, etl.NewSortableBuffer(etl.BufferOptimalSize/8), ii.logger) + txFrom = binary.BigEndian.Uint64(k) + if limit == 0 { + limit = math.MaxUint64 + } + if txFrom >= txTo { + return nil, nil + } + + collector := etl.NewCollector("snapshots", ii.dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), ii.logger) defer collector.Close() collector.LogLvl(log.LvlDebug) collector.SortAndFlushInBackground(true) - var txKey [8]byte - binary.BigEndian.PutUint64(txKey[:], txFrom) - // Invariant: if some `txNum=N` pruned - it's pruned Fully // Means: can use DeleteCurrentDuplicates all values of given `txNum` - for k, v, err := keysCursor.Seek(txKey[:]); k != nil; k, v, err = keysCursor.NextNoDup() { + for ; k != nil; k, v, err = keysCursor.NextNoDup() { if err != nil { return nil, fmt.Errorf("iterate over %s index keys: %w", ii.filenameBase, err) } @@ -820,15 +918,21 @@ func (iit *InvertedIndexRoTx) Prune(ctx context.Context, rwTx kv.RwTx, txFrom, t stat.MinTxNum = min(stat.MinTxNum, txNum) stat.MaxTxNum = max(stat.MaxTxNum, 
txNum) - if indexWithValues { - for ; v != nil; _, v, err = keysCursor.NextDup() { - if err != nil { - return nil, fmt.Errorf("iterate over %s index keys: %w", ii.filenameBase, err) + for ; v != nil; _, v, err = keysCursor.NextDup() { + if err != nil { + return nil, fmt.Errorf("iterate over %s index keys: %w", ii.filenameBase, err) + } + if !indexWithHistoryValues { + if err := collector.Collect(v, nil); err != nil { + return nil, err } - if err := collector.Collect(v, k); err != nil { + } + if fn != nil { + if err := fn(v, k); err != nil { return nil, err } } + stat.PruneCountValues++ } stat.PruneCountTx++ @@ -841,7 +945,9 @@ func (iit *InvertedIndexRoTx) Prune(ctx context.Context, rwTx kv.RwTx, txFrom, t return nil, ctx.Err() } } - if !indexWithValues { + + if indexWithHistoryValues { + // empty indexTable, no need to collect and prune keys out of there return stat, nil } @@ -850,32 +956,46 @@ func (iit *InvertedIndexRoTx) Prune(ctx context.Context, rwTx kv.RwTx, txFrom, t return nil, err } defer idxCForDeletes.Close() + idxC, err := rwTx.RwCursorDupSort(ii.indexTable) + if err != nil { + return nil, err + } + defer idxC.Close() - binary.BigEndian.PutUint64(txKey[:], txFrom) - err = collector.Load(nil, "", func(key, txnm []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { - if fn != nil { - if err = fn(key, txnm); err != nil { - return fmt.Errorf("fn error: %w", err) - } - } - if idxValuesCount > 0 { - if err = idxCForDeletes.DeleteExact(key, txnm); err != nil { + binary.BigEndian.PutUint64(txKey[:], stat.MinTxNum) + err = collector.Load(rwTx, "", func(key, _ []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + for txnm, err := idxC.SeekBothRange(key, txKey[:]); txnm != nil; _, txnm, err = idxC.NextDup() { + if err != nil { return err } - } - mxPruneSizeIndex.Inc() - stat.PruneCountValues++ - select { - case <-logEvery.C: txNum := binary.BigEndian.Uint64(txnm) - ii.logger.Info("[snapshots] prune index", "name", ii.filenameBase, "pruned tx", stat.PruneCountTx, - "pruned values", stat.PruneCountValues, - "steps", fmt.Sprintf("%.2f-%.2f", float64(txFrom)/float64(ii.aggregationStep), float64(txNum)/float64(ii.aggregationStep))) - default: + if txNum < stat.MinTxNum { + continue // to bigger txnums + } + if txNum > stat.MaxTxNum { + return nil // go to next key + } + if _, _, err = idxCForDeletes.SeekBothExact(key, txnm); err != nil { + return err + } + if err = idxCForDeletes.DeleteCurrent(); err != nil { + return err + } + mxPruneSizeIndex.Inc() + + select { + case <-logEvery.C: + ii.logger.Info("[snapshots] prune index", "name", ii.filenameBase, "pruned tx", stat.PruneCountTx, + "pruned values", stat.PruneCountValues, + "steps", fmt.Sprintf("%.2f-%.2f", float64(txFrom)/float64(ii.aggregationStep), float64(txNum)/float64(ii.aggregationStep))) + case <-ctx.Done(): + return ctx.Err() + default: + } } return nil - }, etl.TransformArgs{Quit: ctx.Done()}) + }, etl.TransformArgs{}) return stat, err } @@ -1370,9 +1490,8 @@ func (ii *InvertedIndex) collate(ctx context.Context, step uint64, roTx kv.Tx) ( } defer keysCursor.Close() - collector := etl.NewCollector("collate idx "+ii.filenameBase, ii.iiCfg.dirs.Tmp, etl.NewSortableBuffer(CollateETLRAM), ii.logger) + collector := etl.NewCollector(ii.indexKeysTable, ii.iiCfg.dirs.Tmp, etl.NewSortableBuffer(CollateETLRAM), ii.logger) defer collector.Close() - collector.LogLvl(log.LvlTrace) var txKey [8]byte binary.BigEndian.PutUint64(txKey[:], txFrom) @@ -1407,7 +1526,7 @@ func (ii *InvertedIndex) collate(ctx 
context.Context, step uint64, roTx kv.Tx) ( } }() - comp, err := seg.NewCompressor(ctx, "collate idx "+ii.filenameBase, coll.iiPath, ii.dirs.Tmp, seg.MinPatternScore, ii.compressWorkers, log.LvlTrace, ii.logger) + comp, err := seg.NewCompressor(ctx, "snapshots", coll.iiPath, ii.dirs.Tmp, seg.MinPatternScore, ii.compressWorkers, log.LvlTrace, ii.logger) if err != nil { return InvertedIndexCollation{}, fmt.Errorf("create %s compressor: %w", ii.filenameBase, err) } @@ -1551,6 +1670,13 @@ func (ii *InvertedIndex) buildFiles(ctx context.Context, step uint64, coll Inver return InvertedFiles{}, err } + if ii.withExistenceIndex { + idxPath2 := ii.efExistenceIdxFilePath(step, step+1) + if existence, err = buildIndexFilterThenOpen(ctx, decomp, ii.compression, idxPath2, ii.dirs.Tmp, ii.salt, ps, ii.logger, ii.noFsync); err != nil { + return InvertedFiles{}, fmt.Errorf("build %s efei: %w", ii.filenameBase, err) + } + } + closeComp = false return InvertedFiles{decomp: decomp, index: index, existence: existence}, nil } @@ -1566,12 +1692,15 @@ func (ii *InvertedIndex) buildMapIdx(ctx context.Context, fromStep, toStep uint6 TmpDir: ii.dirs.Tmp, IndexFile: idxPath, Salt: ii.salt, - NoFsync: ii.noFsync, } - return buildIndex(ctx, data, ii.compression, idxPath, false, cfg, ps, ii.logger) + return buildIndex(ctx, data, ii.compression, idxPath, false, cfg, ps, ii.logger, ii.noFsync) } func (ii *InvertedIndex) integrateDirtyFiles(sf InvertedFiles, txNumFrom, txNumTo uint64) { + if asserts && ii.withExistenceIndex && sf.existence == nil { + panic(fmt.Errorf("assert: no existence index: %s", sf.decomp.FileName())) + } + fi := newFilesItem(txNumFrom, txNumTo, ii.aggregationStep) fi.decompressor = sf.decomp fi.index = sf.index diff --git a/erigon-lib/state/inverted_index_test.go b/erigon-lib/state/inverted_index_test.go index 4a4382ef44f..3e710e17a29 100644 --- a/erigon-lib/state/inverted_index_test.go +++ b/erigon-lib/state/inverted_index_test.go @@ -54,7 +54,7 @@ func testDbAndInvertedIndex(tb testing.TB, aggStep uint64, logger log.Logger) (k tb.Cleanup(db.Close) salt := uint32(1) cfg := iiCfg{salt: &salt, dirs: dirs, db: db} - ii, err := NewInvertedIndex(cfg, aggStep, "inv", keysTable, indexTable, nil, logger) + ii, err := NewInvertedIndex(cfg, aggStep, "inv", keysTable, indexTable, true, nil, logger) require.NoError(tb, err) ii.DisableFsync() tb.Cleanup(ii.Close) @@ -452,7 +452,7 @@ func TestInvIndexScanFiles(t *testing.T) { var err error salt := uint32(1) cfg := iiCfg{salt: &salt, dirs: ii.dirs, db: db} - ii, err = NewInvertedIndex(cfg, ii.aggregationStep, ii.filenameBase, ii.indexKeysTable, ii.indexTable, nil, logger) + ii, err = NewInvertedIndex(cfg, ii.aggregationStep, ii.filenameBase, ii.indexKeysTable, ii.indexTable, true, nil, logger) require.NoError(t, err) defer ii.Close() diff --git a/erigon-lib/state/merge.go b/erigon-lib/state/merge.go index 6da62192e3a..189c950a9f0 100644 --- a/erigon-lib/state/merge.go +++ b/erigon-lib/state/merge.go @@ -42,8 +42,8 @@ import ( func (d *Domain) dirtyFilesEndTxNumMinimax() uint64 { minimax := d.History.endTxNumMinimax() - if _max, ok := d.dirtyFiles.Max(); ok { - endTxNum := _max.endTxNum + if max, ok := d.dirtyFiles.Max(); ok { + endTxNum := max.endTxNum if minimax == 0 || endTxNum < minimax { minimax = endTxNum } @@ -53,8 +53,8 @@ func (d *Domain) dirtyFilesEndTxNumMinimax() uint64 { func (ii *InvertedIndex) endTxNumMinimax() uint64 { var minimax uint64 - if _max, ok := ii.dirtyFiles.Max(); ok { - endTxNum := _max.endTxNum + if max, ok := ii.dirtyFiles.Max(); ok { 
+ endTxNum := max.endTxNum if minimax == 0 || endTxNum < minimax { minimax = endTxNum } @@ -62,17 +62,17 @@ func (ii *InvertedIndex) endTxNumMinimax() uint64 { return minimax } func (ii *InvertedIndex) endIndexedTxNumMinimax(needFrozen bool) uint64 { - var _max uint64 + var max uint64 ii.dirtyFiles.Walk(func(items []*filesItem) bool { for _, item := range items { if item.index == nil || (needFrozen && !item.frozen) { continue } - _max = cmp.Max(_max, item.endTxNum) + max = cmp.Max(max, item.endTxNum) } return true }) - return _max + return max } func (h *History) endTxNumMinimax() uint64 { @@ -80,8 +80,8 @@ func (h *History) endTxNumMinimax() uint64 { return math.MaxUint64 } minimax := h.InvertedIndex.endTxNumMinimax() - if _max, ok := h.dirtyFiles.Max(); ok { - endTxNum := _max.endTxNum + if max, ok := h.dirtyFiles.Max(); ok { + endTxNum := max.endTxNum if minimax == 0 || endTxNum < minimax { minimax = endTxNum } @@ -89,20 +89,20 @@ func (h *History) endTxNumMinimax() uint64 { return minimax } func (h *History) endIndexedTxNumMinimax(needFrozen bool) uint64 { - var _max uint64 + var max uint64 if h.dontProduceHistoryFiles && h.dirtyFiles.Len() == 0 { - _max = math.MaxUint64 + max = math.MaxUint64 } h.dirtyFiles.Walk(func(items []*filesItem) bool { for _, item := range items { if item.index == nil || (needFrozen && !item.frozen) { continue } - _max = cmp.Max(_max, item.endTxNum) + max = cmp.Max(max, item.endTxNum) } return true }) - return cmp.Min(_max, h.InvertedIndex.endIndexedTxNumMinimax(needFrozen)) + return cmp.Min(max, h.InvertedIndex.endIndexedTxNumMinimax(needFrozen)) } type DomainRanges struct { @@ -311,34 +311,31 @@ func (dt *DomainRoTx) maxTxNumInDomainFiles(cold bool) uint64 { return 0 } -func (ht *HistoryRoTx) maxTxNumInFiles(onlyFrozen bool) uint64 { +func (ht *HistoryRoTx) maxTxNumInFiles(cold bool) uint64 { if len(ht.files) == 0 { return 0 } - var _max uint64 - if onlyFrozen { + var max uint64 + if cold { for i := len(ht.files) - 1; i >= 0; i-- { if !ht.files[i].src.frozen { continue } - _max = ht.files[i].endTxNum + max = ht.files[i].endTxNum break } } else { - _max = ht.files[len(ht.files)-1].endTxNum + max = ht.files[len(ht.files)-1].endTxNum } - return cmp.Min(_max, ht.iit.maxTxNumInFiles(onlyFrozen)) + return cmp.Min(max, ht.iit.maxTxNumInFiles(cold)) } - -func (iit *InvertedIndexRoTx) maxTxNumInFiles(onlyFrozen bool) uint64 { +func (iit *InvertedIndexRoTx) maxTxNumInFiles(cold bool) uint64 { if len(iit.files) == 0 { return 0 } - if !onlyFrozen { - return iit.lastTxNumInFiles() + if !cold { + return iit.files[len(iit.files)-1].endTxNum } - - // files contains [frozen..., cold...] 
in that order for i := len(iit.files) - 1; i >= 0; i-- { if !iit.files[i].src.frozen { continue @@ -543,7 +540,7 @@ func (dt *DomainRoTx) mergeFiles(ctx context.Context, domainFiles, indexFiles, h fromStep, toStep := r.valuesStartTxNum/dt.d.aggregationStep, r.valuesEndTxNum/dt.d.aggregationStep kvFilePath := dt.d.kvFilePath(fromStep, toStep) - kvFile, err := seg.NewCompressor(ctx, "merge domain "+dt.d.filenameBase, kvFilePath, dt.d.dirs.Tmp, seg.MinPatternScore, dt.d.compressWorkers, log.LvlTrace, dt.d.logger) + kvFile, err := seg.NewCompressor(ctx, "merge", kvFilePath, dt.d.dirs.Tmp, seg.MinPatternScore, dt.d.compressWorkers, log.LvlTrace, dt.d.logger) if err != nil { return nil, nil, nil, fmt.Errorf("merge %s compressor: %w", dt.d.filenameBase, err) } @@ -707,7 +704,7 @@ func (iit *InvertedIndexRoTx) mergeFiles(ctx context.Context, files []*filesItem fromStep, toStep := startTxNum/iit.ii.aggregationStep, endTxNum/iit.ii.aggregationStep datPath := iit.ii.efFilePath(fromStep, toStep) - if comp, err = seg.NewCompressor(ctx, "merge idx "+iit.ii.filenameBase, datPath, iit.ii.dirs.Tmp, seg.MinPatternScore, iit.ii.compressWorkers, log.LvlTrace, iit.ii.logger); err != nil { + if comp, err = seg.NewCompressor(ctx, "Snapshots merge", datPath, iit.ii.dirs.Tmp, seg.MinPatternScore, iit.ii.compressWorkers, log.LvlTrace, iit.ii.logger); err != nil { return nil, fmt.Errorf("merge %s inverted index compressor: %w", iit.ii.filenameBase, err) } if iit.ii.noFsync { @@ -810,6 +807,13 @@ func (iit *InvertedIndexRoTx) mergeFiles(ctx context.Context, files []*filesItem return nil, err } + if iit.ii.withExistenceIndex { + idxPath := iit.ii.efExistenceIdxFilePath(fromStep, toStep) + if outItem.existence, err = buildIndexFilterThenOpen(ctx, outItem.decompressor, iit.ii.compression, idxPath, iit.ii.dirs.Tmp, iit.ii.salt, ps, iit.ii.logger, iit.ii.noFsync); err != nil { + return nil, err + } + } + closeItem = false return outItem, nil } @@ -864,7 +868,7 @@ func (ht *HistoryRoTx) mergeFiles(ctx context.Context, indexFiles, historyFiles fromStep, toStep := r.historyStartTxNum/ht.h.aggregationStep, r.historyEndTxNum/ht.h.aggregationStep datPath := ht.h.vFilePath(fromStep, toStep) idxPath := ht.h.vAccessorFilePath(fromStep, toStep) - if comp, err = seg.NewCompressor(ctx, "merge hist "+ht.h.filenameBase, datPath, ht.h.dirs.Tmp, seg.MinPatternScore, ht.h.compressWorkers, log.LvlTrace, ht.h.logger); err != nil { + if comp, err = seg.NewCompressor(ctx, "merge", datPath, ht.h.dirs.Tmp, seg.MinPatternScore, ht.h.compressWorkers, log.LvlTrace, ht.h.logger); err != nil { return nil, nil, fmt.Errorf("merge %s history compressor: %w", ht.h.filenameBase, err) } compr := NewArchiveWriter(comp, ht.h.compression) @@ -955,12 +959,15 @@ func (ht *HistoryRoTx) mergeFiles(ctx context.Context, indexFiles, historyFiles TmpDir: ht.h.dirs.Tmp, IndexFile: idxPath, Salt: ht.h.salt, - NoFsync: ht.h.noFsync, }, ht.h.logger); err != nil { return nil, nil, fmt.Errorf("create recsplit: %w", err) } rs.LogLvl(log.LvlTrace) + if ht.h.noFsync { + rs.DisableFsync() + } + var ( txKey [8]byte historyKey []byte diff --git a/erigon-lib/state/merge_test.go b/erigon-lib/state/merge_test.go index a75c8852636..52ab0b24410 100644 --- a/erigon-lib/state/merge_test.go +++ b/erigon-lib/state/merge_test.go @@ -24,6 +24,7 @@ func emptyTestInvertedIndex(aggStep uint64) *InvertedIndex { func TestFindMergeRangeCornerCases(t *testing.T) { t.Run("> 2 unmerged files", func(t *testing.T) { ii := emptyTestInvertedIndex(1) + ii.withExistenceIndex = false 
ii.scanStateFiles([]string{ "v1-test.0-2.ef", "v1-test.2-3.ef", diff --git a/erigon-lib/state/metrics.go b/erigon-lib/state/metrics.go deleted file mode 100644 index 5b0b48df4f2..00000000000 --- a/erigon-lib/state/metrics.go +++ /dev/null @@ -1,59 +0,0 @@ -/* - Copyright 2024 Erigon contributors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package state - -import "github.com/ledgerwatch/erigon-lib/metrics" - -var ( - //LatestStateReadWarm = metrics.GetOrCreateSummary(`latest_state_read{type="warm",found="yes"}`) //nolint - //LatestStateReadWarmNotFound = metrics.GetOrCreateSummary(`latest_state_read{type="warm",found="no"}`) //nolint - //LatestStateReadGrind = metrics.GetOrCreateSummary(`latest_state_read{type="grind",found="yes"}`) //nolint - //LatestStateReadGrindNotFound = metrics.GetOrCreateSummary(`latest_state_read{type="grind",found="no"}`) //nolint - //LatestStateReadCold = metrics.GetOrCreateSummary(`latest_state_read{type="cold",found="yes"}`) //nolint - //LatestStateReadColdNotFound = metrics.GetOrCreateSummary(`latest_state_read{type="cold",found="no"}`) //nolint - mxPruneTookAgg = metrics.GetOrCreateSummary(`prune_seconds{type="state"}`) - mxPrunableDAcc = metrics.GetOrCreateGauge(`domain_prunable{type="domain",table="account"}`) - mxPrunableDSto = metrics.GetOrCreateGauge(`domain_prunable{type="domain",table="storage"}`) - mxPrunableDCode = metrics.GetOrCreateGauge(`domain_prunable{type="domain",table="code"}`) - mxPrunableDComm = metrics.GetOrCreateGauge(`domain_prunable{type="domain",table="commitment"}`) - mxPrunableHAcc = metrics.GetOrCreateGauge(`domain_prunable{type="history",table="account"}`) - mxPrunableHSto = metrics.GetOrCreateGauge(`domain_prunable{type="history",table="storage"}`) - mxPrunableHCode = metrics.GetOrCreateGauge(`domain_prunable{type="history",table="code"}`) - mxPrunableHComm = metrics.GetOrCreateGauge(`domain_prunable{type="history",table="commitment"}`) - mxUnwindTook = metrics.GetOrCreateHistogram(`domain_unwind_took{type="domain"}`) - mxUnwindSharedTook = metrics.GetOrCreateHistogram(`domain_unwind_took{type="shared"}`) - mxRunningUnwind = metrics.GetOrCreateGauge("domain_running_unwind") - mxRunningMerges = metrics.GetOrCreateGauge("domain_running_merges") - mxRunningFilesBuilding = metrics.GetOrCreateGauge("domain_running_files_building") - mxCollateTook = metrics.GetOrCreateHistogram(`domain_collate_took{type="domain"}`) - mxCollateTookHistory = metrics.GetOrCreateHistogram(`domain_collate_took{type="history"}`) - mxCollateTookIndex = metrics.GetOrCreateHistogram(`domain_collate_took{type="index"}`) - mxPruneTookDomain = metrics.GetOrCreateHistogram(`domain_prune_took{type="domain"}`) - mxPruneTookHistory = metrics.GetOrCreateHistogram(`domain_prune_took{type="history"}`) - mxPruneTookIndex = metrics.GetOrCreateHistogram(`domain_prune_took{type="index"}`) - mxPruneInProgress = metrics.GetOrCreateGauge("domain_pruning_progress") - mxCollationSize = metrics.GetOrCreateGauge("domain_collation_size") - mxCollationSizeHist = 
metrics.GetOrCreateGauge("domain_collation_hist_size") - mxPruneSizeDomain = metrics.GetOrCreateCounter(`domain_prune_size{type="domain"}`) - mxPruneSizeHistory = metrics.GetOrCreateCounter(`domain_prune_size{type="history"}`) - mxPruneSizeIndex = metrics.GetOrCreateCounter(`domain_prune_size{type="index"}`) - mxBuildTook = metrics.GetOrCreateSummary("domain_build_files_took") - mxStepTook = metrics.GetOrCreateSummary("domain_step_took") - mxFlushTook = metrics.GetOrCreateSummary("domain_flush_took") - mxCommitmentRunning = metrics.GetOrCreateGauge("domain_running_commitment") - mxCommitmentTook = metrics.GetOrCreateSummary("domain_commitment_took") -) diff --git a/erigon-lib/txpool/pool.go b/erigon-lib/txpool/pool.go index 1b4f38eb9e0..5fcdad752c6 100644 --- a/erigon-lib/txpool/pool.go +++ b/erigon-lib/txpool/pool.go @@ -50,7 +50,7 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil" remote "github.com/ledgerwatch/erigon-lib/gointerfaces/remoteproto" - "github.com/ledgerwatch/erigon-lib/gointerfaces/txpoolproto" + txpoolproto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpoolproto" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon-lib/kv/mdbx" diff --git a/erigon-lib/txpool/pool_fuzz_test.go b/erigon-lib/txpool/pool_fuzz_test.go index c8079abb2f4..0c7c23256d5 100644 --- a/erigon-lib/txpool/pool_fuzz_test.go +++ b/erigon-lib/txpool/pool_fuzz_test.go @@ -313,7 +313,7 @@ func FuzzOnNewBlocks(f *testing.F) { var prevHashes types.Hashes ch := make(chan types.Announcements, 100) - coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + _, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) db := memdb.NewTestPoolDB(t) cfg := txpoolcfg.DefaultConfig diff --git a/erigon-lib/txpool/pool_test.go b/erigon-lib/txpool/pool_test.go index f84a992ba16..2b13971c6f3 100644 --- a/erigon-lib/txpool/pool_test.go +++ b/erigon-lib/txpool/pool_test.go @@ -51,7 +51,7 @@ func TestNonceFromAddress(t *testing.T) { assert, require := assert.New(t), require.New(t) ch := make(chan types.Announcements, 100) - coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + _, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) db := memdb.NewTestPoolDB(t) cfg := txpoolcfg.DefaultConfig @@ -173,7 +173,7 @@ func TestReplaceWithHigherFee(t *testing.T) { t.Skip("TODO") assert, require := assert.New(t), require.New(t) ch := make(chan types.Announcements, 100) - coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + _, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) db := memdb.NewTestPoolDB(t) cfg := txpoolcfg.DefaultConfig @@ -292,7 +292,7 @@ func TestReverseNonces(t *testing.T) { t.Skip("TODO") assert, require := assert.New(t), require.New(t) ch := make(chan types.Announcements, 100) - coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + _, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) db := memdb.NewTestPoolDB(t) cfg := txpoolcfg.DefaultConfig @@ -421,7 +421,7 @@ func TestTxPoke(t *testing.T) { t.Skip("TODO") assert, require := assert.New(t), require.New(t) ch := make(chan types.Announcements, 100) - coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + _, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) db := memdb.NewTestPoolDB(t) cfg := txpoolcfg.DefaultConfig @@ -682,7 +682,7 @@ func TestShanghaiValidateTx(t *testing.T) { for name, test := range tests { 
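
// The deleted metrics.go above centralized package-level metric handles such
// as domain_running_merges. If an equivalent counter is still wanted outside
// that file, the same idea with plain client_golang looks like this (a sketch;
// erigon-lib's metrics facade is a thin wrapper over the same pattern):
package main

import "github.com/prometheus/client_golang/prometheus"

var runningMerges = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "domain_running_merges",
	Help: "Background merges currently running",
})

func init() { prometheus.MustRegister(runningMerges) }

// mergeWithMetric brackets an operation with Inc/Dec so the gauge tracks
// in-flight merges even when merge() returns an error.
func mergeWithMetric(merge func() error) error {
	runningMerges.Inc()
	defer runningMerges.Dec()
	return merge()
}

func main() {
	_ = mergeWithMetric(func() error { return nil })
}
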
t.Run(name, func(t *testing.T) { ch := make(chan types.Announcements, 100) - coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + _, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) cfg := txpoolcfg.DefaultConfig @@ -736,7 +736,7 @@ func TestBlobTxReplacement(t *testing.T) { t.Skip("TODO") assert, require := assert.New(t), require.New(t) ch := make(chan types.Announcements, 5) - coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + _, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) db := memdb.NewTestPoolDB(t) cfg := txpoolcfg.DefaultConfig @@ -951,7 +951,7 @@ func TestDropRemoteAtNoGossip(t *testing.T) { t.Skip("TODO") assert, require := assert.New(t), require.New(t) ch := make(chan types.Announcements, 100) - coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + _, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) db := memdb.NewTestPoolDB(t) cfg := txpoolcfg.DefaultConfig @@ -1060,7 +1060,7 @@ func TestBlobSlots(t *testing.T) { t.Skip("TODO") assert, require := assert.New(t), require.New(t) ch := make(chan types.Announcements, 5) - coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + _, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) db := memdb.NewTestPoolDB(t) cfg := txpoolcfg.DefaultConfig @@ -1140,7 +1140,7 @@ func TestGasLimitChanged(t *testing.T) { assert, require := assert.New(t), require.New(t) ch := make(chan types.Announcements, 100) - coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + _, coreDB, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) db := memdb.NewTestPoolDB(t) cfg := txpoolcfg.DefaultConfig sendersCache := kvcache.New(kvcache.DefaultCoherentConfig) diff --git a/erigon-lib/types/txn.go b/erigon-lib/types/txn.go index af3e1123f41..3784c9fd93b 100644 --- a/erigon-lib/types/txn.go +++ b/erigon-lib/types/txn.go @@ -1005,7 +1005,7 @@ func UnwrapTxPlayloadRlp(blobTxRlp []byte) ([]byte, error) { if err != nil { return nil, err } - blobTxRlp = blobTxRlp[dataposPrev-1 : datapos+datalen] // seekInFiles left an extra-bit + blobTxRlp = blobTxRlp[dataposPrev-1 : datapos+datalen] // Seek left an extra-bit blobTxRlp[0] = 0x3 // Include the prefix part of the rlp return blobTxRlp, nil diff --git a/eth/backend.go b/eth/backend.go index 256db206c3e..f401eb86c92 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -65,10 +65,11 @@ import ( remote "github.com/ledgerwatch/erigon-lib/gointerfaces/remoteproto" rpcsentinel "github.com/ledgerwatch/erigon-lib/gointerfaces/sentinelproto" protosentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentryproto" - "github.com/ledgerwatch/erigon-lib/gointerfaces/txpoolproto" + txpoolproto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpoolproto" prototypes "github.com/ledgerwatch/erigon-lib/gointerfaces/typesproto" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/remotedbserver" "github.com/ledgerwatch/erigon-lib/kv/temporal" libstate "github.com/ledgerwatch/erigon-lib/state" @@ -104,6 +105,7 @@ import ( "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/eth/tracers" "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/ethstats" @@ -231,7 +233,7 @@ const blockBufferSize = 128 // New 
creates a new Ethereum object (including the // initialisation of the common Ethereum object) -func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethereum, error) { +func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger log.Logger, tracer *tracers.Tracer) (*Ethereum, error) { config.Snapshot.Enabled = config.Sync.UseSnapshots if config.Miner.GasPrice == nil || config.Miner.GasPrice.Cmp(libcommon.Big0) <= 0 { logger.Warn("Sanitizing invalid miner gas price", "provided", config.Miner.GasPrice, "updated", ethconfig.Defaults.Miner.GasPrice) @@ -261,7 +263,8 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger return err } - return nil + config.HistoryV3, err = kvcfg.HistoryV3.WriteOnce(tx, config.HistoryV3) + return err }); err != nil { return nil, err } @@ -288,6 +291,10 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger }, } + if tracer.Hooks != nil && tracer.Hooks.OnBlockchainInit != nil { + tracer.Hooks.OnBlockchainInit(config.Genesis.Config) + } + var chainConfig *chain.Config var genesis *types.Block if err := backend.chainDB.Update(context.Background(), func(tx kv.RwTx) error { @@ -300,7 +307,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger genesisSpec = nil } var genesisErr error - chainConfig, genesis, genesisErr = core.WriteGenesisBlock(tx, genesisSpec, config.OverridePragueTime, tmpdir, logger) + chainConfig, genesis, genesisErr = core.WriteGenesisBlock(tx, genesisSpec, config.OverridePragueTime, tmpdir, logger, tracer.Hooks) if _, ok := genesisErr.(*chain.ConfigCompatError); genesisErr != nil && !ok { return genesisErr } @@ -339,17 +346,19 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger // Check if we have an already initialized chain and fall back to // that if so. Otherwise we need to generate a new genesis spec. 
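
// backend.go now fires OnBlockchainInit through optional tracer hooks, and
// both the container and the individual callback may be nil. The nil-safe
// invocation pattern, reduced to a standalone sketch (Hooks, Tracer and
// ChainConfig here are stand-ins, not the real erigon types):
package main

import "fmt"

type ChainConfig struct{ ChainID int64 }

type Hooks struct {
	OnBlockchainInit func(cfg *ChainConfig)
}

type Tracer struct{ Hooks *Hooks }

func initChain(tracer *Tracer, cfg *ChainConfig) {
	// Guard every level: a missing tracer or an unset hook is a silent no-op.
	if tracer != nil && tracer.Hooks != nil && tracer.Hooks.OnBlockchainInit != nil {
		tracer.Hooks.OnBlockchainInit(cfg)
	}
}

func main() {
	initChain(&Tracer{}, &ChainConfig{ChainID: 1}) // no hook set: does not panic
	initChain(&Tracer{Hooks: &Hooks{OnBlockchainInit: func(c *ChainConfig) {
		fmt.Println("init for chain", c.ChainID)
	}}}, &ChainConfig{ChainID: 1})
}
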
- blockReader, blockWriter, allSnapshots, allBorSnapshots, agg, err := setUpBlockReader(ctx, chainKv, config.Dirs, config, chainConfig.Bor != nil, logger) + blockReader, blockWriter, allSnapshots, allBorSnapshots, agg, err := setUpBlockReader(ctx, chainKv, config.Dirs, config, config.HistoryV3, chainConfig.Bor != nil, logger) if err != nil { return nil, err } backend.agg, backend.blockSnapshots, backend.blockReader, backend.blockWriter = agg, allSnapshots, blockReader, blockWriter - backend.chainDB, err = temporal.New(backend.chainDB, agg) - if err != nil { - return nil, err + if config.HistoryV3 { + backend.chainDB, err = temporal.New(backend.chainDB, agg) + if err != nil { + return nil, err + } + chainKv = backend.chainDB //nolint } - chainKv = backend.chainDB //nolint if err := backend.setUpSnapDownloader(ctx, config.Downloader); err != nil { return nil, err @@ -551,15 +560,19 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger terseLogger.SetHandler(log.LvlFilterHandler(log.LvlWarn, log.StderrHandler)) // Needs its own notifications to not update RPC daemon and txpool about pending blocks stateSync := stages2.NewInMemoryExecution(backend.sentryCtx, backend.chainDB, config, backend.sentriesClient, - dirs, notifications, blockReader, blockWriter, backend.agg, backend.silkworm, terseLogger) + dirs, notifications, blockReader, blockWriter, backend.agg, backend.silkworm, terseLogger, nil) chainReader := consensuschain.NewReader(chainConfig, txc.Tx, blockReader, logger) // We start the mining step - if err := stages2.StateStep(ctx, chainReader, backend.engine, txc, stateSync, header, body, unwindPoint, headersChain, bodiesChain); err != nil { + if err := stages2.StateStep(ctx, chainReader, backend.engine, txc, stateSync, header, body, unwindPoint, headersChain, bodiesChain, config.HistoryV3); err != nil { logger.Warn("Could not validate block", "err", err) return err } var progress uint64 - progress, err = stages.GetStageProgress(txc.Tx, stages.Execution) + if config.HistoryV3 { + progress, err = stages.GetStageProgress(txc.Tx, stages.Execution) + } else { + progress, err = stages.GetStageProgress(txc.Tx, stages.IntermediateHashes) + } if err != nil { return err } @@ -629,7 +642,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.newTxs = make(chan libtypes.Announcements, 1024) //defer close(newTxs) backend.txPoolDB, backend.txPool, backend.txPoolFetch, backend.txPoolSend, backend.txPoolGrpcServer, err = txpooluitl.AllComponents( - ctx, config.TxPool, kvcache.NewDummy(), backend.newTxs, chainKv, backend.sentriesClient.Sentries(), stateDiffClient, misc.Eip1559FeeCalculator, logger, + ctx, config.TxPool, kvcache.NewDummy(config.HistoryV3), backend.newTxs, chainKv, backend.sentriesClient.Sentries(), stateDiffClient, misc.Eip1559FeeCalculator, logger, ) if err != nil { return nil, err @@ -671,6 +684,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.notifications.Accumulator, config.StateStream, /*stateStream=*/ false, + config.HistoryV3, dirs, blockReader, backend.sentriesClient.Hd, @@ -710,6 +724,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.notifications.Accumulator, config.StateStream, /*stateStream=*/ false, + config.HistoryV3, dirs, blockReader, backend.sentriesClient.Hd, @@ -852,7 +867,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.syncPruneOrder = stagedsync.PolygonSyncPruneOrder 
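
// In the hunk above, temporal.New wraps chainDB only when HistoryV3 is on;
// otherwise the plain DB is used as-is. The shape of that gating, as a
// standalone sketch (DB, baseDB and temporalDB are stand-ins):
package main

type DB interface{ Name() string }

type baseDB struct{}

func (baseDB) Name() string { return "mdbx" }

type temporalDB struct{ inner DB }

func (t temporalDB) Name() string { return "temporal(" + t.inner.Name() + ")" }

func openChainDB(historyV3 bool) DB {
	var db DB = baseDB{}
	if historyV3 {
		db = temporalDB{inner: db} // only E3 gets the temporal wrapper
	}
	return db
}

func main() {
	println(openChainDB(true).Name())  // temporal(mdbx)
	println(openChainDB(false).Name()) // mdbx
}
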
} else { backend.syncStages = stages2.NewDefaultStages(backend.sentryCtx, backend.chainDB, snapDb, p2pConfig, config, backend.sentriesClient, backend.notifications, backend.downloaderClient, - blockReader, blockRetire, backend.agg, backend.silkworm, backend.forkValidator, heimdallClient, recents, signatures, logger) + blockReader, blockRetire, backend.agg, backend.silkworm, backend.forkValidator, heimdallClient, recents, signatures, logger, tracer) backend.syncUnwindOrder = stagedsync.DefaultUnwindOrder backend.syncPruneOrder = stagedsync.DefaultPruneOrder } @@ -883,15 +898,27 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger } checkStateRoot := true - pipelineStages := stages2.NewPipelineStages(ctx, backend.chainDB, config, p2pConfig, backend.sentriesClient, backend.notifications, backend.downloaderClient, blockReader, blockRetire, backend.agg, backend.silkworm, backend.forkValidator, logger, checkStateRoot) + pipelineStages := stages2.NewPipelineStages(ctx, backend.chainDB, config, p2pConfig, backend.sentriesClient, backend.notifications, backend.downloaderClient, blockReader, blockRetire, backend.agg, backend.silkworm, backend.forkValidator, logger, tracer, checkStateRoot) backend.pipelineStagedSync = stagedsync.New(config.Sync, pipelineStages, stagedsync.PipelineUnwindOrder, stagedsync.PipelinePruneOrder, logger) - backend.eth1ExecutionServer = eth1.NewEthereumExecutionModule(blockReader, backend.chainDB, backend.pipelineStagedSync, backend.forkValidator, chainConfig, assembleBlockPOS, hook, backend.notifications.Accumulator, backend.notifications.StateChangesConsumer, logger, backend.engine, config.Sync, ctx) + backend.eth1ExecutionServer = eth1.NewEthereumExecutionModule(blockReader, backend.chainDB, backend.pipelineStagedSync, backend.forkValidator, chainConfig, assembleBlockPOS, hook, backend.notifications.Accumulator, backend.notifications.StateChangesConsumer, logger, backend.engine, config.HistoryV3, config.Sync, ctx) executionRpc := direct.NewExecutionClientDirect(backend.eth1ExecutionServer) + engineBackendRPC := engineapi.NewEngineServer( + logger, + chainConfig, + executionRpc, + backend.sentriesClient.Hd, + engine_block_downloader.NewEngineBlockDownloader(ctx, + logger, backend.sentriesClient.Hd, executionRpc, + backend.sentriesClient.Bd, backend.sentriesClient.BroadcastNewBlock, backend.sentriesClient.SendBodyRequest, blockReader, + backend.chainDB, chainConfig, tmpdir, config.Sync), + config.InternalCL, + false, + config.Miner.EnabledPOS) + backend.engineBackendRPC = engineBackendRPC var executionEngine executionclient.ExecutionEngine - caplinUseEngineAPI := config.NetworkID == uint64(clparams.GnosisNetwork) || config.NetworkID == uint64(clparams.HoleskyNetwork) || config.NetworkID == uint64(clparams.GoerliNetwork) // Gnosis has too few blocks on his network for phase2 to work. Once we have proper snapshot automation, it can go back to normal. 
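
// The caplinUseEngineAPI condition above is inlined at its call-site: Caplin
// falls back to the Engine API on a fixed set of networks. Extracted as a
// predicate for clarity; the numeric IDs below are assumptions for the sketch
// (the canonical values live in cl/clparams), so treat them as illustrative:
package main

const (
	gnosisNetwork  uint64 = 100
	holeskyNetwork uint64 = 17000
	goerliNetwork  uint64 = 5
)

func caplinUsesEngineAPI(networkID uint64) bool {
	switch networkID {
	case gnosisNetwork, holeskyNetwork, goerliNetwork:
		return true
	default:
		return false
	}
}

func main() {
	println(caplinUsesEngineAPI(gnosisNetwork)) // true: too few blocks for phase2
}
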
- if caplinUseEngineAPI { + if config.NetworkID == uint64(clparams.GnosisNetwork) || config.NetworkID == uint64(clparams.HoleskyNetwork) || config.NetworkID == uint64(clparams.GoerliNetwork) { // Read the jwt secret jwtSecret, err := cli.ObtainJWTSecret(&stack.Config().Http, logger) if err != nil { @@ -907,19 +934,6 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger return nil, err } } - engineBackendRPC := engineapi.NewEngineServer( - logger, - chainConfig, - executionRpc, - backend.sentriesClient.Hd, - engine_block_downloader.NewEngineBlockDownloader(ctx, - logger, backend.sentriesClient.Hd, executionRpc, - backend.sentriesClient.Bd, backend.sentriesClient.BroadcastNewBlock, backend.sentriesClient.SendBodyRequest, blockReader, - backend.chainDB, chainConfig, tmpdir, config.Sync), - config.InternalCL && !caplinUseEngineAPI, // If the chain supports the engine API, then we should not make the server fail. - false, - config.Miner.EnabledPOS) - backend.engineBackendRPC = engineBackendRPC // If we choose not to run a consensus layer, run our embedded. if config.InternalCL && clparams.EmbeddedSupported(config.NetworkID) { @@ -972,7 +986,6 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger backend.polygonSyncService = polygonsync.NewService( logger, chainConfig, - tmpdir, sentryClient, p2pConfig.MaxPeers, statusDataProvider, @@ -1424,7 +1437,7 @@ func (s *Ethereum) setUpSnapDownloader(ctx context.Context, downloaderCfg *downl return err } -func setUpBlockReader(ctx context.Context, db kv.RwDB, dirs datadir.Dirs, snConfig *ethconfig.Config, isBor bool, logger log.Logger) (services.FullBlockReader, *blockio.BlockWriter, *freezeblocks.RoSnapshots, *freezeblocks.BorRoSnapshots, *libstate.Aggregator, error) { +func setUpBlockReader(ctx context.Context, db kv.RwDB, dirs datadir.Dirs, snConfig *ethconfig.Config, histV3 bool, isBor bool, logger log.Logger) (services.FullBlockReader, *blockio.BlockWriter, *freezeblocks.RoSnapshots, *freezeblocks.BorRoSnapshots, *libstate.Aggregator, error) { var minFrozenBlock uint64 if frozenLimit := snConfig.Sync.FrozenBlockLimit; frozenLimit != 0 { @@ -1463,7 +1476,7 @@ func setUpBlockReader(ctx context.Context, db kv.RwDB, dirs datadir.Dirs, snConf } blockReader := freezeblocks.NewBlockReader(allSnapshots, allBorSnapshots) - blockWriter := blockio.NewBlockWriter() + blockWriter := blockio.NewBlockWriter(histV3) return blockReader, blockWriter, allSnapshots, allBorSnapshots, agg, nil } diff --git a/eth/calltracer/calltracer.go b/eth/calltracer/calltracer.go index c4ca57e06c1..ad98d6a02f1 100644 --- a/eth/calltracer/calltracer.go +++ b/eth/calltracer/calltracer.go @@ -10,11 +10,15 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/tracers" ) type CallTracer struct { + t *tracers.Tracer + froms map[libcommon.Address]struct{} tos map[libcommon.Address]bool // address -> isCreated } @@ -26,8 +30,13 @@ func NewCallTracer() *CallTracer { } } -func (ct *CallTracer) CaptureTxStart(gasLimit uint64) {} -func (ct *CallTracer) CaptureTxEnd(restGas uint64) {} +func (ct *CallTracer) Tracer() *tracers.Tracer { + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnEnter: ct.OnEnter, + }, + } +} // CaptureStart and CaptureEnter also capture SELFDESTRUCT opcode invocations func (ct *CallTracer) 
captureStartOrEnter(from, to libcommon.Address, create bool, code []byte) {
@@ -45,20 +54,10 @@ func (ct *CallTracer) captureStartOrEnter(from, to libcommon.Address, create boo
 	}
 }
 
-func (ct *CallTracer) CaptureStart(env *vm.EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) {
-	ct.captureStartOrEnter(from, to, create, code)
-}
-func (ct *CallTracer) CaptureEnter(typ vm.OpCode, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) {
+func (ct *CallTracer) OnEnter(depth int, typ byte, from libcommon.Address, to libcommon.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) {
+	create := vm.OpCode(typ) == vm.CREATE
 	ct.captureStartOrEnter(from, to, create, code)
 }
-func (ct *CallTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
-}
-func (ct *CallTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
-}
-func (ct *CallTracer) CaptureEnd(output []byte, usedGas uint64, err error) {
-}
-func (ct *CallTracer) CaptureExit(output []byte, usedGas uint64, err error) {
-}
 
 func (ct *CallTracer) WriteToDb(tx kv.StatelessWriteTx, block *types.Block, vmConfig vm.Config) error {
 	ct.tos[block.Coinbase()] = false
diff --git a/eth/consensuschain/consensus_chain_reader.go b/eth/consensuschain/consensus_chain_reader.go
index 4cf0dc9f165..5d8de5d485e 100644
--- a/eth/consensuschain/consensus_chain_reader.go
+++ b/eth/consensuschain/consensus_chain_reader.go
@@ -34,6 +34,32 @@ func (cr Reader) GetHeader(hash common.Hash, number uint64) *types.Header {
 	}
 	return rawdb.ReadHeader(cr.tx, hash, number)
 }
+func (cr Reader) CurrentFinalizedHeader() *types.Header {
+	hash := rawdb.ReadForkchoiceFinalized(cr.tx)
+	if hash == (common.Hash{}) {
+		return nil
+	}
+
+	number := rawdb.ReadHeaderNumber(cr.tx, hash)
+	if number == nil {
+		return nil
+	}
+
+	return rawdb.ReadHeader(cr.tx, hash, *number)
+}
+func (cr Reader) CurrentSafeHeader() *types.Header {
+	hash := rawdb.ReadForkchoiceSafe(cr.tx)
+	if hash == (common.Hash{}) {
+		return nil
+	}
+
+	number := rawdb.ReadHeaderNumber(cr.tx, hash)
+	if number == nil {
+		return nil
+	}
+
+	return rawdb.ReadHeader(cr.tx, hash, *number)
+}
 func (cr Reader) GetHeaderByNumber(number uint64) *types.Header {
 	if cr.blockReader != nil {
 		h, _ := cr.blockReader.HeaderByNumber(context.Background(), cr.tx, number)
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 4550d65aaa6..ed96688de37 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -72,6 +72,7 @@ var LightClientGPO = gaspricecfg.Config{
 var Defaults = Config{
 	Sync: Sync{
 		UseSnapshots:     true,
+		HistoryV3:        true,
 		ExecWorkerCount:  estimate.ReconstituteState.WorkersHalf(), //only half of CPU, other half will spend for snapshots build/merge/prune
 		ReconWorkerCount: estimate.ReconstituteState.Workers(),
 		BodyCacheLimit:   256 * 1024 * 1024,
@@ -270,6 +271,9 @@ type Config struct {
 
 type Sync struct {
 	UseSnapshots bool
+	// New DB and Snapshots format of history allows parallel block execution and reading state as of a given transaction without executing its whole block.
+	HistoryV3 bool
+
 	// LoopThrottle sets a minimum time between staged loop iterations
 	LoopThrottle time.Duration
 	ExecWorkerCount int
diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go
index
5a6c4d4c0e6..2e9923a2302 100644 --- a/eth/gasprice/feehistory.go +++ b/eth/gasprice/feehistory.go @@ -93,7 +93,7 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) { return } if bf.block == nil || (bf.receipts == nil && len(bf.block.Transactions()) != 0) { - log.Error("[GasPriceOracle] Block or receipts are missing while reward percentiles are requested") + log.Error("Block or receipts are missing while reward percentiles are requested") return } @@ -205,7 +205,7 @@ func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLast return libcommon.Big0, nil, nil, nil, nil // returning with no data and no error means there are no retrievable blocks } if blocks > maxFeeHistory { - log.Warn("[GasPriceOracle] Sanitizing fee history length", "requested", blocks, "truncated", maxFeeHistory) + log.Warn("Sanitizing fee history length", "requested", blocks, "truncated", maxFeeHistory) blocks = maxFeeHistory } for i, p := range rewardPercentiles { diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go index c30cb7fbb78..ad3bfd369d9 100644 --- a/eth/gasprice/gasprice.go +++ b/eth/gasprice/gasprice.go @@ -70,26 +70,26 @@ func NewOracle(backend OracleBackend, params gaspricecfg.Config, cache Cache) *O blocks := params.Blocks if blocks < 1 { blocks = 1 - log.Warn("[GasPriceOracle] Sanitizing invalid gasprice oracle sample blocks", "provided", params.Blocks, "updated", blocks) + log.Warn("Sanitizing invalid gasprice oracle sample blocks", "provided", params.Blocks, "updated", blocks) } percent := params.Percentile if percent < 0 { percent = 0 - log.Warn("[GasPriceOracle] Sanitizing invalid gasprice oracle sample percentile", "provided", params.Percentile, "updated", percent) + log.Warn("Sanitizing invalid gasprice oracle sample percentile", "provided", params.Percentile, "updated", percent) } if percent > 100 { percent = 100 - log.Warn("[GasPriceOracle] Sanitizing invalid gasprice oracle sample percentile", "provided", params.Percentile, "updated", percent) + log.Warn("Sanitizing invalid gasprice oracle sample percentile", "provided", params.Percentile, "updated", percent) } maxPrice := params.MaxPrice if maxPrice == nil || maxPrice.Int64() <= 0 { maxPrice = gaspricecfg.DefaultMaxPrice - log.Warn("[GasPriceOracle] Sanitizing invalid gasprice oracle price cap", "provided", params.MaxPrice, "updated", maxPrice) + log.Warn("Sanitizing invalid gasprice oracle price cap", "provided", params.MaxPrice, "updated", maxPrice) } ignorePrice := params.IgnorePrice if ignorePrice == nil || ignorePrice.Int64() < 0 { ignorePrice = gaspricecfg.DefaultIgnorePrice - log.Warn("[GasPriceOracle] Sanitizing invalid gasprice oracle ignore price", "provided", params.IgnorePrice, "updated", ignorePrice) + log.Warn("Sanitizing invalid gasprice oracle ignore price", "provided", params.IgnorePrice, "updated", ignorePrice) } setBorDefaultGpoIgnorePrice(backend.ChainConfig(), params) @@ -190,7 +190,7 @@ func (t *transactionsByGasPrice) Push(x interface{}) { // not just its contents. 
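
// The gasprice hunks above drop the "[GasPriceOracle]" prefix from log
// messages but keep the clamp-and-warn sanitization of config values. That
// pattern on its own, with stdlib log/slog standing in for the project logger:
package main

import "log/slog"

// sanitizePercentile clamps a percentile into [0, 100] and warns when the
// provided value had to be adjusted, mirroring NewOracle's sanitization.
func sanitizePercentile(p int) int {
	switch {
	case p < 0:
		slog.Warn("Sanitizing invalid gasprice oracle sample percentile", "provided", p, "updated", 0)
		return 0
	case p > 100:
		slog.Warn("Sanitizing invalid gasprice oracle sample percentile", "provided", p, "updated", 100)
		return 100
	default:
		return p
	}
}

func main() {
	_ = sanitizePercentile(150) // logs a warning, returns 100
}
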
l, ok := x.(types.Transaction) if !ok { - log.Error("[GasPriceOracle] Type assertion failure", "err", "cannot get types.Transaction from interface") + log.Error("Type assertion failure", "err", "cannot get types.Transaction from interface") } t.txs = append(t.txs, l) } @@ -214,12 +214,12 @@ func (oracle *Oracle) getBlockPrices(ctx context.Context, blockNum uint64, limit ignoreUnder, overflow := uint256.FromBig(ingoreUnderBig) if overflow { err := errors.New("overflow in getBlockPrices, gasprice.go: ignoreUnder too large") - log.Error("[GasPriceOracle] getBlockPrices", "err", err) + log.Error("gasprice.go: getBlockPrices", "err", err) return err } block, err := oracle.backend.BlockByNumber(ctx, rpc.BlockNumber(blockNum)) if err != nil { - log.Error("[GasPriceOracle] getBlockPrices", "err", err) + log.Error("gasprice.go: getBlockPrices", "err", err) return err } @@ -237,7 +237,7 @@ func (oracle *Oracle) getBlockPrices(ctx context.Context, blockNum uint64, limit baseFee, overflow = uint256.FromBig(block.BaseFee()) if overflow { err := errors.New("overflow in getBlockPrices, gasprice.go: baseFee > 2^256-1") - log.Error("[GasPriceOracle] getBlockPrices", "err", err) + log.Error("gasprice.go: getBlockPrices", "err", err) return err } } @@ -287,7 +287,7 @@ func (s *sortingHeap) Pop() interface{} { // setBorDefaultGpoIgnorePrice enforces gpo IgnorePrice to be equal to BorDefaultGpoIgnorePrice (30gwei by default) func setBorDefaultGpoIgnorePrice(chainConfig *chain.Config, gasPriceConfig gaspricecfg.Config) { if chainConfig.Bor != nil && gasPriceConfig.IgnorePrice != gaspricecfg.BorDefaultGpoIgnorePrice { - log.Warn("[GasPriceOracle] Sanitizing invalid bor gasprice oracle ignore price", "provided", gasPriceConfig.IgnorePrice, "updated", gaspricecfg.BorDefaultGpoIgnorePrice) + log.Warn("Sanitizing invalid bor gasprice oracle ignore price", "provided", gasPriceConfig.IgnorePrice, "updated", gaspricecfg.BorDefaultGpoIgnorePrice) gasPriceConfig.IgnorePrice = gaspricecfg.BorDefaultGpoIgnorePrice } } diff --git a/eth/integrity/e3_ef_files.go b/eth/integrity/e3_ef_files.go index 216c31e4f5d..84e49c0a8fe 100644 --- a/eth/integrity/e3_ef_files.go +++ b/eth/integrity/e3_ef_files.go @@ -27,7 +27,7 @@ func E3EfFiles(ctx context.Context, chainDB kv.RwDB, agg *state.Aggregator) erro } defer tx.Rollback() - err = tx.(state.HasAggTx).AggTx().(*state.AggregatorRoTx).DebugEFAllValuesAreInRange(ctx, idx) + err = tx.(state.HasAggCtx).AggCtx().(*state.AggregatorRoTx).DebugEFAllValuesAreInRange(ctx, idx) if err != nil { return err } diff --git a/eth/integrity/e3_history_no_system_txs.go b/eth/integrity/e3_history_no_system_txs.go index 5f3739a1ffa..70906bb2030 100644 --- a/eth/integrity/e3_history_no_system_txs.go +++ b/eth/integrity/e3_history_no_system_txs.go @@ -38,7 +38,7 @@ func E3HistoryNoSystemTxs(ctx context.Context, chainDB kv.RwDB, agg *state.Aggre defer tx.Rollback() var minStep uint64 = math.MaxUint64 - keys, err := tx.(state.HasAggTx).AggTx().(*state.AggregatorRoTx).DomainRangeLatest(tx, kv.AccountsDomain, []byte{byte(j), byte(jj)}, []byte{byte(j), byte(jj + 1)}, -1) + keys, err := tx.(state.HasAggCtx).AggCtx().(*state.AggregatorRoTx).DomainRangeLatest(tx, kv.AccountsDomain, []byte{byte(j), byte(jj)}, []byte{byte(j), byte(jj + 1)}, -1) if err != nil { return err } diff --git a/eth/stagedsync/chain_reader.go b/eth/stagedsync/chain_reader.go index 072818304ed..1641cc2e604 100644 --- a/eth/stagedsync/chain_reader.go +++ b/eth/stagedsync/chain_reader.go @@ -36,6 +36,35 @@ func (cr ChainReader) CurrentHeader() 
*types.Header { return h } +// CurrentFinalizedHeader retrieves the current finalized header from the local chain. +func (cr ChainReader) CurrentFinalizedHeader() *types.Header { + hash := rawdb.ReadForkchoiceFinalized(cr.Db) + if hash == (libcommon.Hash{}) { + return nil + } + + number := rawdb.ReadHeaderNumber(cr.Db, hash) + if number == nil { + return nil + } + + return rawdb.ReadHeader(cr.Db, hash, *number) +} + +func (cr ChainReader) CurrentSafeHeader() *types.Header { + hash := rawdb.ReadForkchoiceSafe(cr.Db) + if hash == (libcommon.Hash{}) { + return nil + } + + number := rawdb.ReadHeaderNumber(cr.Db, hash) + if number == nil { + return nil + } + + return rawdb.ReadHeader(cr.Db, hash, *number) +} + // GetHeader retrieves a block header from the database by hash and number. func (cr ChainReader) GetHeader(hash libcommon.Hash, number uint64) *types.Header { h, _ := cr.BlockReader.Header(context.Background(), cr.Db, hash, number) diff --git a/eth/stagedsync/default_stages.go b/eth/stagedsync/default_stages.go index 9db7778f312..3b17c8110d2 100644 --- a/eth/stagedsync/default_stages.go +++ b/eth/stagedsync/default_stages.go @@ -6,6 +6,7 @@ import ( "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon-lib/common/dbg" + "github.com/ledgerwatch/erigon-lib/config3" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/wrap" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" @@ -163,7 +164,7 @@ func DefaultStages(ctx context.Context, { ID: stages.IntermediateHashes, Description: "Generate intermediate hashes and computing state root", - Disabled: true, + Disabled: bodies.historyV3 || config3.EnableHistoryV4InTest || dbg.StagesOnlyBlocks, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { if exec.chainConfig.IsOsaka(0) { _, err := SpawnVerkleTrie(s, u, txc.Tx, trieCfg, ctx, logger) @@ -186,7 +187,7 @@ func DefaultStages(ctx context.Context, ID: stages.CallTraces, Description: "Generate call traces index", DisabledDescription: "Work In Progress", - Disabled: true, + Disabled: bodies.historyV3 || dbg.StagesOnlyBlocks, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { return SpawnCallTraces(s, txc.Tx, callTraces, ctx, logger) }, @@ -200,7 +201,7 @@ func DefaultStages(ctx context.Context, { ID: stages.AccountHistoryIndex, Description: "Generate account history index", - Disabled: true, + Disabled: bodies.historyV3 || dbg.StagesOnlyBlocks, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { return SpawnAccountHistoryIndex(s, txc.Tx, history, ctx, logger) }, @@ -214,7 +215,7 @@ func DefaultStages(ctx context.Context, { ID: stages.StorageHistoryIndex, Description: "Generate storage history index", - Disabled: true, + Disabled: bodies.historyV3 || dbg.StagesOnlyBlocks, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { return SpawnStorageHistoryIndex(s, txc.Tx, history, ctx, logger) }, @@ -228,7 +229,7 @@ func DefaultStages(ctx context.Context, { ID: stages.LogIndex, Description: "Generate receipt logs index", - Disabled: true, + Disabled: bodies.historyV3 || dbg.StagesOnlyBlocks, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { return SpawnLogIndex(s, txc.Tx, logIndex, ctx, 0, logger) }, 
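
// In default_stages.go above, the legacy index stages flip from a hard-coded
// Disabled: true to a predicate on historyV3 and the blocks-only debug switch.
// The same idea as a standalone sketch (Stage here is an illustrative type,
// not the stagedsync one):
package main

import "fmt"

type Stage struct {
	ID       string
	Disabled bool
}

func defaultStages(historyV3, stagesOnlyBlocks bool) []Stage {
	legacyOff := historyV3 || stagesOnlyBlocks // one predicate gates every legacy stage
	return []Stage{
		{ID: "IntermediateHashes", Disabled: legacyOff},
		{ID: "CallTraces", Disabled: legacyOff},
		{ID: "AccountHistoryIndex", Disabled: legacyOff},
		{ID: "StorageHistoryIndex", Disabled: legacyOff},
		{ID: "LogIndex", Disabled: legacyOff},
	}
}

func main() {
	for _, s := range defaultStages(true, false) {
		fmt.Printf("%s disabled=%v\n", s.ID, s.Disabled)
	}
}
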
@@ -538,7 +539,7 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers { ID: stages.HashState, Description: "Hash the key in the state", - Disabled: true, + Disabled: exec.historyV3 && config3.EnableHistoryV4InTest, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { return SpawnHashStateStage(s, txc.Tx, hashState, ctx, logger) }, @@ -552,7 +553,7 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers { ID: stages.IntermediateHashes, Description: "Generate intermediate hashes and computing state root", - Disabled: true, + Disabled: exec.historyV3 && config3.EnableHistoryV4InTest, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { if exec.chainConfig.IsOsaka(0) { _, err := SpawnVerkleTrie(s, u, txc.Tx, trieCfg, ctx, logger) @@ -575,7 +576,7 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers ID: stages.CallTraces, Description: "Generate call traces index", DisabledDescription: "Work In Progress", - Disabled: true, + Disabled: exec.historyV3, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { return SpawnCallTraces(s, txc.Tx, callTraces, ctx, logger) }, @@ -589,7 +590,7 @@ func UploaderPipelineStages(ctx context.Context, snapshots SnapshotsCfg, headers { ID: stages.AccountHistoryIndex, Description: "Generate account history index", - Disabled: true, + Disabled: exec.historyV3, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { return SpawnAccountHistoryIndex(s, txc.Tx, history, ctx, logger) }, @@ -713,7 +714,7 @@ func StateStages(ctx context.Context, headers HeadersCfg, bodies BodiesCfg, bloc { ID: stages.HashState, Description: "Hash the key in the state", - Disabled: true, + Disabled: bodies.historyV3, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { return SpawnHashStateStage(s, txc.Tx, hashState, ctx, logger) }, @@ -724,7 +725,7 @@ func StateStages(ctx context.Context, headers HeadersCfg, bodies BodiesCfg, bloc { ID: stages.IntermediateHashes, Description: "Generate intermediate hashes and computing state root", - Disabled: true, + Disabled: bodies.historyV3, Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, txc wrap.TxContainer, logger log.Logger) error { _, err := SpawnIntermediateHashesStage(s, u, txc.Tx, trieCfg, ctx, logger) return err diff --git a/eth/stagedsync/exec3.go b/eth/stagedsync/exec3.go index b5fc938faa1..15829ed1f29 100644 --- a/eth/stagedsync/exec3.go +++ b/eth/stagedsync/exec3.go @@ -28,6 +28,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon-lib/metrics" @@ -890,15 +891,20 @@ Loop: agg.BuildFilesInBackground(outputTxNum.Load()) } - aggCtx := agg.BeginFilesRo() - defer aggCtx.Close() - tt = time.Now() for haveMoreToPrune := true; haveMoreToPrune; { - //very aggressive prune, because: - // if prune is slow - means DB > RAM and skip pruning will only make things worse - // db will grow -> prune will get slower -> db 
will grow -> ... - if haveMoreToPrune, err = aggCtx.PruneSmallBatchesDb(ctx, 10*time.Minute, chainDb); err != nil { + if err := chainDb.Update(ctx, func(tx kv.RwTx) error { + //very aggressive prune, because: + // if prune is slow - means DB > RAM and skip pruning will only make things worse + // db will grow -> prune will get slower -> db will grow -> ... + if haveMoreToPrune, err = tx.(state2.HasAggCtx). + AggCtx().(*state2.AggregatorRoTx). + PruneSmallBatches(ctx, 10*time.Minute, tx); err != nil { + + return err + } + return nil + }); err != nil { return err } } @@ -980,11 +986,44 @@ Loop: // nolint func dumpPlainStateDebug(tx kv.RwTx, doms *state2.SharedDomains) { + blockNum, err := stages.GetStageProgress(tx, stages.Execution) + if err != nil { + panic(err) + } + histV3, err := kvcfg.HistoryV3.Enabled(tx) + if err != nil { + panic(err) + } + fmt.Printf("[dbg] plain state: %d\n", blockNum) + defer fmt.Printf("[dbg] plain state end\n") + + if !histV3 { + if err := tx.ForEach(kv.PlainState, nil, func(k, v []byte) error { + if len(k) == 20 { + a := accounts.NewAccount() + a.DecodeForStorage(v) + fmt.Printf("%x, %d, %d, %d, %x\n", k, &a.Balance, a.Nonce, a.Incarnation, a.CodeHash) + } + return nil + }); err != nil { + panic(err) + } + if err := tx.ForEach(kv.PlainState, nil, func(k, v []byte) error { + if len(k) > 20 { + fmt.Printf("%x, %x\n", k, v) + } + return nil + }); err != nil { + panic(err) + } + return + } + if doms != nil { doms.Flush(context.Background(), tx) } { - it, err := tx.(state2.HasAggTx).AggTx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.AccountsDomain, nil, nil, -1) + it, err := tx.(state2.HasAggCtx).AggCtx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.AccountsDomain, nil, nil, -1) if err != nil { panic(err) } @@ -999,7 +1038,7 @@ func dumpPlainStateDebug(tx kv.RwTx, doms *state2.SharedDomains) { } } { - it, err := tx.(state2.HasAggTx).AggTx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.StorageDomain, nil, nil, -1) + it, err := tx.(state2.HasAggCtx).AggCtx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.StorageDomain, nil, nil, -1) if err != nil { panic(1) } @@ -1012,7 +1051,7 @@ func dumpPlainStateDebug(tx kv.RwTx, doms *state2.SharedDomains) { } } { - it, err := tx.(state2.HasAggTx).AggTx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.CommitmentDomain, nil, nil, -1) + it, err := tx.(state2.HasAggCtx).AggCtx().(*state2.AggregatorRoTx).DomainRangeLatest(tx, kv.CommitmentDomain, nil, nil, -1) if err != nil { panic(1) } @@ -1093,7 +1132,7 @@ func flushAndCheckCommitmentV3(ctx context.Context, header *types.Header, applyT return false, nil } - unwindToLimit, err := applyTx.(state2.HasAggTx).AggTx().(*state2.AggregatorRoTx).CanUnwindDomainsToBlockNum(applyTx) + unwindToLimit, err := applyTx.(state2.HasAggCtx).AggCtx().(*state2.AggregatorRoTx).CanUnwindDomainsToBlockNum(applyTx) if err != nil { return false, err } @@ -1104,7 +1143,7 @@ func flushAndCheckCommitmentV3(ctx context.Context, header *types.Header, applyT unwindTo := maxBlockNum - jump // protect from too far unwind - allowedUnwindTo, ok, err := applyTx.(state2.HasAggTx).AggTx().(*state2.AggregatorRoTx).CanUnwindBeforeBlockNum(unwindTo, applyTx) + allowedUnwindTo, ok, err := applyTx.(state2.HasAggCtx).AggCtx().(*state2.AggregatorRoTx).CanUnwindBeforeBlockNum(unwindTo, applyTx) if err != nil { return false, err } diff --git a/eth/stagedsync/stage_bodies.go b/eth/stagedsync/stage_bodies.go index 076d9cdde94..f600e85086b 100644 --- a/eth/stagedsync/stage_bodies.go +++ 
b/eth/stagedsync/stage_bodies.go @@ -36,6 +36,7 @@ type BodiesCfg struct { chanConfig chain.Config blockReader services.FullBlockReader blockWriter *blockio.BlockWriter + historyV3 bool loopBreakCheck func(int) bool } @@ -44,12 +45,13 @@ func StageBodiesCfg(db kv.RwDB, bd *bodydownload.BodyDownload, blockPropagator adapter.BlockPropagator, timeout int, chanConfig chain.Config, blockReader services.FullBlockReader, + historyV3 bool, blockWriter *blockio.BlockWriter, loopBreakCheck func(int) bool) BodiesCfg { return BodiesCfg{ db: db, bd: bd, bodyReqSend: bodyReqSend, penalise: penalise, blockPropagator: blockPropagator, timeout: timeout, chanConfig: chanConfig, blockReader: blockReader, - blockWriter: blockWriter, loopBreakCheck: loopBreakCheck} + historyV3: historyV3, blockWriter: blockWriter, loopBreakCheck: loopBreakCheck} } // BodiesForward progresses Bodies stage in the forward direction @@ -245,7 +247,7 @@ func BodiesForward( if err != nil { return false, fmt.Errorf("WriteRawBodyIfNotExists: %w", err) } - if ok { + if cfg.historyV3 && ok { if err := rawdb.AppendCanonicalTxNums(tx, blockHeight); err != nil { return false, err } diff --git a/eth/stagedsync/stage_call_traces_test.go b/eth/stagedsync/stage_call_traces_test.go index 76333931d23..0de80a9a9e7 100644 --- a/eth/stagedsync/stage_call_traces_test.go +++ b/eth/stagedsync/stage_call_traces_test.go @@ -33,11 +33,12 @@ func genTestCallTraceSet(t *testing.T, tx kv.RwTx, to uint64) { } func TestCallTrace(t *testing.T) { - t.Skip("this stage is disabled in E3") - logger := log.New() ctx, require := context.Background(), require.New(t) - db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + histV3, db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + if histV3 { + t.Skip() + } tx, err := db.BeginRw(context.Background()) require.NoError(err) defer tx.Rollback() diff --git a/eth/stagedsync/stage_custom_trace.go b/eth/stagedsync/stage_custom_trace.go index a85064ad117..db739660161 100644 --- a/eth/stagedsync/stage_custom_trace.go +++ b/eth/stagedsync/stage_custom_trace.go @@ -111,7 +111,7 @@ func SpawnCustomTrace(s *StageState, txc wrap.TxContainer, cfg CustomTraceCfg, c // if err != nil { // return err // } - // lastTotal, ok, err := tx.HistorySeek(kv.GasUsedHistory, key, lastTxNum) + // lastTotal, ok, err := tx.HistoryGet(kv.GasUsedHistory, key, lastTxNum) // if err != nil { // return err // } diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index 6dc3fc44309..46ff154f08b 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -2,6 +2,7 @@ package stagedsync import ( "context" + "encoding/binary" "errors" "fmt" "os" @@ -19,19 +20,26 @@ import ( "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/common/dbg" + "github.com/ledgerwatch/erigon-lib/common/hexutility" + "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/common/metrics" "github.com/ledgerwatch/erigon-lib/diagnostics" + "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/dbutils" "github.com/ledgerwatch/erigon-lib/kv/membatch" "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" + "github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2" libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon-lib/wrap" + "github.com/ledgerwatch/erigon/common/changeset" 
+ "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/core/vm" @@ -100,6 +108,7 @@ func StageExecuteBlocksCfg( stateStream bool, badBlockHalt bool, + historyV3 bool, dirs datadir.Dirs, blockReader services.FullBlockReader, hd headerDownloader, @@ -123,7 +132,7 @@ func StageExecuteBlocksCfg( blockReader: blockReader, hd: hd, genesis: genesis, - historyV3: true, + historyV3: historyV3, syncCfg: syncCfg, agg: agg, silkworm: silkworm, @@ -154,13 +163,15 @@ func executeBlock( return h } - getTracer := func(txIndex int, txHash common.Hash) (vm.EVMLogger, error) { - return tracelogger.NewStructLogger(&tracelogger.LogConfig{}), nil + getTracer := func(txIndex int, txHash common.Hash) (*tracing.Hooks, error) { + return tracelogger.NewStructLogger(&tracelogger.LogConfig{}).Hooks(), nil } callTracer := calltracer.NewCallTracer() vmConfig.Debug = true - vmConfig.Tracer = callTracer + if vmConfig.Tracer == nil { + vmConfig.Tracer = callTracer.Tracer().Hooks + } var receipts types.Receipts var stateSyncReceipt *types.Receipt @@ -335,9 +346,8 @@ func unwindExec3(u *UnwindState, s *StageState, txc wrap.TxContainer, ctx contex //if ok && bn != u.UnwindPoint { // return fmt.Errorf("commitment can unwind only to block: %d, requested: %d. UnwindTo was called with wrong value", bn, u.UnwindPoint) //} - start := time.Now() - unwindToLimit, err := txc.Tx.(libstate.HasAggTx).AggTx().(*libstate.AggregatorRoTx).CanUnwindDomainsToBlockNum(txc.Tx) + unwindToLimit, err := txc.Tx.(libstate.HasAggCtx).AggCtx().(*libstate.AggregatorRoTx).CanUnwindDomainsToBlockNum(txc.Tx) if err != nil { return err } @@ -374,8 +384,8 @@ func unwindExec3(u *UnwindState, s *StageState, txc wrap.TxContainer, ctx contex if err := rawdb.DeleteNewerEpochs(txc.Tx, u.UnwindPoint+1); err != nil { return fmt.Errorf("delete newer epochs: %w", err) } - fmt.Printf("unwindv3: %d -> %d done within %s\n", s.BlockNumber, u.UnwindPoint, time.Since(start)) - return nil + + return domains.Flush(ctx, txc.Tx) } func senderStageProgress(tx kv.Tx, db kv.RoDB) (prevStageProgress uint64, err error) { @@ -821,6 +831,10 @@ func UnwindExecutionStage(u *UnwindState, s *StageState, txc wrap.TxContainer, c } func unwindExecutionStage(u *UnwindState, s *StageState, txc wrap.TxContainer, ctx context.Context, cfg ExecuteBlockCfg, initialCycle bool, logger log.Logger) error { + logPrefix := s.LogPrefix() + stateBucket := kv.PlainState + storageKeyLength := length.Addr + length.Incarnation + length.Hash + var accumulator *shards.Accumulator if cfg.stateStream && s.BlockNumber-u.UnwindPoint < stateStreamLimit { accumulator = cfg.accumulator @@ -837,7 +851,121 @@ func unwindExecutionStage(u *UnwindState, s *StageState, txc wrap.TxContainer, c } //TODO: why we don't call accumulator.ChangeCode??? 
- return unwindExec3(u, s, txc, ctx, accumulator, logger) + if cfg.historyV3 { + return unwindExec3(u, s, txc, ctx, accumulator, logger) + } + + changes := etl.NewCollector(logPrefix, cfg.dirs.Tmp, etl.NewOldestEntryBuffer(etl.BufferOptimalSize), logger) + defer changes.Close() + errRewind := changeset.RewindData(txc.Tx, s.BlockNumber, u.UnwindPoint, changes, ctx.Done()) + if errRewind != nil { + return fmt.Errorf("getting rewind data: %w", errRewind) + } + + if err := changes.Load(txc.Tx, stateBucket, func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + if len(k) == 20 { + if len(v) > 0 { + var acc accounts.Account + if err := acc.DecodeForStorage(v); err != nil { + return err + } + + // Fetch the code hash + recoverCodeHashPlain(&acc, txc.Tx, k) + var address common.Address + copy(address[:], k) + + // cleanup contract code bucket + original, err := state.NewPlainStateReader(txc.Tx).ReadAccountData(address) + if err != nil { + return fmt.Errorf("read account for %x: %w", address, err) + } + if original != nil { + // clean up all the code incarnations original incarnation and the new one + for incarnation := original.Incarnation; incarnation > acc.Incarnation && incarnation > 0; incarnation-- { + err = txc.Tx.Delete(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(address[:], incarnation)) + if err != nil { + return fmt.Errorf("writeAccountPlain for %x: %w", address, err) + } + } + } + + newV := make([]byte, acc.EncodingLengthForStorage()) + acc.EncodeForStorage(newV) + if accumulator != nil { + accumulator.ChangeAccount(address, acc.Incarnation, newV) + } + if err := next(k, k, newV); err != nil { + return err + } + } else { + if accumulator != nil { + var address common.Address + copy(address[:], k) + accumulator.DeleteAccount(address) + } + if err := next(k, k, nil); err != nil { + return err + } + } + return nil + } + if accumulator != nil { + var address common.Address + var incarnation uint64 + var location common.Hash + copy(address[:], k[:length.Addr]) + incarnation = binary.BigEndian.Uint64(k[length.Addr:]) + copy(location[:], k[length.Addr+length.Incarnation:]) + logger.Debug(fmt.Sprintf("un ch st: %x, %d, %x, %x\n", address, incarnation, location, common.Copy(v))) + accumulator.ChangeStorage(address, incarnation, location, common.Copy(v)) + } + if len(v) > 0 { + if err := next(k, k[:storageKeyLength], v); err != nil { + return err + } + } else { + if err := next(k, k[:storageKeyLength], nil); err != nil { + return err + } + } + return nil + + }, etl.TransformArgs{Quit: ctx.Done()}); err != nil { + return err + } + + if err := historyv2.Truncate(txc.Tx, u.UnwindPoint+1); err != nil { + return err + } + + if err := rawdb.TruncateReceipts(txc.Tx, u.UnwindPoint+1); err != nil { + return fmt.Errorf("truncate receipts: %w", err) + } + if err := rawdb.TruncateBorReceipts(txc.Tx, u.UnwindPoint+1); err != nil { + return fmt.Errorf("truncate bor receipts: %w", err) + } + if err := rawdb.DeleteNewerEpochs(txc.Tx, u.UnwindPoint+1); err != nil { + return fmt.Errorf("delete newer epochs: %w", err) + } + + // Truncate CallTraceSet + keyStart := hexutility.EncodeTs(u.UnwindPoint + 1) + c, err := txc.Tx.RwCursorDupSort(kv.CallTraceSet) + if err != nil { + return err + } + defer c.Close() + for k, _, err := c.Seek(keyStart); k != nil; k, _, err = c.NextNoDup() { + if err != nil { + return err + } + if err = txc.Tx.Delete(kv.CallTraceSet, k); err != nil { + return err + } + } + + return nil } func recoverCodeHashPlain(acc *accounts.Account, db kv.Tx, key 
[]byte) {
@@ -851,6 +979,7 @@ func recoverCodeHashPlain(acc *accounts.Account, db kv.Tx, key []byte) {
 }
 
 func PruneExecutionStage(s *PruneState, tx kv.RwTx, cfg ExecuteBlockCfg, ctx context.Context, initialCycle bool) (err error) {
+	logPrefix := s.LogPrefix()
 	useExternalTx := tx != nil
 	if !useExternalTx {
 		tx, err = cfg.db.BeginRw(ctx)
@@ -863,12 +992,42 @@ func PruneExecutionStage(s *PruneState, tx kv.RwTx, cfg ExecuteBlockCfg, ctx con
 	logEvery := time.NewTicker(logInterval)
 	defer logEvery.Stop()
 
-	pruneTimeout := 3 * time.Second
-	if initialCycle {
-		pruneTimeout = 12 * time.Hour
-	}
-	if _, err = tx.(*temporal.Tx).AggTx().(*libstate.AggregatorRoTx).PruneSmallBatches(ctx, pruneTimeout, tx); err != nil { // prune part of retired data, before commit
-		return err
+	if cfg.historyV3 {
+		pruneTimeout := 3 * time.Second
+		if initialCycle {
+			pruneTimeout = 12 * time.Hour
+		}
+		if _, err = tx.(*temporal.Tx).AggCtx().(*libstate.AggregatorRoTx).PruneSmallBatches(ctx, pruneTimeout, tx); err != nil { // prune part of retired data, before commit
+			return err
+		}
+	} else {
+		if cfg.prune.History.Enabled() {
+			if err = rawdb.PruneTableDupSort(tx, kv.AccountChangeSet, logPrefix, cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil {
+				return err
+			}
+			if err = rawdb.PruneTableDupSort(tx, kv.StorageChangeSet, logPrefix, cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil {
+				return err
+			}
+		}
+
+		if cfg.prune.Receipts.Enabled() {
+			if err = rawdb.PruneTable(tx, kv.Receipts, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil {
+				return err
+			}
+			if err = rawdb.PruneTable(tx, kv.BorReceipts, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxUint32); err != nil {
+				return err
+			}
+			// EDIT: Don't prune yet, let the LogIndex stage take care of it
+			// LogIndex.Prune will read everything that is not pruned here
+			// if err = rawdb.PruneTable(tx, kv.Log, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil {
+			// 	return err
+			// }
+		}
+		if cfg.prune.CallTraces.Enabled() {
+			if err = rawdb.PruneTableDupSort(tx, kv.CallTraceSet, logPrefix, cfg.prune.CallTraces.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil {
+				return err
+			}
+		}
 	}
 
 	if err = s.Done(tx); err != nil {
diff --git a/eth/stagedsync/stage_hashstate.go b/eth/stagedsync/stage_hashstate.go
index 091e74bfcab..6eefc047807 100644
--- a/eth/stagedsync/stage_hashstate.go
+++ b/eth/stagedsync/stage_hashstate.go
@@ -33,12 +33,15 @@ import (
 type HashStateCfg struct {
 	db   kv.RwDB
 	dirs datadir.Dirs
+
+	historyV3 bool
 }
 
-func StageHashStateCfg(db kv.RwDB, dirs datadir.Dirs) HashStateCfg {
+func StageHashStateCfg(db kv.RwDB, dirs datadir.Dirs, historyV3 bool) HashStateCfg {
 	return HashStateCfg{
-		db:   db,
-		dirs: dirs,
+		db:        db,
+		dirs:      dirs,
+		historyV3: historyV3,
 	}
 }
 
@@ -123,13 +126,25 @@ func unwindHashStateStageImpl(logPrefix string, u *UnwindState, s *StageState, t
 	// Currently it does not require unwinding because it does not create any Intermediate Hash records
 	// and recomputes the state root from scratch
 	prom := NewPromoter(tx, cfg.dirs, ctx, logger)
-	if err := prom.UnwindOnHistoryV3(logPrefix, s.BlockNumber, u.UnwindPoint, false, true); err != nil {
+	if cfg.historyV3 {
+		if err := prom.UnwindOnHistoryV3(logPrefix, s.BlockNumber, u.UnwindPoint, false, true); err != nil {
+			return err
+		}
+		if err := prom.UnwindOnHistoryV3(logPrefix, s.BlockNumber, u.UnwindPoint, false, false); err != nil {
+			return err
+		}
+		if err := prom.UnwindOnHistoryV3(logPrefix, s.BlockNumber, u.UnwindPoint, true, false); err != nil {
+			return err
+		}
+		return nil
+	}
+	if err := prom.Unwind(logPrefix, s, u, false /* storage */, true /* codes */); err != nil {
 		return err
 	}
-	if err := prom.UnwindOnHistoryV3(logPrefix, s.BlockNumber, u.UnwindPoint, false, false); err != nil {
+	if err := prom.Unwind(logPrefix, s, u, false /* storage */, false /* codes */); err != nil {
 		return err
 	}
-	if err := prom.UnwindOnHistoryV3(logPrefix, s.BlockNumber, u.UnwindPoint, true, false); err != nil {
+	if err := prom.Unwind(logPrefix, s, u, true /* storage */, false /* codes */); err != nil {
 		return err
 	}
 	return nil
@@ -886,10 +901,23 @@ func (p *Promoter) Unwind(logPrefix string, s *StageState, u *UnwindState, stora
 func promoteHashedStateIncrementally(logPrefix string, from, to uint64, tx kv.RwTx, cfg HashStateCfg, ctx context.Context, logger log.Logger) error {
 	prom := NewPromoter(tx, cfg.dirs, ctx, logger)
-	if err := prom.PromoteOnHistoryV3(logPrefix, from, to, false); err != nil {
+	if cfg.historyV3 {
+		if err := prom.PromoteOnHistoryV3(logPrefix, from, to, false); err != nil {
+			return err
+		}
+		if err := prom.PromoteOnHistoryV3(logPrefix, from, to, true); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	if err := prom.Promote(logPrefix, from, to, false, true); err != nil {
+		return err
+	}
+	if err := prom.Promote(logPrefix, from, to, false, false); err != nil {
 		return err
 	}
-	if err := prom.PromoteOnHistoryV3(logPrefix, from, to, true); err != nil {
+	if err := prom.Promote(logPrefix, from, to, true, false); err != nil {
 		return err
 	}
 	return nil
diff --git a/eth/stagedsync/stage_hashstate_test.go b/eth/stagedsync/stage_hashstate_test.go
index 35eb5a412b4..ec037d7c196 100644
--- a/eth/stagedsync/stage_hashstate_test.go
+++ b/eth/stagedsync/stage_hashstate_test.go
@@ -21,13 +21,14 @@ func TestPromoteHashedStateClearState(t *testing.T) {
 	}
 	logger := log.New()
 	dirs := datadir.New(t.TempDir())
+	historyV3 := false
 	_, tx1 := memdb.NewTestTx(t)
 	db2, tx2 := memdb.NewTestTx(t)
 
 	generateBlocks(t, 1, 50, hashedWriterGen(tx1), changeCodeWithIncarnations)
 	generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeWithIncarnations)
 
-	err := PromoteHashedStateCleanly("logPrefix", tx2, StageHashStateCfg(db2, dirs), context.Background(), logger)
+	err := PromoteHashedStateCleanly("logPrefix", tx2, StageHashStateCfg(db2, dirs, historyV3), context.Background(), logger)
 	if err != nil {
 		t.Errorf("error while promoting state: %v", err)
 	}
@@ -41,13 +42,14 @@ func TestPromoteHashedStateIncremental(t *testing.T) {
 	}
 	logger := log.New()
 	dirs := datadir.New(t.TempDir())
+	historyV3 := false
 	_, tx1 := memdb.NewTestTx(t)
 	db2, tx2 := memdb.NewTestTx(t)
 
 	generateBlocks(t, 1, 50, hashedWriterGen(tx1), changeCodeWithIncarnations)
 	generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeWithIncarnations)
 
-	cfg := StageHashStateCfg(db2, dirs)
+	cfg := StageHashStateCfg(db2, dirs, historyV3)
 	err := PromoteHashedStateCleanly("logPrefix", tx2, cfg, context.Background(), logger)
 	if err != nil {
 		t.Errorf("error while promoting state: %v", err)
@@ -70,6 +72,7 @@ func TestPromoteHashedStateIncrementalMixed(t *testing.T) {
 	}
 	logger := log.New()
 	dirs := datadir.New(t.TempDir())
+	historyV3 := false
 	_, tx1 := memdb.NewTestTx(t)
 	db2, tx2 := memdb.NewTestTx(t)
 
@@ -77,7 +80,7 @@ func TestPromoteHashedStateIncrementalMixed(t *testing.T) {
 	generateBlocks(t, 1, 50, hashedWriterGen(tx2), changeCodeWithIncarnations)
 	generateBlocks(t, 51, 50, plainWriterGen(tx2), changeCodeWithIncarnations)
 
-	err := promoteHashedStateIncrementally("logPrefix", 50, 101, tx2, StageHashStateCfg(db2, dirs), context.Background(), logger)
+	err := promoteHashedStateIncrementally("logPrefix", 50, 101, tx2, StageHashStateCfg(db2, dirs, historyV3), context.Background(), logger)
 	if err != nil {
 		t.Errorf("error while promoting state: %v", err)
 	}
@@ -90,19 +93,20 @@ func TestUnwindHashed(t *testing.T) {
 	}
 	logger := log.New()
 	dirs := datadir.New(t.TempDir())
+	historyV3 := false
 	_, tx1 := memdb.NewTestTx(t)
 	db2, tx2 := memdb.NewTestTx(t)
 
 	generateBlocks(t, 1, 50, hashedWriterGen(tx1), changeCodeWithIncarnations)
 	generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeWithIncarnations)
 
-	err := PromoteHashedStateCleanly("logPrefix", tx2, StageHashStateCfg(db2, dirs), context.Background(), logger)
+	err := PromoteHashedStateCleanly("logPrefix", tx2, StageHashStateCfg(db2, dirs, historyV3), context.Background(), logger)
 	if err != nil {
 		t.Errorf("error while promoting state: %v", err)
 	}
 	u := &UnwindState{UnwindPoint: 50}
 	s := &StageState{BlockNumber: 100}
-	err = unwindHashStateStageImpl("logPrefix", u, s, tx2, StageHashStateCfg(db2, dirs), context.Background(), logger)
+	err = unwindHashStateStageImpl("logPrefix", u, s, tx2, StageHashStateCfg(db2, dirs, historyV3), context.Background(), logger)
 	if err != nil {
 		t.Errorf("error while unwind state: %v", err)
 	}
@@ -114,6 +118,7 @@ func TestPromoteIncrementallyShutdown(t *testing.T) {
 	if config3.EnableHistoryV4InTest {
 		t.Skip("e3: doesn't have this stage")
 	}
+	historyV3 := false
 
 	tt := []struct {
 		name           string
@@ -135,7 +140,7 @@ func TestPromoteIncrementallyShutdown(t *testing.T) {
 			}
 			db, tx := memdb.NewTestTx(t)
 			generateBlocks(t, 1, 10, plainWriterGen(tx), changeCodeWithIncarnations)
-			if err := promoteHashedStateIncrementally("logPrefix", 1, 10, tx, StageHashStateCfg(db, dirs), ctx, log.New()); !errors.Is(err, tc.errExp) {
+			if err := promoteHashedStateIncrementally("logPrefix", 1, 10, tx, StageHashStateCfg(db, dirs, historyV3), ctx, log.New()); !errors.Is(err, tc.errExp) {
				t.Errorf("error does not match expected error while shutdown promoteHashedStateIncrementally, got: %v, expected: %v", err, tc.errExp)
 			}
 		})
@@ -149,6 +154,7 @@ func TestPromoteHashedStateCleanlyShutdown(t *testing.T) {
 		t.Skip("e3: doesn't have this stage")
 	}
 	logger := log.New()
+	historyV3 := false
 
 	tt := []struct {
 		name           string
@@ -174,7 +180,7 @@ func TestPromoteHashedStateCleanlyShutdown(t *testing.T) {
 
 			generateBlocks(t, 1, 10, plainWriterGen(tx), changeCodeWithIncarnations)
 
-			if err := PromoteHashedStateCleanly("logPrefix", tx, StageHashStateCfg(db, dirs), ctx, logger); !errors.Is(err, tc.errExp) {
+			if err := PromoteHashedStateCleanly("logPrefix", tx, StageHashStateCfg(db, dirs, historyV3), ctx, logger); !errors.Is(err, tc.errExp) {
				t.Errorf("error does not match expected error while shutdown promoteHashedStateCleanly , got: %v, expected: %v", err, tc.errExp)
 			}
 
@@ -187,6 +193,7 @@ func TestUnwindHashStateShutdown(t *testing.T) {
 		t.Skip("e3: doesn't have this stage")
 	}
 	logger := log.New()
+	historyV3 := false
 	tt := []struct {
 		name           string
 		cancelFuncExec bool
@@ -210,7 +217,7 @@ func TestUnwindHashStateShutdown(t *testing.T) {
 			db, tx := memdb.NewTestTx(t)
 			generateBlocks(t, 1, 10, plainWriterGen(tx), changeCodeWithIncarnations)
 
-			cfg := StageHashStateCfg(db, dirs)
+			cfg := StageHashStateCfg(db, dirs, historyV3)
 			err := PromoteHashedStateCleanly("logPrefix", tx, cfg, ctx, logger)
 			if tc.cancelFuncExec {
 				require.ErrorIs(t, err, context.Canceled)
diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go
index 970bc698965..54079d67223 100644
--- a/eth/stagedsync/stage_headers.go
+++ b/eth/stagedsync/stage_headers.go
@@ -47,6 +47,7 @@ type HeadersCfg struct {
 	batchSize      datasize.ByteSize
 	noP2PDiscovery bool
 	tmpdir         string
+	historyV3      bool
 
 	blockReader services.FullBlockReader
 	blockWriter *blockio.BlockWriter
@@ -70,6 +71,7 @@ func StageHeadersCfg(
 	blockReader services.FullBlockReader,
 	blockWriter *blockio.BlockWriter,
 	tmpdir string,
+	historyV3 bool,
 	notifications *shards.Notifications,
 	loopBreakCheck func(int) bool) HeadersCfg {
 	return HeadersCfg{
@@ -83,6 +85,7 @@ func StageHeadersCfg(
 		penalize:       penalize,
 		batchSize:      batchSize,
 		tmpdir:         tmpdir,
+		historyV3:      historyV3,
 		noP2PDiscovery: noP2PDiscovery,
 		blockReader:    blockReader,
 		blockWriter:    blockWriter,
@@ -324,24 +327,29 @@ Loop:
 			timer.Stop()
 		}
 		if headerInserter.Unwind() {
-			unwindTo := headerInserter.UnwindPoint()
-			doms, err := state.NewSharedDomains(tx, logger) //TODO: if remove this line TestBlockchainHeaderchainReorgConsistency failing
-			if err != nil {
-				return err
-			}
-			defer doms.Close()
+			if cfg.historyV3 {
+				unwindTo := headerInserter.UnwindPoint()
+				doms, err := state.NewSharedDomains(tx, logger) //TODO: if this line is removed, TestBlockchainHeaderchainReorgConsistency fails
+				if err != nil {
+					return err
+				}
+				defer doms.Close()
 
-			allowedUnwindTo, ok, err := tx.(state.HasAggTx).AggTx().(*state.AggregatorRoTx).CanUnwindBeforeBlockNum(unwindTo, tx)
-			if err != nil {
-				return err
-			}
-			if !ok {
-				return fmt.Errorf("too far unwind. requested=%d, minAllowed=%d", unwindTo, allowedUnwindTo)
-			}
-			if err := u.UnwindTo(allowedUnwindTo, StagedUnwind, tx); err != nil {
-				return err
+				allowedUnwindTo, ok, err := tx.(state.HasAggCtx).AggCtx().(*state.AggregatorRoTx).CanUnwindBeforeBlockNum(unwindTo, tx)
+				if err != nil {
+					return err
+				}
+				if !ok {
+					return fmt.Errorf("too far unwind. requested=%d, minAllowed=%d", unwindTo, allowedUnwindTo)
+				}
+				if err := u.UnwindTo(allowedUnwindTo, StagedUnwind, tx); err != nil {
+					return err
+				}
+			} else {
+				if err := u.UnwindTo(headerInserter.UnwindPoint(), StagedUnwind, tx); err != nil {
+					return err
+				}
 			}
-		}
 		if headerInserter.GetHighest() != 0 {
 			if !headerInserter.Unwind() {
@@ -589,6 +597,32 @@ func NewChainReaderImpl(config *chain.Config, tx kv.Tx, blockReader services.Ful
 func (cr ChainReaderImpl) Config() *chain.Config        { return cr.config }
 func (cr ChainReaderImpl) CurrentHeader() *types.Header { panic("") }
+func (cr ChainReaderImpl) CurrentFinalizedHeader() *types.Header {
+	hash := rawdb.ReadForkchoiceFinalized(cr.tx)
+	if hash == (libcommon.Hash{}) {
+		return nil
+	}
+
+	number := rawdb.ReadHeaderNumber(cr.tx, hash)
+	if number == nil {
+		return nil
+	}
+
+	return rawdb.ReadHeader(cr.tx, hash, *number)
+}
+func (cr ChainReaderImpl) CurrentSafeHeader() *types.Header {
+	hash := rawdb.ReadForkchoiceSafe(cr.tx)
+	if hash == (libcommon.Hash{}) {
+		return nil
+	}
+
+	number := rawdb.ReadHeaderNumber(cr.tx, hash)
+	if number == nil {
+		return nil
+	}
+
+	return rawdb.ReadHeader(cr.tx, hash, *number)
+}
 func (cr ChainReaderImpl) GetHeader(hash libcommon.Hash, number uint64) *types.Header {
 	if cr.blockReader != nil {
 		h, _ := cr.blockReader.Header(context.Background(), cr.tx, hash, number)
diff --git a/eth/stagedsync/stage_mining_create_block.go b/eth/stagedsync/stage_mining_create_block.go
index ea1a5b3e032..a2edf156ded 100644
--- a/eth/stagedsync/stage_mining_create_block.go
+++ b/eth/stagedsync/stage_mining_create_block.go
@@ -32,7 +32,6 @@ type MiningBlock struct {
 	Receipts    types.Receipts
 	Withdrawals []*types.Withdrawal
 	PreparedTxs types.TransactionsStream
-	Requests    []*types.Request
 }
 
 type MiningState struct {
diff --git a/eth/stagedsync/stage_mining_exec.go b/eth/stagedsync/stage_mining_exec.go
index f6a21970149..0d474e6ad1a 100644
--- a/eth/stagedsync/stage_mining_exec.go
+++ b/eth/stagedsync/stage_mining_exec.go
@@ -20,6 +20,7 @@ import (
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/common/metrics"
 	"github.com/ledgerwatch/erigon-lib/kv"
+	"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
 	types2 "github.com/ledgerwatch/erigon-lib/types"
 	"github.com/ledgerwatch/erigon/consensus"
 	"github.com/ledgerwatch/erigon/core"
@@ -87,11 +88,16 @@ func SpawnMiningExecStage(s *StageState, txc wrap.TxContainer, cfg MiningExecCfg
 	txs := current.PreparedTxs
 	noempty := true
 
+	histV3, _ := kvcfg.HistoryV3.Enabled(txc.Tx)
 	var domains *state2.SharedDomains
 	var (
 		stateReader state.StateReader
 	)
-	stateReader = state.NewReaderV4(txc.Doms)
+	if histV3 {
+		stateReader = state.NewReaderV4(txc.Doms)
+	} else {
+		stateReader = state.NewPlainStateReader(txc.Tx)
+	}
 	ibs := state.New(stateReader)
 
 	// Create an empty block based on temporary copied state for
@@ -121,15 +127,19 @@ func SpawnMiningExecStage(s *StageState, txc wrap.TxContainer, cfg MiningExecCfg
 		m := membatchwithdb.NewMemoryBatch(txc.Tx, cfg.tmpdir, logger)
 		defer m.Rollback()
 
-		var err error
-		domains, err = state2.NewSharedDomains(m, logger)
-		if err != nil {
-			return err
+		if histV3 {
+			var err error
+			domains, err = state2.NewSharedDomains(m, logger)
+			if err != nil {
+				return err
+			}
+			defer domains.Close()
+			simStateReader = state.NewReaderV4(domains)
+			simStateWriter = state.NewWriterV4(domains)
+		} else {
+			simStateReader = state.NewPlainStateReader(m)
+			simStateWriter = state.NewPlainStateWriterNoHistory(m)
 		}
-		defer domains.Close()
-		simStateReader = state.NewReaderV4(domains)
-		simStateWriter = state.NewWriterV4(domains)
-
 		executionAt, err := s.ExecutionAt(txc.Tx)
 		if err != nil {
 			return err
@@ -176,12 +186,12 @@ func SpawnMiningExecStage(s *StageState, txc wrap.TxContainer, cfg MiningExecCfg
 		}
 
 		var err error
-		_, current.Txs, current.Receipts, err = core.FinalizeBlockExecution(cfg.engine, stateReader, current.Header, current.Txs, current.Uncles, &state.NoopWriter{}, &cfg.chainConfig, ibs, current.Receipts, current.Withdrawals, current.Requests, ChainReaderImpl{config: &cfg.chainConfig, tx: txc.Tx, blockReader: cfg.blockReader, logger: logger}, true, logger)
+		_, current.Txs, current.Receipts, err = core.FinalizeBlockExecution(cfg.engine, stateReader, current.Header, current.Txs, current.Uncles, &state.NoopWriter{}, &cfg.chainConfig, ibs, current.Receipts, current.Withdrawals, ChainReaderImpl{config: &cfg.chainConfig, tx: txc.Tx, blockReader: cfg.blockReader, logger: logger}, true, logger)
 		if err != nil {
 			return err
 		}
 
-	block := types.NewBlock(current.Header, current.Txs, current.Uncles, current.Receipts, current.Withdrawals, current.Requests)
+	block := types.NewBlock(current.Header, current.Txs, current.Uncles, current.Receipts, current.Withdrawals)
 
 	// Simulate the block execution to get the final state root
 	if err := rawdb.WriteHeader(txc.Tx, block.Header()); err != nil {
 		return fmt.Errorf("cannot write header: %s", err)
@@ -197,8 +207,10 @@ func SpawnMiningExecStage(s *StageState, txc wrap.TxContainer, cfg MiningExecCfg
 	if _, err = rawdb.WriteRawBodyIfNotExists(txc.Tx, block.Hash(), blockHeight, block.RawBody()); err != nil {
 		return fmt.Errorf("cannot write body: %s", err)
 	}
-	if err := rawdb.AppendCanonicalTxNums(txc.Tx, blockHeight); err != nil {
-		return err
+	if histV3 {
+		if err := rawdb.AppendCanonicalTxNums(txc.Tx, blockHeight); err != nil {
+			return err
+		}
 	}
 	if err := stages.SaveStageProgress(txc.Tx, kv.Headers, blockHeight); err != nil {
 		return err
diff --git a/eth/stagedsync/stage_mining_finish.go b/eth/stagedsync/stage_mining_finish.go
index 408a7990e71..d3d36dfbab6 100644
--- a/eth/stagedsync/stage_mining_finish.go
+++ b/eth/stagedsync/stage_mining_finish.go
@@ -52,7 +52,7 @@ func SpawnMiningFinishStage(s *StageState, tx kv.RwTx, cfg MiningFinishCfg, quit
 	//	continue
 	//}
 
-	block := types.NewBlock(current.Header, current.Txs, current.Uncles, current.Receipts, current.Withdrawals, current.Requests)
+	block := types.NewBlock(current.Header, current.Txs, current.Uncles, current.Receipts, current.Withdrawals)
 	blockWithReceipts := &types.BlockWithReceipts{Block: block, Receipts: current.Receipts}
 	*current = MiningBlock{} // hack to clean global data
diff --git a/eth/stagedsync/stage_snapshots.go b/eth/stagedsync/stage_snapshots.go
index d489c977ab2..1304d93e738 100644
--- a/eth/stagedsync/stage_snapshots.go
+++ b/eth/stagedsync/stage_snapshots.go
@@ -35,6 +35,7 @@ import (
 	"github.com/ledgerwatch/erigon-lib/etl"
 	protodownloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloaderproto"
 	"github.com/ledgerwatch/erigon-lib/kv"
+	"github.com/ledgerwatch/erigon-lib/kv/kvcfg"
 	"github.com/ledgerwatch/erigon-lib/kv/rawdbv3"
 	"github.com/ledgerwatch/erigon-lib/state"
 	"github.com/ledgerwatch/erigon/core/rawdb"
@@ -43,7 +44,6 @@ import (
 	"github.com/ledgerwatch/erigon/eth/ethconfig"
 	"github.com/ledgerwatch/erigon/eth/ethconfig/estimate"
 	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
-	"github.com/ledgerwatch/erigon/ethdb/prune"
 	borsnaptype "github.com/ledgerwatch/erigon/polygon/bor/snaptype"
 	"github.com/ledgerwatch/erigon/rpc"
 	"github.com/ledgerwatch/erigon/turbo/services"
@@ -63,13 +63,13 @@ type SnapshotsCfg struct {
 	blockReader services.FullBlockReader
 	notifier    *shards.Notifications
 
+	historyV3        bool
 	caplin           bool
 	blobs            bool
 	agg              *state.Aggregator
 	silkworm         *silkworm.Silkworm
 	snapshotUploader *snapshotUploader
 	syncConfig       ethconfig.Sync
-	prune            prune.Mode
 }
 
 func StageSnapshotsCfg(db kv.RwDB,
@@ -80,11 +80,11 @@ func StageSnapshotsCfg(db kv.RwDB,
 	snapshotDownloader protodownloader.DownloaderClient,
 	blockReader services.FullBlockReader,
 	notifier *shards.Notifications,
+	historyV3 bool,
 	agg *state.Aggregator,
 	caplin bool,
 	blobs bool,
 	silkworm *silkworm.Silkworm,
-	prune prune.Mode,
 ) SnapshotsCfg {
 	cfg := SnapshotsCfg{
 		db:                 db,
@@ -94,12 +94,12 @@ func StageSnapshotsCfg(db kv.RwDB,
 		snapshotDownloader: snapshotDownloader,
 		blockReader:        blockReader,
 		notifier:           notifier,
+		historyV3:          historyV3,
 		caplin:             caplin,
 		agg:                agg,
 		silkworm:           silkworm,
 		syncConfig:         syncConfig,
 		blobs:              blobs,
-		prune:              prune,
 	}
 
 	if uploadFs := cfg.syncConfig.UploadLocation; len(uploadFs) > 0 {
@@ -154,6 +154,7 @@ func SpawnStageSnapshots(
 		}
 		defer tx.Rollback()
 	}
+
 	if err := DownloadAndIndexSnapshotsIfNeed(s, ctx, tx, cfg, initialCycle, logger); err != nil {
 		return err
 	}
@@ -233,16 +234,7 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R
 			}
 		}
 	} else {
-
-		// Download only the snapshots that are for the header chain.
-		if err := snapshotsync.WaitForDownloader(ctx, s.LogPrefix() /*headerChain=*/, true, cfg.blobs, cfg.prune, cstate, cfg.agg, tx, cfg.blockReader, &cfg.chainConfig, cfg.snapshotDownloader, s.state.StagesIdsList()); err != nil {
-			return err
-		}
-		if err := cfg.blockReader.Snapshots().ReopenSegments([]snaptype.Type{coresnaptype.Headers, coresnaptype.Bodies}, true); err != nil {
-			return err
-		}
-
-		if err := snapshotsync.WaitForDownloader(ctx, s.LogPrefix() /*headerChain=*/, false, cfg.blobs, cfg.prune, cstate, cfg.agg, tx, cfg.blockReader, &cfg.chainConfig, cfg.snapshotDownloader, s.state.StagesIdsList()); err != nil {
+		if err := snapshotsync.WaitForDownloader(ctx, s.LogPrefix(), cfg.historyV3, cfg.blobs, cstate, cfg.agg, tx, cfg.blockReader, &cfg.chainConfig, cfg.snapshotDownloader, s.state.StagesIdsList()); err != nil {
 			return err
 		}
 	}
@@ -262,19 +254,21 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R
 		}
 	}
 
-	indexWorkers := estimate.IndexSnapshot.Workers()
-	if err := cfg.agg.BuildOptionalMissedIndices(ctx, indexWorkers); err != nil {
-		return err
-	}
-	if err := cfg.agg.BuildMissedIndices(ctx, indexWorkers); err != nil {
-		return err
-	}
-	if cfg.notifier.Events != nil {
-		cfg.notifier.Events.OnNewSnapshot()
-	}
+	if cfg.historyV3 {
+		indexWorkers := estimate.IndexSnapshot.Workers()
+		if err := cfg.agg.BuildOptionalMissedIndices(ctx, indexWorkers); err != nil {
+			return err
+		}
+		if err := cfg.agg.BuildMissedIndices(ctx, indexWorkers); err != nil {
+			return err
+		}
+		if cfg.notifier.Events != nil {
+			cfg.notifier.Events.OnNewSnapshot()
+		}
 
-	if casted, ok := tx.(*temporal.Tx); ok {
-		casted.ForceReopenAggCtx() // otherwise next stages will not see just-indexed-files
+		if casted, ok := tx.(*temporal.Tx); ok {
+			casted.ForceReopenAggCtx() // otherwise next stages will not see just-indexed-files
+		}
 	}
 
 	frozenBlocks := cfg.blockReader.FrozenBlocks()
@@ -294,7 +288,7 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R
 	{
 		cfg.blockReader.Snapshots().LogStat("download")
-		tx.(state.HasAggTx).AggTx().(*state.AggregatorRoTx).LogStats(tx, func(endTxNumMinimax uint64) uint64 {
+		tx.(state.HasAggCtx).AggCtx().(*state.AggregatorRoTx).LogStats(tx, func(endTxNumMinimax uint64) uint64 {
 			_, histBlockNumProgress, _ := rawdbv3.TxNums.FindBlockNum(tx, endTxNumMinimax)
 			return histBlockNumProgress
 		})
@@ -377,37 +371,40 @@ func FillDBFromSnapshots(logPrefix string, ctx context.Context, tx kv.RwTx, dirs
 		return err
 	}
 
+	historyV3, err := kvcfg.HistoryV3.Enabled(tx)
 	if err != nil {
 		return err
 	}
-	_ = tx.ClearBucket(kv.MaxTxNum)
-	if err := blockReader.IterateFrozenBodies(func(blockNum, baseTxNum, txAmount uint64) error {
-		select {
-		case <-ctx.Done():
-			return ctx.Err()
-		case <-logEvery.C:
-			logger.Info(fmt.Sprintf("[%s] MaxTxNums index: %dk/%dk", logPrefix, blockNum/1000, blockReader.FrozenBlocks()/1000))
-		default:
-		}
-		if baseTxNum+txAmount == 0 {
-			panic(baseTxNum + txAmount) //uint-underflow
-		}
-		maxTxNum := baseTxNum + txAmount - 1
+	if historyV3 {
+		_ = tx.ClearBucket(kv.MaxTxNum)
+		if err := blockReader.IterateFrozenBodies(func(blockNum, baseTxNum, txAmount uint64) error {
+			select {
+			case <-ctx.Done():
+				return ctx.Err()
+			case <-logEvery.C:
+				logger.Info(fmt.Sprintf("[%s] MaxTxNums index: %dk/%dk", logPrefix, blockNum/1000, blockReader.FrozenBlocks()/1000))
+			default:
+			}
+			if baseTxNum+txAmount == 0 {
+				panic(baseTxNum + txAmount) //uint-underflow
+			}
+			maxTxNum := baseTxNum + txAmount - 1
 
-		if err := rawdbv3.TxNums.Append(tx, blockNum, maxTxNum); err != nil {
-			return fmt.Errorf("%w. blockNum=%d, maxTxNum=%d", err, blockNum, maxTxNum)
-		}
-		return nil
-	}); err != nil {
-		return fmt.Errorf("build txNum => blockNum mapping: %w", err)
-	}
-	if blockReader.FrozenBlocks() > 0 {
-		if err := rawdb.AppendCanonicalTxNums(tx, blockReader.FrozenBlocks()+1); err != nil {
-			return err
+			if err := rawdbv3.TxNums.Append(tx, blockNum, maxTxNum); err != nil {
+				return fmt.Errorf("%w. blockNum=%d, maxTxNum=%d", err, blockNum, maxTxNum)
+			}
+			return nil
+		}); err != nil {
+			return fmt.Errorf("build txNum => blockNum mapping: %w", err)
 		}
-	} else {
-		if err := rawdb.AppendCanonicalTxNums(tx, 0); err != nil {
-			return err
+		if blockReader.FrozenBlocks() > 0 {
+			if err := rawdb.AppendCanonicalTxNums(tx, blockReader.FrozenBlocks()+1); err != nil {
+				return err
+			}
+		} else {
+			if err := rawdb.AppendCanonicalTxNums(tx, 0); err != nil {
+				return err
+			}
 		}
 	}
 	ac := agg.BeginFilesRo()
@@ -421,12 +418,6 @@ func FillDBFromSnapshots(logPrefix string, ctx context.Context, tx kv.RwTx, dirs
 	return nil
 }
 
-func computeBlocksToPrune(cfg SnapshotsCfg) (blocksToPrune uint64, historyToPrune uint64) {
-	frozenBlocks := cfg.blockReader.Snapshots().SegmentsMax()
-	fmt.Println("O", cfg.prune.Blocks.PruneTo(frozenBlocks), cfg.prune.History.PruneTo(frozenBlocks))
-	return frozenBlocks - cfg.prune.Blocks.PruneTo(frozenBlocks), frozenBlocks - cfg.prune.History.PruneTo(frozenBlocks)
-}
-
 /* ====== PRUNING ====== */
 // snapshots pruning sections works more as a retiring of blocks
 // retiring blocks means moving block data from db into snapshots
@@ -486,12 +477,6 @@ func SnapshotsPrune(s *PruneState, initialCycle bool, cfg SnapshotsCfg, ctx cont
 		}
 
 		return nil
-	}, func() error {
-		filesDeleted, err := pruneBlockSnapshots(ctx, cfg, logger)
-		if filesDeleted && cfg.notifier != nil {
-			cfg.notifier.Events.OnNewSnapshot()
-		}
-		return err
 	})
 
 	//cfg.agg.BuildFilesInBackground()
@@ -537,66 +522,6 @@ func SnapshotsPrune(s *PruneState, initialCycle bool, cfg SnapshotsCfg, ctx cont
 	return nil
 }
 
-func pruneBlockSnapshots(ctx context.Context, cfg SnapshotsCfg, logger log.Logger) (bool, error) {
-	tx, err := cfg.db.BeginRo(ctx)
-	if err != nil {
-		return false, err
-	}
-	defer tx.Rollback()
-	// Prune snapshots if necessary (remove .segs or idx files appropriatelly)
-	headNumber := cfg.blockReader.FrozenBlocks()
-	executionProgress, err := stages.GetStageProgress(tx, stages.Execution)
-	if err != nil {
-		return false, err
-	}
-	// If we are behind the execution stage, we should not prune snapshots
-	if headNumber > executionProgress {
-		return false, nil
-	}
-
-	// Keep at least 2 block snapshots as we do not want FrozenBlocks to be 0
-	pruneAmount, _ := computeBlocksToPrune(cfg)
-	if pruneAmount == 0 {
-		return false, nil
-	}
-
-	minBlockNumberToKeep := uint64(0)
-	if headNumber > pruneAmount {
-		minBlockNumberToKeep = headNumber - pruneAmount
-	}
-
-	snapshotFileNames := cfg.blockReader.FrozenFiles()
-	filesDeleted := false
-	// Prune blocks snapshots if necessary
-	for _, file := range snapshotFileNames {
-		if !cfg.prune.Blocks.Enabled() || headNumber == 0 || !strings.Contains(file, "transactions") {
-			continue
-		}
-
-		// take the snapshot file name and parse it to get the "from"
-		info, _, ok := snaptype.ParseFileName(cfg.dirs.Snap, file)
-		if !ok {
-			continue
-		}
-		if info.To >= minBlockNumberToKeep {
-			continue
-		}
-		if info.To-info.From != snaptype.Erigon2MergeLimit {
-			continue
-		}
-		if cfg.snapshotDownloader != nil {
-			if _, err := cfg.snapshotDownloader.Delete(ctx, &protodownloader.DeleteRequest{Paths: []string{file}}); err != nil {
-				return filesDeleted, err
-			}
-		}
-		if err := cfg.blockReader.Snapshots().Delete(file); err != nil {
-			return filesDeleted, err
-		}
-		filesDeleted = true
-	}
-	return filesDeleted, nil
-}
-
 type uploadState struct {
 	sync.Mutex
 	file string
diff --git a/eth/stagedsync/stage_trie3.go b/eth/stagedsync/stage_trie3.go
index 5f16605c7e9..250f828385f 100644
--- a/eth/stagedsync/stage_trie3.go
+++ b/eth/stagedsync/stage_trie3.go
@@ -29,7 +29,7 @@ func collectAndComputeCommitment(ctx context.Context, tx kv.RwTx, tmpDir string,
 		return nil, err
 	}
 	defer domains.Close()
-	ac := domains.AggTx().(*state.AggregatorRoTx)
+	ac := domains.AggCtx().(*state.AggregatorRoTx)
 
 	// has to set this value because it will be used during domain.Commit() call.
 	// If we do not, txNum of block beginning will be used, which will cause invalid txNum on restart following commitment rebuilding
@@ -91,7 +91,7 @@ func collectAndComputeCommitment(ctx context.Context, tx kv.RwTx, tmpDir string,
 		processed atomic.Uint64
 	)
 
-	sdCtx := state.NewSharedDomainsCommitmentContext(domains, commitment.ModeDirect, commitment.VariantHexPatriciaTrie)
+	sdCtx := state.NewSharedDomainsCommitmentContext(domains, state.CommitmentModeDirect, commitment.VariantHexPatriciaTrie)
 
 	loadKeys := func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error {
 		if sdCtx.KeysCount() >= batchSize {
@@ -104,7 +104,7 @@ func collectAndComputeCommitment(ctx context.Context, tx kv.RwTx, tmpDir string,
 				"intermediate root", fmt.Sprintf("%x", rh))
 		}
 		processed.Add(1)
-		sdCtx.TouchKey(kv.AccountsDomain, string(k), nil)
+		sdCtx.TouchPlainKey(string(k), nil, nil)
 
 		return nil
 	}
@@ -189,7 +189,7 @@ func RebuildPatriciaTrieBasedOnFiles(rwTx kv.RwTx, cfg TrieCfg, ctx context.Cont
 	}
 
 	var foundHash bool
-	toTxNum := rwTx.(*temporal.Tx).AggTx().(*state.AggregatorRoTx).EndTxNumNoCommitment()
+	toTxNum := rwTx.(*temporal.Tx).AggCtx().(*state.AggregatorRoTx).EndTxNumNoCommitment()
 	ok, blockNum, err := rawdbv3.TxNums.FindBlockNum(rwTx, toTxNum)
 	if err != nil {
 		return libcommon.Hash{}, err
diff --git a/eth/stagedsync/stage_trie3_test.go b/eth/stagedsync/stage_trie3_test.go
index 46a85c84b58..7f45cb57f5c 100644
--- a/eth/stagedsync/stage_trie3_test.go
+++ b/eth/stagedsync/stage_trie3_test.go
@@ -18,7 +18,10 @@ import (
 func TestRebuildPatriciaTrieBasedOnFiles(t *testing.T) {
 	ctx := context.Background()
 	dirs := datadir.New(t.TempDir())
-	db, agg := temporaltest.NewTestDB(t, dirs)
+	v3, db, agg := temporaltest.NewTestDB(t, dirs)
+	if !v3 {
+		t.Skip("this test is v3 only")
+	}
 	logger := log.New()
 
 	tx, err := db.BeginRw(context.Background())
@@ -85,8 +88,7 @@ func TestRebuildPatriciaTrieBasedOnFiles(t *testing.T) {
 	}
 
 	// checkRoot is false since we do not pass blockReader and want to check root manually afterwards.
-	historyV3 := true
-	cfg := StageTrieCfg(db, false /* checkRoot */, true /* saveHashesToDb */, false /* badBlockHalt */, dirs.Tmp, nil, nil /* hd */, historyV3, agg)
+	cfg := StageTrieCfg(db, false /* checkRoot */, true /* saveHashesToDb */, false /* badBlockHalt */, dirs.Tmp, nil, nil /* hd */, v3, agg)
 
 	rebuiltRoot, err := RebuildPatriciaTrieBasedOnFiles(tx, cfg, context.Background(), log.New())
 	require.NoError(t, err)
diff --git a/eth/stagedsync/sync.go b/eth/stagedsync/sync.go
index a0ce6a892fd..12d199c2f2f 100644
--- a/eth/stagedsync/sync.go
+++ b/eth/stagedsync/sync.go
@@ -137,9 +137,9 @@ func (s *Sync) IsAfter(stage1, stage2 stages.SyncStage) bool {
 func (s *Sync) HasUnwindPoint() bool { return s.unwindPoint != nil }
 func (s *Sync) UnwindTo(unwindPoint uint64, reason UnwindReason, tx kv.Tx) error {
 	if tx != nil {
-		if casted, ok := tx.(state.HasAggTx); ok {
+		if casted, ok := tx.(state.HasAggCtx); ok {
 			// protect from too far unwind
-			unwindPointWithCommitment, ok, err := casted.AggTx().(*state.AggregatorRoTx).CanUnwindBeforeBlockNum(unwindPoint, tx)
+			unwindPointWithCommitment, ok, err := casted.AggCtx().(*state.AggregatorRoTx).CanUnwindBeforeBlockNum(unwindPoint, tx)
 			if err != nil {
 				return err
 			}
diff --git a/eth/tracers/api.go b/eth/tracers/config/api.go
similarity index 97%
rename from eth/tracers/api.go
rename to eth/tracers/config/api.go
index 9890821192a..9e3e0b2a654 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/config/api.go
@@ -1,4 +1,4 @@
-package tracers
+package config
 
 import (
 	"encoding/json"
diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go
index aa19930bead..296474ac869 100644
--- a/eth/tracers/internal/tracetest/calltrace_test.go
+++ b/eth/tracers/internal/tracetest/calltrace_test.go
@@ -163,15 +163,18 @@ func testCallTracer(tracerName string, dirPath string, t *testing.T) {
 			if err != nil {
 				t.Fatalf("failed to create call tracer: %v", err)
 			}
-			evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
+			statedb.SetLogger(tracer.Hooks)
+			evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer.Hooks})
 			msg, err := tx.AsMessage(*signer, test.Genesis.BaseFee, rules)
 			if err != nil {
 				t.Fatalf("failed to prepare transaction for tracing: %v", err)
 			}
+			tracer.OnTxStart(evm.GetVMContext(), tx, msg.From())
 			vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.GetGas()).AddBlobGas(tx.GetBlobGas()), true /* refunds */, false /* gasBailout */)
 			if err != nil {
 				t.Fatalf("failed to execute transaction: %v", err)
 			}
+			tracer.OnTxEnd(&types.Receipt{GasUsed: vmRet.UsedGas}, err)
 			// Retrieve the trace result and compare against the expected.
 			res, err := tracer.GetResult()
 			if err != nil {
@@ -271,7 +274,7 @@ func benchTracer(b *testing.B, tracerName string, test *callTracerTest) {
 		if err != nil {
 			b.Fatalf("failed to create call tracer: %v", err)
 		}
-		evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
+		evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer.Hooks})
 		snap := statedb.Snapshot()
 		st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.GetGas()).AddBlobGas(tx.GetBlobGas()))
 		if _, err = st.TransitionDb(true /* refunds */, false /* gasBailout */); err != nil {
@@ -345,15 +348,18 @@ func TestZeroValueToNotExitCall(t *testing.T) {
 	if err != nil {
 		t.Fatalf("failed to create call tracer: %v", err)
 	}
-	evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Debug: true, Tracer: tracer})
+	statedb.SetLogger(tracer.Hooks)
+	evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Debug: true, Tracer: tracer.Hooks})
 	msg, err := tx.AsMessage(*signer, nil, rules)
 	if err != nil {
 		t.Fatalf("failed to prepare transaction for tracing: %v", err)
 	}
-	st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.GetGas()).AddBlobGas(tx.GetBlobGas()))
-	if _, err = st.TransitionDb(true /* refunds */, false /* gasBailout */); err != nil {
+	tracer.OnTxStart(evm.GetVMContext(), tx, msg.From())
+	vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.GetGas()).AddBlobGas(tx.GetBlobGas()), true /* refunds */, false /* gasBailout */)
+	if err != nil {
 		t.Fatalf("failed to execute transaction: %v", err)
 	}
+	tracer.OnTxEnd(&types.Receipt{GasUsed: vmRet.UsedGas}, err)
 	// Retrieve the trace result and compare against the etalon
 	res, err := tracer.GetResult()
 	if err != nil {
diff --git a/eth/tracers/internal/tracetest/prestate_test.go b/eth/tracers/internal/tracetest/prestate_test.go
index 006e4d17d85..6b22d48dd2b 100644
--- a/eth/tracers/internal/tracetest/prestate_test.go
+++ b/eth/tracers/internal/tracetest/prestate_test.go
@@ -127,15 +127,18 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) {
 			if err != nil {
 				t.Fatalf("failed to create call tracer: %v", err)
 			}
-			evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})
+			statedb.SetLogger(tracer.Hooks)
+			evm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer.Hooks})
 			msg, err := tx.AsMessage(*signer, nil, rules) // BaseFee is set to nil and not to contet.BaseFee, to match the output to go-ethereum tests
 			if err != nil {
 				t.Fatalf("failed to prepare transaction for tracing: %v", err)
 			}
-			st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.GetGas()).AddBlobGas(tx.GetBlobGas()))
-			if _, err = st.TransitionDb(true /* refunds */, false /* gasBailout */); err != nil {
+			tracer.OnTxStart(evm.GetVMContext(), tx, msg.From())
+			vmRet, err := core.ApplyMessage(evm, msg, new(core.GasPool).AddGas(tx.GetGas()), true, false)
+			if err != nil {
 				t.Fatalf("failed to execute transaction: %v", err)
 			}
+			tracer.OnTxEnd(&types.Receipt{GasUsed: vmRet.UsedGas}, nil)
 			// Retrieve the trace result and compare against the expected
 			res, err := tracer.GetResult()
 			if err != nil {
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json b/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json
index 9ba67a9bf46..a5f518eee12 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer/delegatecall.json
@@ -70,7 +70,8 @@
         "input": "0x7d65837a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a529806c67cc6486d4d62024471772f47f6fd672",
         "output": "0x0000000000000000000000000000000000000000000000000000000000000001",
         "to": "0x42b02b5deeb78f34cd5ac896473b63e6c99a71a2",
-        "type": "DELEGATECALL"
+        "type": "DELEGATECALL",
+        "value": "0x0"
       }
     ],
     "from": "0x269296dddce321a6bcbaa2f0181127593d732cba",
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json
index e3fd2ecc271..aec3e282d6a 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer/inner_instafail.json
@@ -54,6 +54,16 @@
     "value": "0x0",
     "gas": "0x1f97e",
     "gasUsed": "0x72de",
-    "input": "0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000"
+    "input": "0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000",
+    "calls": [{
+      "from":"0x6c06b16512b332e6cd8293a2974872674716ce18",
+      "gas":"0x8fc",
+      "gasUsed":"0x0",
+      "to":"0x66fdfd05e46126a07465ad24e40cc0597bc1ef31",
+      "input":"0x",
+      "error":"insufficient balance for transfer",
+      "value":"0x14d1120d7b160000",
+      "type":"CALL"
+    }]
   }
 }
diff --git a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json
index bc13bc25068..4367515e199 100644
--- a/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json
+++ b/eth/tracers/internal/tracetest/testdata/call_tracer_withLog/delegatecall.json
@@ -163,7 +163,8 @@
             "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae",
             "input": "0x24d4e90a0000000000000000000000000000000000000000000000020000000000000000",
             "output": "0x000000000000000000000000000000000000000000000000b17217f7d1cf79ab",
-            "type": "DELEGATECALL"
+            "type": "DELEGATECALL",
+            "value": "0x0"
           },
           {
             "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0",
@@ -172,7 +173,8 @@
             "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae",
             "input": "0x872fb2b5000000000000000000000000000000000000000000000000c330b3f7006420b8",
             "output": "0x00000000000000000000000000000000000000000000000224bf7df2c80f0878",
-            "type": "DELEGATECALL"
+            "type": "DELEGATECALL",
+            "value": "0x0"
           },
           {
             "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0",
@@ -181,7 +183,8 @@
             "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae",
             "input": "0x872fb2b50000000000000000000000000000000000000000000000000000000000000000",
             "output": "0x00000000000000000000000000000000000000000000000100000016aee6e8ef",
-            "type": "DELEGATECALL"
+            "type": "DELEGATECALL",
+            "value": "0x0"
           },
           {
             "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0",
@@ -190,7 +193,8 @@
             "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae",
             "input": "0x24d4e90a00000000000000000000000000000000000000000000000324bf7e0976f5f167",
             "output": "0x0000000000000000000000000000000000000000000000012535c5e5f87ee0d2",
-            "type": "DELEGATECALL"
+            "type": "DELEGATECALL",
+            "value": "0x0"
           },
           {
             "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0",
@@ -199,7 +203,8 @@
             "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae",
             "input": "0x872fb2b5000000000000000000000000000000000000000000000000c330b3f7006420b8",
             "output": "0x00000000000000000000000000000000000000000000000224bf7df2c80f0878",
-            "type": "DELEGATECALL"
+            "type": "DELEGATECALL",
+            "value": "0x0"
           },
           {
             "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0",
@@ -208,7 +213,8 @@
             "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae",
             "input": "0x872fb2b500000000000000000000000000000000000000000000000237d37fe5d297a500",
             "output": "0x0000000000000000000000000000000000000000000000093088c407fcbbce38",
-            "type": "DELEGATECALL"
+            "type": "DELEGATECALL",
+            "value": "0x0"
           },
           {
             "from": "0x8695e5e79dab06fbbb05f445316fa4edb0da30f0",
@@ -217,7 +223,8 @@
             "to": "0xef3487d24a0702703e04a26cef479e313c8fc7ae",
             "input": "0x24d4e90a00000000000000000000000000000000000000000000000b554841fac4cad6b0",
             "output": "0x0000000000000000000000000000000000000000000000026d7fc130d6a74cbe",
-            "type": "DELEGATECALL"
+            "type": "DELEGATECALL",
+            "value": "0x0"
           }
         ],
         "value": "0x0",
@@ -390,7 +397,8 @@
             "data": "0x000000000000000000000000000000000000000000000000de0b6b3a76400000"
           }
         ],
-        "type": "DELEGATECALL"
+        "type": "DELEGATECALL",
+        "value": "0x0"
       }
     ],
     "value": "0x0",
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer/create_existing_contract.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer/create_existing_contract.json
index 1d3d5402ddc..d43964ac5f7 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer/create_existing_contract.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer/create_existing_contract.json
@@ -63,7 +63,7 @@
   "result": {
     "0x082d4cdf07f386ffa9258f52a5c49db4ac321ec6": {
       "balance": "0xc820f93200f4000",
-      "nonce": 93
+      "nonce": 94
     },
     "0x332b656504f4eabb44c8617a42af37461a34e9dc": {
       "balance": "0x11faea4f35e5af80000",
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create.json
index 6a0e0b2b3d8..6b8af39eb2f 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create.json
@@ -61,7 +61,7 @@
     },
     "0xf0c5cef39b17c213cfe090a46b8c7760ffb7928a": {
       "balance": "0x15b6828e22bb12188",
-      "nonce": 746
+      "nonce": 747
     }
   },
   "post": {
diff --git a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_suicide.json b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_suicide.json
index d24bcbb8b3d..827222ed096 100644
--- a/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_suicide.json
+++ b/eth/tracers/internal/tracetest/testdata/prestate_tracer_with_diff_mode/create_suicide.json
@@ -69,7 +69,7 @@
   "pre": {
     "0x082d4cdf07f386ffa9258f52a5c49db4ac321ec6": {
       "balance": "0xc820f93200f4000",
-      "nonce": 93
+      "nonce": 94
     },
     "0x332b656504f4eabb44c8617a42af37461a34e9dc": {
       "balance": "0x11faea4f35e5af80000",
diff --git a/eth/tracers/js/goja.go b/eth/tracers/js/goja.go
index a38983d39fa..d520779d032 100644
--- a/eth/tracers/js/goja.go
+++ b/eth/tracers/js/goja.go
@@ -28,9 +28,9 @@ import (
 	"github.com/ledgerwatch/erigon-lib/common/hexutility"
 
 	"github.com/ledgerwatch/erigon/common"
+	"github.com/ledgerwatch/erigon/core/tracing"
+	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/core/vm"
-	"github.com/ledgerwatch/erigon/core/vm/evmtypes"
-	"github.com/ledgerwatch/erigon/core/vm/stack"
 	"github.com/ledgerwatch/erigon/crypto"
 	"github.com/ledgerwatch/erigon/eth/tracers"
 	jsassets "github.com/ledgerwatch/erigon/eth/tracers/js/internal/tracers"
@@ -95,7 +95,7 @@ func fromBuf(vm *goja.Runtime, bufType goja.Value, buf goja.Value, allowString b
 // JS functions on the relevant EVM hooks. It uses Goja as its JS engine.
 type jsTracer struct {
 	vm                *goja.Runtime
-	env               *vm.EVM
+	env               *tracing.VMContext
 	toBig             toBigFn   // Converts a hex string into a JS bigint
 	toBuf             toBufFn   // Converts a []byte into a JS buffer
 	fromBuf           fromBufFn // Converts an array, hex string or Uint8Array to a []byte
@@ -103,7 +103,6 @@ type jsTracer struct {
 	activePrecompiles []libcommon.Address // List of active precompiles at current block
 	traceStep         bool                // True if tracer object exposes a `step()` method
 	traceFrame        bool                // True if tracer object exposes the `enter()` and `exit()` methods
-	gasLimit          uint64              // Amount of gas bought for the whole tx
 	err               error               // Any error that should stop tracing
 	obj               *goja.Object        // Trace object
 
@@ -132,7 +131,7 @@ type jsTracer struct {
 // The methods `result` and `fault` are required to be present.
 // The methods `step`, `enter`, and `exit` are optional, but note that
 // `enter` and `exit` always go together.
-func newJsTracer(code string, ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, error) {
+func newJsTracer(code string, ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
 	if c, ok := assetTracers[code]; ok {
 		code = c
 	}
@@ -208,26 +207,53 @@ func newJsTracer(code string, ctx *tracers.Context, cfg json.RawMessage) (tracer
 	t.frameValue = t.frame.setupObject()
 	t.frameResultValue = t.frameResult.setupObject()
 	t.logValue = t.log.setupObject()
-	return t, nil
-}
-
-// CaptureTxStart implements the Tracer interface and is invoked at the beginning of
+	return &tracers.Tracer{
+		Hooks: &tracing.Hooks{
+			OnTxStart: t.OnTxStart,
+			OnTxEnd:   t.OnTxEnd,
+			OnEnter:   t.OnEnter,
+			OnExit:    t.OnExit,
+			OnOpcode:  t.OnOpcode,
+			OnFault:   t.OnFault,
+		},
+		GetResult: t.GetResult,
+		Stop:      t.Stop,
+	}, nil
+}
+
+// OnTxStart implements the Tracer interface and is invoked at the beginning of
 // transaction processing.
-func (t *jsTracer) CaptureTxStart(gasLimit uint64) {
-	t.gasLimit = gasLimit
+func (t *jsTracer) OnTxStart(env *tracing.VMContext, tx types.Transaction, from libcommon.Address) {
+	t.env = env
+	// Need statedb access for db object
+	db := &dbObj{ibs: env.IntraBlockState, vm: t.vm, toBig: t.toBig, toBuf: t.toBuf, fromBuf: t.fromBuf}
+	t.dbValue = db.setupObject()
+	// Update list of precompiles based on current block
+	rules := env.ChainConfig.Rules(env.BlockNumber, env.Time)
+	t.activePrecompiles = vm.ActivePrecompiles(rules)
+	t.ctx["block"] = t.vm.ToValue(t.env.BlockNumber)
+	t.ctx["gas"] = t.vm.ToValue(tx.GetGas())
+	t.ctx["gasPrice"] = t.vm.ToValue(t.env.GasPrice.ToBig())
 }
 
-// CaptureTxEnd implements the Tracer interface and is invoked at the end of
+// OnTxEnd implements the Tracer interface and is invoked at the end of
 // transaction processing.
-func (t *jsTracer) CaptureTxEnd(restGas uint64) {
-	t.ctx["gasUsed"] = t.vm.ToValue(t.gasLimit - restGas)
+func (t *jsTracer) OnTxEnd(receipt *types.Receipt, err error) {
+	if err != nil {
+		// Don't override vm error
+		if _, ok := t.ctx["error"]; !ok {
+			t.ctx["error"] = t.vm.ToValue(err.Error())
+		}
+		return
+	}
+	t.ctx["gasUsed"] = t.vm.ToValue(receipt.GasUsed)
 }
 
-// CaptureStart implements the Tracer interface to initialize the tracing operation.
-func (t *jsTracer) CaptureStart(env *vm.EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) {
-	t.env = env
-	db := &dbObj{ibs: env.IntraBlockState(), vm: t.vm, toBig: t.toBig, toBuf: t.toBuf, fromBuf: t.fromBuf}
-	t.dbValue = db.setupObject()
+// onStart implements the Tracer interface to initialize the tracing operation.
+func (t *jsTracer) onStart(from libcommon.Address, to libcommon.Address, create bool, input []byte, gas uint64, value *uint256.Int) {
+	if t.err != nil {
+		return
+	}
 	if create {
 		t.ctx["type"] = t.vm.ToValue("CREATE")
 	} else {
@@ -236,22 +262,16 @@ func (t *jsTracer) CaptureStart(env *vm.EVM, from libcommon.Address, to libcommo
 	t.ctx["from"] = t.vm.ToValue(from.Bytes())
 	t.ctx["to"] = t.vm.ToValue(to.Bytes())
 	t.ctx["input"] = t.vm.ToValue(input)
-	t.ctx["gas"] = t.vm.ToValue(t.gasLimit)
-	t.ctx["gasPrice"] = t.vm.ToValue(env.GasPrice.ToBig())
 	valueBig, err := t.toBig(t.vm, value.ToBig().String())
 	if err != nil {
 		t.err = err
 		return
 	}
 	t.ctx["value"] = valueBig
-	t.ctx["block"] = t.vm.ToValue(env.Context.BlockNumber)
-	// Update list of precompiles based on current block
-	rules := env.ChainRules()
-	t.activePrecompiles = vm.ActivePrecompiles(rules)
 }
 
-// CaptureState implements the Tracer interface to trace a single step of VM execution.
-func (t *jsTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {
+// OnOpcode implements the Tracer interface to trace a single step of VM execution.
+func (t *jsTracer) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) {
 	if !t.traceStep {
 		return
 	}
@@ -260,14 +280,14 @@ func (t *jsTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope
 	}
 
 	log := t.log
-	log.op.op = op
-	log.memory.memory = scope.Memory
-	log.stack.stack = scope.Stack
-	log.contract.contract = scope.Contract
+	log.op.op = vm.OpCode(op)
+	log.memory.memory = scope.MemoryData()
+	log.stack.stack = scope.StackData()
+	log.contract.scope = scope
 	log.pc = pc
 	log.gas = gas
 	log.cost = cost
-	log.refund = t.env.IntraBlockState().GetRefund()
+	log.refund = t.env.IntraBlockState.GetRefund()
 	log.depth = depth
 	log.err = err
 	if _, err := t.step(t.obj, t.logValue, t.dbValue); err != nil {
@@ -275,36 +295,42 @@ func (t *jsTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope
 	}
 }
 
-// CaptureFault implements the Tracer interface to trace an execution fault
-func (t *jsTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {
+// OnFault implements the Tracer interface to trace an execution fault
+func (t *jsTracer) OnFault(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, depth int, err error) {
 	if t.err != nil {
 		return
 	}
-	// Other log fields have been already set as part of the last CaptureState.
+	// Other log fields have already been set as part of the last OnOpcode.
 	t.log.err = err
 	if _, err := t.fault(t.obj, t.logValue, t.dbValue); err != nil {
 		t.onError("fault", err)
 	}
 }
 
-// CaptureEnd is called after the call finishes to finalize the tracing.
-func (t *jsTracer) CaptureEnd(output []byte, gasUsed uint64, err error) {
+// onEnd is called after the call finishes to finalize the tracing.
+func (t *jsTracer) onEnd(output []byte, gasUsed uint64, err error, reverted bool) {
 	t.ctx["output"] = t.vm.ToValue(output)
 	if err != nil {
 		t.ctx["error"] = t.vm.ToValue(err.Error())
 	}
 }
 
-// CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct).
-func (t *jsTracer) CaptureEnter(typ vm.OpCode, from libcommon.Address, to libcommon.Address, precompile, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) {
-	if !t.traceFrame {
+// OnEnter is called when EVM enters a new scope (via call, create or selfdestruct).
+func (t *jsTracer) OnEnter(depth int, typ byte, from libcommon.Address, to libcommon.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) {
+	if t.err != nil {
 		return
 	}
-	if t.err != nil {
+
+	if depth == 0 {
+		t.onStart(from, to, vm.OpCode(typ) == vm.CREATE, input, gas, value)
 		return
 	}
-	t.frame.typ = typ.String()
+	if !t.traceFrame {
+		return
+	}
+
+	t.frame.typ = vm.OpCode(typ).String()
 	t.frame.from = from
 	t.frame.to = to
 	t.frame.input = libcommon.CopyBytes(input)
@@ -319,9 +345,18 @@ func (t *jsTracer) CaptureEnter(typ vm.OpCode, from libcommon.Address, to libcom
 	}
 }
 
-// CaptureExit is called when EVM exits a scope, even if the scope didn't
+// OnExit is called when EVM exits a scope, even if the scope didn't
 // execute any code.
-func (t *jsTracer) CaptureExit(output []byte, gasUsed uint64, err error) {
+func (t *jsTracer) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) {
+	if t.err != nil {
+		return
+	}
+
+	if depth == 0 {
+		t.onEnd(output, gasUsed, err, reverted)
+		return
+	}
+
 	if !t.traceFrame {
 		return
 	}
@@ -359,9 +394,6 @@ func (t *jsTracer) Stop(err error) {
 // execution.
 func (t *jsTracer) onError(context string, err error) {
 	t.err = wrapError(context, err)
-	// `env` is set on CaptureStart which comes before any JS execution.
-	// So it should be non-nil.
-	t.env.Cancel()
 }
 
 func wrapError(context string, err error) error {
@@ -540,7 +572,7 @@ func (o *opObj) setupObject() *goja.Object {
 }
 
 type memoryObj struct {
-	memory *vm.Memory
+	memory []byte
 	vm     *goja.Runtime
 	toBig  toBigFn
 	toBuf  toBufFn
@@ -568,14 +600,10 @@ func (mo *memoryObj) slice(begin, end int64) ([]byte, error) {
 	if end < begin || begin < 0 {
 		return nil, fmt.Errorf("tracer accessed out of bound memory: offset %d, end %d", begin, end)
 	}
-	mlen := mo.memory.Len()
-	if end-int64(mlen) > memoryPadLimit {
-		return nil, fmt.Errorf("tracer reached limit for padding memory slice: end %d, memorySize %d", end, mlen)
+	slice, err := tracers.GetMemoryCopyPadded(mo.memory, begin, end-begin)
+	if err != nil {
+		return nil, err
 	}
-	slice := make([]byte, end-begin)
-	end = min(end, int64(mo.memory.Len()))
-	ptr := mo.memory.GetPtr(begin, end-begin)
-	copy(slice, ptr)
 	return slice, nil
 }
 
@@ -595,14 +623,14 @@ func (mo *memoryObj) GetUint(addr int64) goja.Value {
 
 // getUint returns the 32 bytes at the specified address interpreted as a uint.
 func (mo *memoryObj) getUint(addr int64) (*big.Int, error) {
-	if mo.memory.Len() < int(addr)+32 || addr < 0 {
-		return nil, fmt.Errorf("tracer accessed out of bound memory: available %d, offset %d, size %d", mo.memory.Len(), addr, 32)
+	if len(mo.memory) < int(addr)+32 || addr < 0 {
+		return nil, fmt.Errorf("tracer accessed out of bound memory: available %d, offset %d, size %d", len(mo.memory), addr, 32)
 	}
-	return new(big.Int).SetBytes(mo.memory.GetPtr(addr, 32)), nil
+	return new(big.Int).SetBytes(tracers.MemoryPtr(mo.memory, addr, 32)), nil
 }
 
 func (mo *memoryObj) Length() int {
-	return mo.memory.Len()
+	return len(mo.memory)
 }
 
 func (m *memoryObj) setupObject() *goja.Object {
@@ -614,7 +642,7 @@ func (m *memoryObj) setupObject() *goja.Object {
 }
 
 type stackObj struct {
-	stack *stack.Stack
+	stack []uint256.Int
 	vm    *goja.Runtime
 	toBig toBigFn
 }
@@ -635,14 +663,14 @@ func (s *stackObj) Peek(idx int) goja.Value {
 
 // peek returns the nth-from-the-top element of the stack.
 func (s *stackObj) peek(idx int) (*big.Int, error) {
-	if len(s.stack.Data) <= idx || idx < 0 {
-		return nil, fmt.Errorf("tracer accessed out of bound stack: size %d, index %d", len(s.stack.Data), idx)
+	if len(s.stack) <= idx || idx < 0 {
+		return nil, fmt.Errorf("tracer accessed out of bound stack: size %d, index %d", len(s.stack), idx)
 	}
-	return s.stack.Back(idx).ToBig(), nil
+	return tracers.StackBack(s.stack, idx).ToBig(), nil
}
 
 func (s *stackObj) Length() int {
-	return len(s.stack.Data)
+	return len(s.stack)
 }
 
 func (s *stackObj) setupObject() *goja.Object {
@@ -653,7 +681,7 @@ func (s *stackObj) setupObject() *goja.Object {
 }
 
 type dbObj struct {
-	ibs   evmtypes.IntraBlockState
+	ibs   tracing.IntraBlockState
 	vm    *goja.Runtime
 	toBig toBigFn
 	toBuf toBufFn
@@ -746,14 +774,14 @@ func (do *dbObj) setupObject() *goja.Object {
 }
 
 type contractObj struct {
-	contract *vm.Contract
-	vm       *goja.Runtime
-	toBig    toBigFn
-	toBuf    toBufFn
+	scope tracing.OpContext
+	vm    *goja.Runtime
+	toBig toBigFn
+	toBuf toBufFn
 }
 
 func (co *contractObj) GetCaller() goja.Value {
-	caller := co.contract.Caller().Bytes()
+	caller := co.scope.Caller().Bytes()
 	res, err := co.toBuf(co.vm, caller)
 	if err != nil {
 		co.vm.Interrupt(err)
@@ -763,7 +791,7 @@ func (co *contractObj) GetCaller() goja.Value {
 }
 
 func (co *contractObj) GetAddress() goja.Value {
-	addr := co.contract.Address().Bytes()
+	addr := co.scope.Address().Bytes()
 	res, err := co.toBuf(co.vm, addr)
 	if err != nil {
 		co.vm.Interrupt(err)
@@ -773,7 +801,7 @@ func (co *contractObj) GetAddress() goja.Value {
 }
 
 func (co *contractObj) GetValue() goja.Value {
-	value := co.contract.Value()
+	value := co.scope.CallValue()
 	res, err := co.toBig(co.vm, value.String())
 	if err != nil {
 		co.vm.Interrupt(err)
@@ -783,7 +811,7 @@ func (co *contractObj) GetValue() goja.Value {
 }
 
 func (co *contractObj) GetInput() goja.Value {
-	input := libcommon.CopyBytes(co.contract.Input)
+	input := libcommon.CopyBytes(co.scope.CallInput())
 	res, err := co.toBuf(co.vm, input)
 	if err != nil {
 		co.vm.Interrupt(err)
diff --git a/eth/tracers/js/tracer_test.go b/eth/tracers/js/tracer_test.go
index 080230f7cc0..5aa68d570bd 100644
--- a/eth/tracers/js/tracer_test.go
+++ b/eth/tracers/js/tracer_test.go
@@ -29,6 +29,7 @@ import (
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
 
 	"github.com/ledgerwatch/erigon/core/state"
+	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/core/vm"
 	"github.com/ledgerwatch/erigon/core/vm/evmtypes"
 	"github.com/ledgerwatch/erigon/eth/tracers"
@@ -64,9 +65,9 @@ func testCtx() *vmContext {
 	return &vmContext{blockCtx: evmtypes.BlockContext{BlockNumber: 1}, txCtx: evmtypes.TxContext{GasPrice: uint256.NewInt(100000)}}
 }
 
-func runTrace(tracer tracers.Tracer, vmctx *vmContext, chaincfg *chain.Config, contractCode []byte) (json.RawMessage, error) {
+func runTrace(tracer *tracers.Tracer, vmctx *vmContext, chaincfg *chain.Config, contractCode []byte) (json.RawMessage, error) {
 	var (
-		env             = vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, chaincfg, vm.Config{Debug: true, Tracer: tracer})
+		env             = vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, chaincfg, vm.Config{Debug: true, Tracer: tracer.Hooks})
 		gasLimit uint64 = 31000
 		startGas uint64 = 10000
 		value           = uint256.NewInt(0)
@@ -77,12 +78,12 @@ func runTrace(tracer tracers.Tracer, vmctx *vmContext, chaincfg *chain.Config, c
 		contract.Code = contractCode
 	}
 
-	tracer.CaptureTxStart(gasLimit)
-	tracer.CaptureStart(env, contract.Caller(), contract.Address(), false /* precompile */, false /* create */, []byte{}, startGas, value, []byte{} /* code */)
+	tracer.OnTxStart(env.GetVMContext(), types.NewTransaction(0, libcommon.Address{}, nil, gasLimit, nil, nil), contract.Caller())
+	tracer.OnEnter(0, byte(vm.CALL), contract.Caller(), contract.Address(), false, []byte{}, startGas, value, contractCode)
 	ret, err := env.Interpreter().Run(contract, []byte{}, false)
-	tracer.CaptureEnd(ret, startGas-contract.Gas, err)
+	tracer.OnExit(0, ret, startGas-contract.Gas, err, true)
 	// Rest gas assumes no refund
-	tracer.CaptureTxEnd(contract.Gas)
+	tracer.OnTxEnd(&types.Receipt{GasUsed: gasLimit - contract.Gas}, nil)
 	if err != nil {
 		return nil, err
 	}
@@ -154,7 +155,7 @@ func TestTracer(t *testing.T) {
 		}, {
 			code: "{res: [], step: function(log) { if (log.op.toString() === 'STOP') { this.res.push(log.memory.slice(5, 1025 * 1024)) } }, fault: function() {}, result: function() { return this.res }}",
 			want: "",
-			fail: "tracer reached limit for padding memory slice: end 1049600, memorySize 32 at step (:1:83(23)) in server-side tracer function 'step'",
+			fail: "reached limit for padding memory slice: 1049568 at step (:1:83(23)) in server-side tracer function 'step'",
			contract: []byte{byte(vm.PUSH1), byte(0xff), byte(vm.PUSH1), byte(0x00), byte(vm.MSTORE8), byte(vm.STOP)},
 		},
 	} {
@@ -184,36 +185,38 @@ func TestHaltBetweenSteps(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	env := vm.NewEVM(evmtypes.BlockContext{BlockNumber: 1}, evmtypes.TxContext{GasPrice: uint256.NewInt(1)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer})
+	env := vm.NewEVM(evmtypes.BlockContext{BlockNumber: 1}, evmtypes.TxContext{GasPrice: uint256.NewInt(1)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer.Hooks})
 	scope := &vm.ScopeContext{
 		Contract: vm.NewContract(&account{}, libcommon.Address{}, uint256.NewInt(0), 0, false /* skipAnalysis */),
 	}
-	tracer.CaptureStart(env, libcommon.Address{}, libcommon.Address{}, false /* precompile */, false /* create */, []byte{}, 0, uint256.NewInt(0), []byte{} /* code */)
-	tracer.CaptureState(0, 0, 0, 0, scope, nil, 0, nil)
+	tracer.OnTxStart(env.GetVMContext(), types.NewTransaction(0, libcommon.Address{}, new(uint256.Int), 0, new(uint256.Int), nil), libcommon.Address{})
+	tracer.OnEnter(0, byte(vm.CALL), libcommon.Address{}, libcommon.Address{}, false, []byte{}, 0, uint256.NewInt(0), []byte{})
+	tracer.OnOpcode(0, 0, 0, 0, scope, nil, 0, nil)
 	timeout := errors.New("stahp")
 	tracer.Stop(timeout)
-	tracer.CaptureState(0, 0, 0, 0, scope, nil, 0, nil)
+	tracer.OnOpcode(0, 0, 0, 0, scope, nil, 0, nil)
 
 	if _, err := tracer.GetResult(); !strings.Contains(err.Error(), timeout.Error()) {
 		t.Errorf("Expected timeout error, got %v", err)
 	}
 }
 
-// testNoStepExec tests a regular value transfer (no exec), and accessing the statedb
+// TestNoStepExec tests a regular value transfer (no exec), and accessing the statedb
 // in 'result'
 func TestNoStepExec(t *testing.T) {
 	execTracer := func(code string) []byte {
 		t.Helper()
 		tracer, err := newJsTracer(code, nil, nil)
 		if err != nil {
-			t.Fatal(err)
+			panic(err)
 		}
-		env := vm.NewEVM(evmtypes.BlockContext{BlockNumber: 1}, evmtypes.TxContext{GasPrice: uint256.NewInt(100)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer})
-		tracer.CaptureStart(env, libcommon.Address{}, libcommon.Address{}, false /* precompile */, false /* create */, []byte{}, 1000, uint256.NewInt(0), []byte{} /* code */)
-		tracer.CaptureEnd(nil, 0, nil)
+		env := vm.NewEVM(evmtypes.BlockContext{BlockNumber: 1}, evmtypes.TxContext{GasPrice: uint256.NewInt(100)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer.Hooks})
+		tracer.OnTxStart(env.GetVMContext(), types.NewTransaction(0, libcommon.Address{}, new(uint256.Int), 0, new(uint256.Int), nil), libcommon.Address{})
+		tracer.OnEnter(0, byte(vm.CALL), libcommon.Address{}, libcommon.Address{}, false, []byte{}, 1000, uint256.NewInt(0), []byte{})
+		tracer.OnExit(0, nil, 0, nil, false)
 		ret, err := tracer.GetResult()
 		if err != nil {
-			t.Fatal(err)
+			panic(err)
 		}
 		return ret
 	}
@@ -279,8 +282,8 @@ func TestEnterExit(t *testing.T) {
 	scope := &vm.ScopeContext{
 		Contract: vm.NewContract(&account{}, libcommon.Address{}, uint256.NewInt(0), 0, false /* skipAnalysis */),
 	}
-	tracer.CaptureEnter(vm.CALL, scope.Contract.Caller(), scope.Contract.Address(), false, false, []byte{}, 1000, new(uint256.Int), []byte{})
-	tracer.CaptureExit([]byte{}, 400, nil)
+	tracer.OnEnter(1, byte(vm.CALL), scope.Contract.Caller(), scope.Contract.Address(), false, []byte{}, 1000, new(uint256.Int), []byte{})
+	tracer.OnExit(1, []byte{}, 400, nil, false)
 
 	have, err := tracer.GetResult()
 	if err != nil {
diff --git a/eth/tracers/live/printer.go b/eth/tracers/live/printer.go
new file mode 100644
index 00000000000..2a850bc3669
--- /dev/null
+++ b/eth/tracers/live/printer.go
@@ -0,0 +1,137 @@
+package live
+
+import (
+	"encoding/json"
+	"fmt"
+	"math/big"
+
+	"github.com/holiman/uint256"
+	"github.com/ledgerwatch/erigon-lib/chain"
+	libcommon "github.com/ledgerwatch/erigon-lib/common"
+	"github.com/ledgerwatch/erigon-lib/common/hexutility"
+	"github.com/ledgerwatch/erigon/core/tracing"
+	"github.com/ledgerwatch/erigon/core/types"
+	"github.com/ledgerwatch/erigon/eth/tracers"
+)
+
+func init() {
+	register("livePrinter", newPrinter)
+}
+
+type Printer struct{}
+
+func newPrinter(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
+	t := &Printer{}
+	return &tracers.Tracer{
+		Hooks: &tracing.Hooks{
+			OnTxStart:       t.OnTxStart,
+			OnTxEnd:         t.OnTxEnd,
+			OnEnter:         t.OnEnter,
+			OnExit:          t.OnExit,
+			OnOpcode:        t.OnOpcode,
+			OnFault:         t.OnFault,
+			OnGasChange:     t.OnGasChange,
+			OnBalanceChange: t.OnBalanceChange,
+			OnNonceChange:   t.OnNonceChange,
+			OnCodeChange:    t.OnCodeChange,
+			OnStorageChange: t.OnStorageChange,
+			OnLog:           t.OnLog,
+		},
+		GetResult: t.GetResult,
+		Stop:      t.Stop,
+	}, nil
+}
+
+// OnExit is called after the call finishes to finalize the tracing.
+func (p *Printer) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { + fmt.Printf("OnExit: output=%s, gasUsed=%v, err=%v, reverted=%v\n", hexutility.Bytes(output), gasUsed, err, reverted) +} + +// OnOpcode implements the EVMLogger interface to trace a single step of VM execution. +func (p *Printer) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) { + fmt.Printf("OnOpcode: pc=%v, op=%v, gas=%v, cost=%v, scope=%v, rData=%v, depth=%v, err=%v\n", pc, op, gas, cost, scope, rData, depth, err) +} + +// OnFault implements the EVMLogger interface to trace an execution fault. +func (p *Printer) OnFault(pc uint64, op byte, gas, cost uint64, _ tracing.OpContext, depth int, err error) { + fmt.Printf("OnFault: pc=%v, op=%v, gas=%v, cost=%v, depth=%v, err=%v\n", pc, op, gas, cost, depth, err) +} + +func (p *Printer) OnEnter(depth int, typ byte, from libcommon.Address, to libcommon.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) { + fmt.Printf("OnEnter: depth=%v, typ=%v, from=%v, to=%v, input=%s, gas=%v, value=%v\n", depth, typ, from, to, hexutility.Bytes(input), gas, value) +} + +func (p *Printer) OnTxStart(env *tracing.VMContext, tx types.Transaction, from libcommon.Address) { + buf, err := json.Marshal(tx) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Printf("OnTxStart: tx=%s\n", buf) +} + +func (p *Printer) OnTxEnd(receipt *types.Receipt, err error) { + if err != nil { + fmt.Printf("OnTxEnd err: %v\n", err) + return + } + buf, err := json.Marshal(receipt) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Printf("OnTxEnd: receipt=%s\n", buf) +} + +func (p *Printer) OnBlockStart(b *types.Block, td *big.Int, finalized, safe *types.Header, chainConfig *chain.Config) { + if finalized != nil && safe != nil { + fmt.Printf("OnBlockStart: b=%v, td=%v, finalized=%v, safe=%v\n", b.NumberU64(), td, finalized.Number.Uint64(), safe.Number.Uint64()) + } else { + fmt.Printf("OnBlockStart: b=%v, td=%v\n", b.NumberU64(), td) + } +} + +func (p *Printer) OnBlockEnd(err error) { + fmt.Printf("OnBlockEnd: err=%v\n", err) +} + +func (p *Printer) OnGenesisBlock(b *types.Block, alloc types.GenesisAlloc) { + fmt.Printf("OnGenesisBlock: b=%v, allocLength=%d\n", b.NumberU64(), len(alloc)) +} + +func (p *Printer) OnBalanceChange(a libcommon.Address, prev, new *uint256.Int, reason tracing.BalanceChangeReason) { + fmt.Printf("OnBalanceChange: a=%v, prev=%v, new=%v\n", a, prev, new) +} + +func (p *Printer) OnNonceChange(a libcommon.Address, prev, new uint64) { + fmt.Printf("OnNonceChange: a=%v, prev=%v, new=%v\n", a, prev, new) +} + +func (p *Printer) OnCodeChange(a libcommon.Address, prevCodeHash libcommon.Hash, prev []byte, codeHash libcommon.Hash, code []byte) { + fmt.Printf("OnCodeChange: a=%v, prevCodeHash=%v, prev=%s, codeHash=%v, code=%s\n", a, prevCodeHash, hexutility.Bytes(prev), codeHash, hexutility.Bytes(code)) +} + +func (p *Printer) OnStorageChange(a libcommon.Address, k *libcommon.Hash, prev, new uint256.Int) { + fmt.Printf("OnStorageChange: a=%v, k=%v, prev=%v, new=%v\n", a, k, prev, new) +} + +func (p *Printer) OnLog(l *types.Log) { + buf, err := json.Marshal(l) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Printf("OnLog: l=%s\n", buf) +} + +func (p *Printer) OnGasChange(old, new uint64, reason tracing.GasChangeReason) { + fmt.Printf("OnGasChange: old=%v, new=%v, diff=%v\n", old, new, new-old) +} + +func (p *Printer) GetResult() (json.RawMessage, error) { + return json.RawMessage{}, nil +} + +func (p *Printer) Stop(err error) { +}
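The printer above is the whole live-tracer contract in miniature: fill in only the hooks you need, leave every other hook nil (callers must nil-check), and return everything bundled in a *tracers.Tracer. A minimal sketch of that pattern follows; the liveGasLogger name and its gas reporting are illustrative only, not part of this patch, and register is the package-local helper defined in eth/tracers/live/tracer.go just below.

package live

import (
	"encoding/json"
	"fmt"

	libcommon "github.com/ledgerwatch/erigon-lib/common"
	"github.com/ledgerwatch/erigon/core/tracing"
	"github.com/ledgerwatch/erigon/core/types"
	"github.com/ledgerwatch/erigon/eth/tracers"
)

// gasLogger is a hypothetical live tracer that only reports per-tx gas usage.
type gasLogger struct{}

func newGasLogger(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) {
	t := &gasLogger{}
	return &tracers.Tracer{
		// Only the hooks that are set will ever fire; all others stay nil.
		Hooks: &tracing.Hooks{
			OnTxStart: t.OnTxStart,
			OnTxEnd:   t.OnTxEnd,
		},
		GetResult: t.GetResult,
		Stop:      func(err error) {},
	}, nil
}

func (t *gasLogger) OnTxStart(env *tracing.VMContext, tx types.Transaction, from libcommon.Address) {
	fmt.Printf("OnTxStart: from=%v, gasLimit=%d\n", from, tx.GetGas())
}

func (t *gasLogger) OnTxEnd(receipt *types.Receipt, err error) {
	if err == nil {
		fmt.Printf("OnTxEnd: gasUsed=%d\n", receipt.GasUsed)
	}
}

// GetResult returns an empty json object, like the other live tracers here.
func (t *gasLogger) GetResult() (json.RawMessage, error) {
	return json.RawMessage(`{}`), nil
}

func init() {
	register("liveGasLogger", newGasLogger) // register comes from tracer.go below
}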
diff --git a/eth/tracers/live/tracer.go b/eth/tracers/live/tracer.go new file mode 100644 index 00000000000..1b9979e31e6 --- /dev/null +++ b/eth/tracers/live/tracer.go @@ -0,0 +1,38 @@ +package live + +import ( + "encoding/json" + "errors" + + "github.com/ledgerwatch/erigon/eth/tracers" +) + +// init registers this package as a lookup for live tracers. +func init() { + tracers.RegisterLookup(false, lookup) +} + +// ctorFn is the constructor signature of a native tracer. +type ctorFn = func(*tracers.Context, json.RawMessage) (*tracers.Tracer, error) + +// ctors is a map of package-local tracer constructors. +var ctors map[string]ctorFn + +// register is used by native tracers to register their presence. +func register(name string, ctor ctorFn) { + if ctors == nil { + ctors = make(map[string]ctorFn) + } + ctors[name] = ctor +} + +// lookup returns a tracer, if one can be matched to the given name. +func lookup(name string, ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) { + if ctors == nil { + ctors = make(map[string]ctorFn) + } + if ctor, ok := ctors[name]; ok { + return ctor(ctx, cfg) + } + return nil, errors.New("no tracer found") +} diff --git a/eth/tracers/logger/access_list_tracer.go b/eth/tracers/logger/access_list_tracer.go index a869ea036ec..21b69ff3f56 100644 --- a/eth/tracers/logger/access_list_tracer.go +++ b/eth/tracers/logger/access_list_tracer.go @@ -17,15 +17,15 @@ package logger import ( + "encoding/json" "sort" - "github.com/holiman/uint256" libcommon "github.com/ledgerwatch/erigon-lib/common" types2 "github.com/ledgerwatch/erigon-lib/types" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/core/vm/evmtypes" - "github.com/ledgerwatch/erigon/crypto" ) // accessList is an accumulator for the set of accounts and storage slots an EVM @@ -170,81 +170,42 @@ func NewAccessListTracer(acl types2.AccessList, exclude map[libcommon.Address]st } } -func (a *AccessListTracer) CaptureTxStart(gasLimit uint64) {} - -func (a *AccessListTracer) CaptureTxEnd(restGas uint64) {} - -func (a *AccessListTracer) CaptureStart(env *vm.EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { -} - -func (a *AccessListTracer) CaptureEnter(typ vm.OpCode, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { +func (a *AccessListTracer) Hooks() *tracing.Hooks { + return &tracing.Hooks{ + OnOpcode: a.OnOpcode, + } } -// CaptureState captures all opcodes that touch storage or addresses and adds them to the accesslist. -func (a *AccessListTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { - stack := scope.Stack - contract := scope.Contract - caller := contract.Address() - - stackData := stack.Data +// OnOpcode captures all opcodes that touch storage or addresses and adds them to the accesslist.
+func (a *AccessListTracer) OnOpcode(pc uint64, opcode byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) { + stackData := scope.StackData() stackLen := len(stackData) + op := vm.OpCode(opcode) if (op == vm.SLOAD || op == vm.SSTORE) && stackLen >= 1 { - addr := contract.Address() slot := libcommon.Hash(stackData[stackLen-1].Bytes32()) - if _, ok := a.excl[addr]; !ok { - a.list.addSlot(addr, slot) - if _, ok := a.createdContracts[addr]; !ok { - a.usedBeforeCreation[addr] = struct{}{} - } - } + a.list.addSlot(scope.Address(), slot) } if (op == vm.EXTCODECOPY || op == vm.EXTCODEHASH || op == vm.EXTCODESIZE || op == vm.BALANCE || op == vm.SELFDESTRUCT) && stackLen >= 1 { addr := libcommon.Address(stackData[stackLen-1].Bytes20()) if _, ok := a.excl[addr]; !ok { a.list.addAddress(addr) - if _, ok := a.createdContracts[addr]; !ok { - a.usedBeforeCreation[addr] = struct{}{} - } } } if (op == vm.DELEGATECALL || op == vm.CALL || op == vm.STATICCALL || op == vm.CALLCODE) && stackLen >= 5 { addr := libcommon.Address(stackData[stackLen-2].Bytes20()) if _, ok := a.excl[addr]; !ok { a.list.addAddress(addr) - if _, ok := a.createdContracts[addr]; !ok { - a.usedBeforeCreation[addr] = struct{}{} - } - } - } - if op == vm.CREATE { - // contract address for CREATE can only be generated with state - if a.state != nil { - nonce := a.state.GetNonce(caller) - addr := crypto.CreateAddress(caller, nonce) - if _, ok := a.excl[addr]; !ok { - a.createdContracts[addr] = struct{}{} - } - } - } - if op == vm.CREATE2 && stackLen >= 4 { - offset := stackData[stackLen-2] - size := stackData[stackLen-3] - init := scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) - inithash := crypto.Keccak256(init) - salt := stackData[stackLen-4] - addr := crypto.CreateAddress2(caller, salt.Bytes32(), inithash) - if _, ok := a.excl[addr]; !ok { - a.createdContracts[addr] = struct{}{} } } - } -func (*AccessListTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { +// GetResult returns an empty json object. +func (a *AccessListTracer) GetResult() (json.RawMessage, error) { + return json.RawMessage(`{}`), nil } -func (*AccessListTracer) CaptureEnd(output []byte, usedGas uint64, err error) { -} -func (*AccessListTracer) CaptureExit(output []byte, usedGas uint64, err error) { + +// Stop terminates execution of the tracer at the first opportune moment. +func (a *AccessListTracer) Stop(err error) { } // AccessList returns the current accesslist maintained by the tracer. diff --git a/eth/tracers/logger/json_stream.go b/eth/tracers/logger/json_stream.go index 5b616e81e7f..0408e8fdcda 100644 --- a/eth/tracers/logger/json_stream.go +++ b/eth/tracers/logger/json_stream.go @@ -3,6 +3,7 @@ package logger import ( "context" "encoding/hex" + "encoding/json" "sort" "github.com/holiman/uint256" @@ -10,7 +11,10 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/tracing" + "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/tracers" ) // JsonStreamLogger is an EVM state logger and implements Tracer. 
@@ -30,7 +34,7 @@ type JsonStreamLogger struct { logs []StructLog output []byte //nolint err error //nolint - env *vm.EVM + env *tracing.VMContext } // NewStructLogger returns a new logger @@ -47,25 +51,33 @@ func NewJsonStreamLogger(cfg *LogConfig, ctx context.Context, stream *jsoniter.S return logger } -func (l *JsonStreamLogger) CaptureTxStart(gasLimit uint64) {} - -func (l *JsonStreamLogger) CaptureTxEnd(restGas uint64) {} +func (l *JsonStreamLogger) Tracer() *tracers.Tracer { + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnTxStart: l.OnTxStart, + OnOpcode: l.OnOpcode, + }, + } +} -// CaptureStart implements the Tracer interface to initialize the tracing operation. -func (l *JsonStreamLogger) CaptureStart(env *vm.EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { - l.env = env +func (l *JsonStreamLogger) Hooks() *tracing.Hooks { + return &tracing.Hooks{ + OnTxStart: l.OnTxStart, + OnOpcode: l.OnOpcode, + } } -func (l *JsonStreamLogger) CaptureEnter(typ vm.OpCode, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { +func (l *JsonStreamLogger) OnTxStart(env *tracing.VMContext, tx types.Transaction, from libcommon.Address) { + l.env = env } -// CaptureState logs a new structured log message and pushes it out to the environment +// OnOpcode logs a new structured log message and pushes it out to the environment // -// CaptureState also tracks SLOAD/SSTORE ops to track storage change. -func (l *JsonStreamLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { - contract := scope.Contract - memory := scope.Memory - stack := scope.Stack +// OnOpcode also tracks SLOAD/SSTORE ops to track storage change. +func (l *JsonStreamLogger) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) { + contractAddr := scope.Address() + memory := scope.MemoryData() + stack := scope.StackData() select { case <-l.ctx.Done(): @@ -85,26 +97,26 @@ func (l *JsonStreamLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint6 if !l.cfg.DisableStorage { // initialise new changed values storage container for this contract // if not present. - if l.storage[contract.Address()] == nil { - l.storage[contract.Address()] = make(Storage) + if l.storage[contractAddr] == nil { + l.storage[contractAddr] = make(Storage) } // capture SLOAD opcodes and record the read entry in the local storage - if op == vm.SLOAD && stack.Len() >= 1 { + if vm.OpCode(op) == vm.SLOAD && len(stack) >= 1 { var ( - address = libcommon.Hash(stack.Data[stack.Len()-1].Bytes32()) + address = libcommon.Hash(stack[len(stack)-1].Bytes32()) value uint256.Int ) - l.env.IntraBlockState().GetState(contract.Address(), &address, &value) - l.storage[contract.Address()][address] = value.Bytes32() + l.env.IntraBlockState.GetState(contractAddr, &address, &value) + l.storage[contractAddr][address] = value.Bytes32() outputStorage = true } // capture SSTORE opcodes and record the written entry in the local storage. 
- if op == vm.SSTORE && stack.Len() >= 2 { + if vm.OpCode(op) == vm.SSTORE && len(stack) >= 2 { var ( - value = libcommon.Hash(stack.Data[stack.Len()-2].Bytes32()) - address = libcommon.Hash(stack.Data[stack.Len()-1].Bytes32()) + value = libcommon.Hash(stack[len(stack)-2].Bytes32()) + address = libcommon.Hash(stack[len(stack)-1].Bytes32()) ) - l.storage[contract.Address()][address] = value + l.storage[contractAddr][address] = value outputStorage = true } } @@ -114,7 +126,7 @@ func (l *JsonStreamLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint6 l.stream.WriteUint64(pc) l.stream.WriteMore() l.stream.WriteObjectField("op") - l.stream.WriteString(op.String()) + l.stream.WriteString(vm.OpCode(op).String()) l.stream.WriteMore() l.stream.WriteObjectField("gas") l.stream.WriteUint64(gas) @@ -135,7 +147,7 @@ func (l *JsonStreamLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint6 l.stream.WriteMore() l.stream.WriteObjectField("stack") l.stream.WriteArrayStart() - for i, stackValue := range stack.Data { + for i, stackValue := range stack { if i > 0 { l.stream.WriteMore() } @@ -144,7 +156,7 @@ func (l *JsonStreamLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint6 l.stream.WriteArrayEnd() } if !l.cfg.DisableMemory { - memData := memory.Data() + memData := memory l.stream.WriteMore() l.stream.WriteObjectField("memory") l.stream.WriteArrayStart() @@ -165,7 +177,7 @@ func (l *JsonStreamLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint6 if l.locations != nil { l.locations = l.locations[:0] } - s := l.storage[contract.Address()] + s := l.storage[contractAddr] for loc := range s { l.locations = append(l.locations, loc) } @@ -186,14 +198,11 @@ func (l *JsonStreamLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint6 _ = l.stream.Flush() } -// CaptureFault implements the Tracer interface to trace an execution fault -// while running an opcode. -func (l *JsonStreamLogger) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { -} - -// CaptureEnd is called after the call finishes to finalize the tracing. -func (l *JsonStreamLogger) CaptureEnd(output []byte, usedGas uint64, err error) { +// GetResult returns an empty json object. +func (l *JsonStreamLogger) GetResult() (json.RawMessage, error) { + return json.RawMessage(`{}`), nil } -func (l *JsonStreamLogger) CaptureExit(output []byte, usedGas uint64, err error) { +// Stop terminates execution of the tracer at the first opportune moment. 
+func (l *JsonStreamLogger) Stop(err error) { } diff --git a/eth/tracers/logger/logger.go b/eth/tracers/logger/logger.go index c49a6c4ca85..2bed66b6581 100644 --- a/eth/tracers/logger/logger.go +++ b/eth/tracers/logger/logger.go @@ -17,8 +17,10 @@ import ( "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon/common/math" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/tracers" ) var ErrTraceLimitReached = errors.New("the number of logs reached the specified limit") @@ -116,7 +118,9 @@ type StructLogger struct { logs []StructLog output []byte err error - env *vm.EVM + env *tracing.VMContext + + usedGas uint64 } // NewStructLogger returns a new logger @@ -130,26 +134,30 @@ func NewStructLogger(cfg *LogConfig) *StructLogger { return logger } -func (l *StructLogger) CaptureTxStart(gasLimit uint64) {} - -func (l *StructLogger) CaptureTxEnd(restGas uint64) {} - -// CaptureStart implements the Tracer interface to initialize the tracing operation. -func (l *StructLogger) CaptureStart(env *vm.EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { - l.env = env +func (l *StructLogger) Hooks() *tracing.Hooks { + return &tracing.Hooks{ + OnTxStart: l.OnTxStart, + OnTxEnd: l.OnTxEnd, + OnExit: l.OnExit, + OnOpcode: l.OnOpcode, + } } -// CaptureEnter implements the Tracer interface to initialize the tracing operation for an internal call. -func (l *StructLogger) CaptureEnter(typ vm.OpCode, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { +func (l *StructLogger) Tracer() *tracers.Tracer { + return &tracers.Tracer{ + Hooks: l.Hooks(), + GetResult: l.GetResult, + Stop: l.Stop, + } } -// CaptureState logs a new structured log message and pushes it out to the environment +// OnOpcode logs a new structured log message and pushes it out to the environment // -// CaptureState also tracks SLOAD/SSTORE ops to track storage change. -func (l *StructLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { - memory := scope.Memory - stack := scope.Stack - contract := scope.Contract +// OnOpcode also tracks SLOAD/SSTORE ops to track storage change. 
+func (l *StructLogger) OnOpcode(pc uint64, opcode byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) { + op := vm.OpCode(opcode) + memory := scope.MemoryData() + stack := scope.StackData() // check if already accumulated the specified number of logs if l.cfg.Limit != 0 && l.cfg.Limit <= len(l.logs) { @@ -159,43 +167,46 @@ func (l *StructLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, s // Copy a snapshot of the current memory state to a new buffer var mem []byte if !l.cfg.DisableMemory { - mem = make([]byte, len(memory.Data())) - copy(mem, memory.Data()) + mem = make([]byte, len(memory)) + copy(mem, memory) } // Copy a snapshot of the current stack state to a new buffer var stck []*big.Int if !l.cfg.DisableStack { - stck = make([]*big.Int, len(stack.Data)) - for i, item := range stack.Data { + stck = make([]*big.Int, len(stack)) + for i, item := range stack { stck[i] = new(big.Int).Set(item.ToBig()) } } + + contractAddr := scope.Address() + stackLen := len(stack) // Copy a snapshot of the current storage to a new container var storage Storage if !l.cfg.DisableStorage { // initialise new changed values storage container for this contract // if not present. - if l.storage[contract.Address()] == nil { - l.storage[contract.Address()] = make(Storage) + if l.storage[contractAddr] == nil { + l.storage[contractAddr] = make(Storage) } // capture SLOAD opcodes and record the read entry in the local storage - if op == vm.SLOAD && stack.Len() >= 1 { + if op == vm.SLOAD && stackLen >= 1 { var ( - address = libcommon.Hash(stack.Data[stack.Len()-1].Bytes32()) + address = libcommon.Hash(stack[stackLen-1].Bytes32()) value uint256.Int ) - l.env.IntraBlockState().GetState(contract.Address(), &address, &value) - l.storage[contract.Address()][address] = value.Bytes32() + l.env.IntraBlockState.GetState(contractAddr, &address, &value) + l.storage[contractAddr][address] = value.Bytes32() } // capture SSTORE opcodes and record the written entry in the local storage. - if op == vm.SSTORE && stack.Len() >= 2 { + if op == vm.SSTORE && stackLen >= 2 { var ( - value = libcommon.Hash(stack.Data[stack.Len()-2].Bytes32()) - address = libcommon.Hash(stack.Data[stack.Len()-1].Bytes32()) + value = libcommon.Hash(stack[stackLen-2].Bytes32()) + address = libcommon.Hash(stack[stackLen-1].Bytes32()) ) - l.storage[contract.Address()][address] = value + l.storage[contractAddr][address] = value } - storage = l.storage[contract.Address()].Copy() + storage = l.storage[contractAddr].Copy() } var rdata []byte if !l.cfg.DisableReturnData { @@ -203,17 +214,16 @@ func (l *StructLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, s copy(rdata, rData) } // create a new snapshot of the EVM. - log := StructLog{pc, op, gas, cost, mem, memory.Len(), stck, rdata, storage, depth, l.env.IntraBlockState().GetRefund(), err} + log := StructLog{pc, op, gas, cost, mem, len(memory), stck, rdata, storage, depth, l.env.IntraBlockState.GetRefund(), err} l.logs = append(l.logs, log) } -// CaptureFault implements the Tracer interface to trace an execution fault -// while running an opcode. -func (l *StructLogger) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { -} +// OnExit is called after the call finishes to finalize the tracing. +func (l *StructLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { + if depth != 0 { + return + } -// CaptureEnd is called after the call finishes to finalize the tracing. 
-func (l *StructLogger) CaptureEnd(output []byte, usedGas uint64, err error) { l.output = output l.err = err if l.cfg.Debug { @@ -224,8 +234,28 @@ func (l *StructLogger) CaptureEnd(output []byte, usedGas uint64, err error) { } } -// CaptureExit is called after the internal call finishes to finalize the tracing. -func (l *StructLogger) CaptureExit(output []byte, usedGas uint64, err error) { +func (l *StructLogger) OnTxStart(env *tracing.VMContext, tx types.Transaction, from libcommon.Address) { + l.env = env +} + +func (l *StructLogger) OnTxEnd(receipt *types.Receipt, err error) { + if err != nil { + // Don't override vm error + if l.err == nil { + l.err = err + } + return + } + l.usedGas = receipt.GasUsed +} + +// GetResult returns an empty json object. +func (l *StructLogger) GetResult() (json.RawMessage, error) { + return json.RawMessage(`{}`), nil +} + +// Stop terminates execution of the tracer at the first opportune moment. +func (l *StructLogger) Stop(err error) { } // StructLogs returns the captured log entries. @@ -339,7 +369,7 @@ func WriteLogs(writer io.Writer, logs []*types.Log) { type mdLogger struct { out io.Writer cfg *LogConfig - env *vm.EVM + env *tracing.VMContext } // NewMarkdownLogger creates a logger which outputs information in a format adapted @@ -352,9 +382,19 @@ func NewMarkdownLogger(cfg *LogConfig, writer io.Writer) *mdLogger { return l } -func (t *mdLogger) CaptureTxStart(gasLimit uint64) {} +func (t *mdLogger) Hooks() *tracing.Hooks { + return &tracing.Hooks{ + OnTxStart: t.OnTxStart, + OnEnter: t.OnEnter, + OnExit: t.OnExit, + OnOpcode: t.OnOpcode, + OnFault: t.OnFault, + } +} -func (t *mdLogger) CaptureTxEnd(restGas uint64) {} +func (t *mdLogger) OnTxStart(env *tracing.VMContext, tx types.Transaction, from libcommon.Address) { + t.env = env +} func (t *mdLogger) captureStartOrEnter(from, to libcommon.Address, create bool, input []byte, gas uint64, value *uint256.Int) { if !create { @@ -373,49 +413,51 @@ func (t *mdLogger) captureStartOrEnter(from, to libcommon.Address, create bool, `) } -func (t *mdLogger) CaptureStart(env *vm.EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { //nolint:interfacer - t.env = env +func (t *mdLogger) OnEnter(depth int, typ byte, from libcommon.Address, to libcommon.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) { + if depth != 0 { + return + } + create := vm.OpCode(typ) == vm.CREATE t.captureStartOrEnter(from, to, create, input, gas, value) } -func (t *mdLogger) CaptureEnter(typ vm.OpCode, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { //nolint:interfacer - t.captureStartOrEnter(from, to, create, input, gas, value) +func (t *mdLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { + if depth == 0 { + fmt.Fprintf(t.out, "\nOutput: `%#x`\nConsumed gas: `%d`\nError: `%v`\n", + output, gasUsed, err) + } } -func (t *mdLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { - stack := scope.Stack - +// OnOpcode writes one markdown table row per step of VM execution.
+func (t *mdLogger) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) { + stack := scope.StackData() - fmt.Fprintf(t.out, "| %4d | %10v | %3d |", pc, op, cost) + fmt.Fprintf(t.out, "| %4d | %10v | %3d |", pc, vm.OpCode(op), cost) if !t.cfg.DisableStack { // format stack var a []string - for _, elem := range stack.Data { + for _, elem := range stack { a = append(a, elem.String()) } b := fmt.Sprintf("[%v]", strings.Join(a, ",")) fmt.Fprintf(t.out, "%10v |", b) } - fmt.Fprintf(t.out, "%10v |", t.env.IntraBlockState().GetRefund()) + fmt.Fprintf(t.out, "%10v |", t.env.IntraBlockState.GetRefund()) fmt.Fprintln(t.out, "") if err != nil { fmt.Fprintf(t.out, "Error: %v\n", err) } } -func (t *mdLogger) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { +func (t *mdLogger) OnFault(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, depth int, err error) { - fmt.Fprintf(t.out, "\nError: at pc=%d, op=%v: %v\n", pc, op, err) + fmt.Fprintf(t.out, "\nError: at pc=%d, op=%v: %v\n", pc, vm.OpCode(op), err) } -func (t *mdLogger) captureEndOrExit(output []byte, usedGas uint64, err error) { - fmt.Fprintf(t.out, "\nOutput: `0x%x`\nConsumed gas: `%d`\nError: `%v`\n", - output, usedGas, err) -} - -func (t *mdLogger) CaptureEnd(output []byte, usedGas uint64, err error) { - t.captureEndOrExit(output, usedGas, err) +// GetResult returns an empty json object. +func (t *mdLogger) GetResult() (json.RawMessage, error) { + return json.RawMessage(`{}`), nil } -func (t *mdLogger) CaptureExit(output []byte, usedGas uint64, err error) { - t.captureEndOrExit(output, usedGas, err) +// Stop terminates execution of the tracer at the first opportune moment. +func (t *mdLogger) Stop(err error) { } diff --git a/eth/tracers/logger/logger_json.go b/eth/tracers/logger/logger_json.go index c06b4769226..5d6d36e19e5 100644 --- a/eth/tracers/logger/logger_json.go +++ b/eth/tracers/logger/logger_json.go @@ -21,64 +21,66 @@ import ( "io" "math/big" - "github.com/holiman/uint256" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/math" + "github.com/ledgerwatch/erigon/core/tracing" + "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/tracers" ) type JSONLogger struct { encoder *json.Encoder cfg *LogConfig - env *vm.EVM + env *tracing.VMContext } // NewJSONLogger creates a new EVM tracer that prints execution steps as JSON objects // into the provided stream. -func NewJSONLogger(cfg *LogConfig, writer io.Writer) *JSONLogger { +func NewJSONLogger(cfg *LogConfig, writer io.Writer) *tracers.Tracer { l := &JSONLogger{json.NewEncoder(writer), cfg, nil} if l.cfg == nil { l.cfg = &LogConfig{} } - return l + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnTxStart: l.OnTxStart, + OnExit: l.OnExit, + OnOpcode: l.OnOpcode, + OnFault: l.OnFault, + }, + } } -func (l *JSONLogger) CaptureTxStart(gasLimit uint64) {} - -func (l *JSONLogger) CaptureTxEnd(restGas uint64) {} - -func (l *JSONLogger) CaptureStart(env *vm.EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { +func (l *JSONLogger) OnTxStart(env *tracing.VMContext, tx types.Transaction, from libcommon.Address) { l.env = env } -func (l *JSONLogger) CaptureEnter(typ vm.OpCode, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { -} - -// CaptureState outputs state information on the logger.
-func (l *JSONLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { - memory := scope.Memory - stack := scope.Stack +// OnOpcode outputs state information on the logger. +func (l *JSONLogger) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) { + memory := scope.MemoryData() + stack := scope.StackData() log := StructLog{ Pc: pc, - Op: op, + Op: vm.OpCode(op), Gas: gas, GasCost: cost, - MemorySize: memory.Len(), + MemorySize: len(memory), Storage: nil, Depth: depth, - RefundCounter: l.env.IntraBlockState().GetRefund(), + RefundCounter: l.env.IntraBlockState.GetRefund(), Err: err, } if !l.cfg.DisableMemory { - log.Memory = memory.Data() + log.Memory = memory } if !l.cfg.DisableStack { //TODO(@holiman) improve this - logstack := make([]*big.Int, len(stack.Data)) - for i, item := range stack.Data { + logstack := make([]*big.Int, len(stack)) + for i, item := range stack { logstack[i] = item.ToBig() } log.Stack = logstack @@ -87,11 +89,15 @@ func (l *JSONLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, sco } // CaptureFault outputs state information on the logger. -func (l *JSONLogger) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { +func (l *JSONLogger) OnFault(pc uint64, op byte, gas uint64, cost uint64, scope tracing.OpContext, depth int, err error) { + l.OnOpcode(pc, op, gas, cost, scope, nil, depth, err) } // CaptureEnd is triggered at end of execution. -func (l *JSONLogger) CaptureEnd(output []byte, usedGas uint64, err error) { +func (l *JSONLogger) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { + if depth > 0 { + return + } type endLog struct { Output string `json:"output"` GasUsed math.HexOrDecimal64 `json:"gasUsed"` @@ -101,8 +107,14 @@ func (l *JSONLogger) CaptureEnd(output []byte, usedGas uint64, err error) { if err != nil { errMsg = err.Error() } - _ = l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(usedGas), errMsg}) + _ = l.encoder.Encode(endLog{common.Bytes2Hex(output), math.HexOrDecimal64(gasUsed), errMsg}) +} + +// GetResult returns an empty json object. +func (l *JSONLogger) GetResult() (json.RawMessage, error) { + return json.RawMessage(`{}`), nil } -func (l *JSONLogger) CaptureExit(output []byte, usedGas uint64, err error) { +// Stop terminates execution of the tracer at the first opportune moment. 
+func (l *JSONLogger) Stop(err error) { } diff --git a/eth/tracers/logger/logger_test.go b/eth/tracers/logger/logger_test.go index b4b41213754..77a32ac5876 100644 --- a/eth/tracers/logger/logger_test.go +++ b/eth/tracers/logger/logger_test.go @@ -56,8 +56,8 @@ func (*dummyStatedb) GetRefund() uint64 { return 1337 } func TestStoreCapture(t *testing.T) { var ( - env = vm.NewEVM(evmtypes.BlockContext{}, evmtypes.TxContext{}, &dummyStatedb{}, params.TestChainConfig, vm.Config{}) logger = NewStructLogger(nil) + env = vm.NewEVM(evmtypes.BlockContext{}, evmtypes.TxContext{}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Tracer: logger.Hooks()}) mem = vm.NewMemory() stack = stack.New() contract = vm.NewContract(&dummyContractRef{}, libcommon.Address{}, new(uint256.Int), 0, false /* skipAnalysis */) @@ -65,8 +65,8 @@ func TestStoreCapture(t *testing.T) { stack.Push(uint256.NewInt(1)) stack.Push(uint256.NewInt(0)) var index libcommon.Hash - logger.CaptureStart(env, libcommon.Address{}, libcommon.Address{}, false, false, nil, 0, nil, nil) - logger.CaptureState(0, vm.SSTORE, 0, 0, &vm.ScopeContext{ + logger.OnTxStart(env.GetVMContext(), nil, libcommon.Address{}) + logger.OnOpcode(0, byte(vm.SSTORE), 0, 0, &vm.ScopeContext{ Memory: mem, Stack: stack, Contract: contract, diff --git a/eth/tracers/native/4byte.go b/eth/tracers/native/4byte.go index c61617171f0..b14dd2956c1 100644 --- a/eth/tracers/native/4byte.go +++ b/eth/tracers/native/4byte.go @@ -25,6 +25,8 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/tracing" + "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/tracers" ) @@ -48,20 +50,26 @@ func init() { // 0xc281d19e-0: 1 // } type fourByteTracer struct { - noopTracer ids map[string]int // ids aggregates the 4byte ids found interrupt uint32 // Atomic flag to signal execution interruption reason error // Textual reason for the interruption - activePrecompiles []libcommon.Address // Updated on CaptureStart based on given rules + activePrecompiles []libcommon.Address // Updated on tx start based on given rules } // newFourByteTracer returns a native go tracer which collects // 4 byte-identifiers of a tx, and implements vm.EVMLogger. -func newFourByteTracer(ctx *tracers.Context, _ json.RawMessage) (tracers.Tracer, error) { +func newFourByteTracer(ctx *tracers.Context, _ json.RawMessage) (*tracers.Tracer, error) { t := &fourByteTracer{ ids: make(map[string]int), } - return t, nil + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnTxStart: t.OnTxStart, + OnEnter: t.OnEnter, + }, + GetResult: t.GetResult, + Stop: t.Stop, + }, nil } // isPrecompiled returns whether the addr is a precompile. Logic borrowed from newJsTracer in eth/tracers/js/tracer.go @@ -80,21 +88,13 @@ func (t *fourByteTracer) store(id []byte, size int) { t.ids[key] += 1 } -// CaptureStart implements the EVMLogger interface to initialize the tracing operation. 
-func (t *fourByteTracer) CaptureStart(env *vm.EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { - // Update list of precompiles based on current block - rules := env.ChainRules() +func (t *fourByteTracer) OnTxStart(env *tracing.VMContext, tx types.Transaction, from libcommon.Address) { + rules := env.ChainConfig.Rules(env.BlockNumber, env.Time) t.activePrecompiles = vm.ActivePrecompiles(rules) - - // Save the outer calldata also - if len(input) >= 4 { - t.store(input[0:4], len(input)-4) - } } -// CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct). -func (t *fourByteTracer) CaptureEnter(op vm.OpCode, from libcommon.Address, to libcommon.Address, precompile, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { - // Skip if tracing was interrupted +// OnEnter is called when the EVM enters a new scope (via call, create or selfdestruct). +func (t *fourByteTracer) OnEnter(depth int, opcode byte, from libcommon.Address, to libcommon.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) { + // Skip if tracing was interrupted if atomic.LoadUint32(&t.interrupt) > 0 { return } @@ -102,6 +102,7 @@ func (t *fourByteTracer) CaptureEnter(op vm.OpCode, from libcommon.Address, to l return } // primarily we want to avoid CREATE/CREATE2/SELFDESTRUCT + op := vm.OpCode(opcode) if op != vm.DELEGATECALL && op != vm.STATICCALL && op != vm.CALL && op != vm.CALLCODE { return diff --git a/eth/tracers/native/call.go b/eth/tracers/native/call.go index 7f3c2321a39..c0fba46a2d6 100644 --- a/eth/tracers/native/call.go +++ b/eth/tracers/native/call.go @@ -23,11 +23,12 @@ import ( "sync/atomic" "github.com/holiman/uint256" - libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/hexutil" "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon/accounts/abi" + "github.com/ledgerwatch/erigon/core/tracing" + "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/tracers" ) @@ -59,7 +60,8 @@ type callFrame struct { Logs []callLog `json:"logs,omitempty" rlp:"optional"` // Placed at end on purpose. The RLP will be decoded to 0 instead of // nil if there are non-empty elements after in the struct. - Value *big.Int `json:"value,omitempty" rlp:"optional"` + Value *big.Int `json:"value,omitempty" rlp:"optional"` + revertedSnapshot bool } func (f callFrame) TypeString() string { @@ -67,16 +69,17 @@ } func (f callFrame) failed() bool { - return len(f.Error) > 0 + return len(f.Error) > 0 && f.revertedSnapshot } -func (f *callFrame) processOutput(output []byte, err error) { +func (f *callFrame) processOutput(output []byte, err error, reverted bool) { output = libcommon.CopyBytes(output) if err == nil { f.Output = output return } f.Error = err.Error() + f.revertedSnapshot = reverted if f.Type == vm.CREATE || f.Type == vm.CREATE2 { f.To = libcommon.Address{} } @@ -102,10 +105,10 @@ type callFrameMarshaling struct { } type callTracer struct { - noopTracer callstack []callFrame config callTracerConfig gasLimit uint64 + depth int interrupt uint32 // Atomic flag to signal execution interruption reason error // Textual reason for the interruption logIndex uint64 @@ -119,7 +122,7 @@ type callTracerConfig struct { // newCallTracer returns a native go tracer which tracks -// call frames of a tx, and implements vm.EVMLogger. +// call frames of a tx, and returns them via the hook-based tracers.Tracer.
-func newCallTracer(ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, error) { +func newCallTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) { var config callTracerConfig if cfg != nil { if err := json.Unmarshal(cfg, &config); err != nil { @@ -128,101 +131,59 @@ func newCallTracer(ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, e } // First callframe contains tx context info // and is populated on start and end. - return &callTracer{callstack: make([]callFrame, 1), config: config}, nil -} - -// CaptureStart implements the EVMLogger interface to initialize the tracing operation. -func (t *callTracer) CaptureStart(env *vm.EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { - t.callstack[0] = callFrame{ - Type: vm.CALL, - From: from, - To: to, - Input: libcommon.CopyBytes(input), - Gas: t.gasLimit, // gas has intrinsicGas already subtracted - } - if value != nil { - t.callstack[0].Value = value.ToBig() - } - if create { - t.callstack[0].Type = vm.CREATE - } -} - -// CaptureEnd is called after the call finishes to finalize the tracing. -func (t *callTracer) CaptureEnd(output []byte, gasUsed uint64, err error) { - t.callstack[0].processOutput(output, err) -} - -// CaptureState implements the EVMLogger interface to trace a single step of VM execution. -func (t *callTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { - // Only logs need to be captured via opcode processing - if !t.config.WithLog { - return - } - // Avoid processing nested calls when only caring about top call - if t.config.OnlyTopCall && depth > 0 { - return - } - // Skip if tracing was interrupted - if atomic.LoadUint32(&t.interrupt) > 0 { - return - } - switch op { - case vm.LOG0, vm.LOG1, vm.LOG2, vm.LOG3, vm.LOG4: - size := int(op - vm.LOG0) - - stack := scope.Stack - stackData := stack.Data - stackSize := len(stackData) - if stackSize < 2 { - return - } - // Don't modify the stack - mStart := stackData[stackSize-1] - mSize := stackData[stackSize-2] - topics := make([]libcommon.Hash, size) - dataStart := stackSize - 3 - for i := 0; i < size && dataStart-i >= 0; i++ { - topic := stackData[dataStart-i] - topics[i] = libcommon.Hash(topic.Bytes32()) - } - - data := scope.Memory.GetCopy(int64(mStart.Uint64()), int64(mSize.Uint64())) - log := callLog{Address: scope.Contract.Address(), Topics: topics, Data: hexutility.Bytes(data), Index: t.logIndex} - t.logIndex++ - t.callstack[len(t.callstack)-1].Logs = append(t.callstack[len(t.callstack)-1].Logs, log) - } + t := &callTracer{callstack: make([]callFrame, 0, 1), config: config} + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnTxStart: t.OnTxStart, + OnTxEnd: t.OnTxEnd, + OnEnter: t.OnEnter, + OnExit: t.OnExit, + OnLog: t.OnLog, + }, + GetResult: t.GetResult, + Stop: t.Stop, + }, nil } // CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct). 
-func (t *callTracer) CaptureEnter(typ vm.OpCode, from libcommon.Address, to libcommon.Address, precompile, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { - if t.config.OnlyTopCall { +func (t *callTracer) OnEnter(depth int, typ byte, from libcommon.Address, to libcommon.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) { + t.depth = depth + if t.config.OnlyTopCall && depth > 0 { return } // Skip if tracing was interrupted if atomic.LoadUint32(&t.interrupt) > 0 { return } - + toCopy := to call := callFrame{ - Type: typ, + Type: vm.OpCode(typ), From: from, - To: to, + To: toCopy, Input: libcommon.CopyBytes(input), Gas: gas, + Value: value.ToBig(), } - if value != nil { - call.Value = value.ToBig() + if depth == 0 { + call.Gas = t.gasLimit } t.callstack = append(t.callstack, call) } // CaptureExit is called when EVM exits a scope, even if the scope didn't // execute any code. -func (t *callTracer) CaptureExit(output []byte, gasUsed uint64, err error) { + +func (t *callTracer) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { + if depth == 0 { + t.captureEnd(output, gasUsed, err, reverted) + return + } + + t.depth = depth - 1 if t.config.OnlyTopCall { return } + size := len(t.callstack) if size <= 1 { return @@ -233,18 +194,28 @@ func (t *callTracer) CaptureExit(output []byte, gasUsed uint64, err error) { size -= 1 call.GasUsed = gasUsed - call.processOutput(output, err) + call.processOutput(output, err, reverted) t.callstack[size-1].Calls = append(t.callstack[size-1].Calls, call) } -func (t *callTracer) CaptureTxStart(gasLimit uint64) { - t.gasLimit = gasLimit - t.logIndex = 0 - t.logGaps = make(map[uint64]int) +func (t *callTracer) captureEnd(output []byte, gasUsed uint64, err error, reverted bool) { + if len(t.callstack) != 1 { + return + } + t.callstack[0].processOutput(output, err, reverted) } -func (t *callTracer) CaptureTxEnd(restGas uint64) { - t.callstack[0].GasUsed = t.gasLimit - restGas +func (t *callTracer) OnTxStart(env *tracing.VMContext, tx types.Transaction, from libcommon.Address) { + t.gasLimit = tx.GetGas() +} + +func (t *callTracer) OnTxEnd(receipt *types.Receipt, err error) { + // Error happened during tx validation. + if err != nil { + return + } + + t.callstack[0].GasUsed = receipt.GasUsed if t.config.WithLog { // Logs are not emitted when the call fails clearFailedLogs(&t.callstack[0], false, t.logGaps) @@ -254,6 +225,23 @@ func (t *callTracer) CaptureTxEnd(restGas uint64) { t.logGaps = nil } +func (t *callTracer) OnLog(log *types.Log) { + // Only logs need to be captured via opcode processing + if !t.config.WithLog { + return + } + // Avoid processing nested calls when only caring about top call + if t.config.OnlyTopCall && t.depth > 0 { + return + } + // Skip if tracing was interrupted + if atomic.LoadUint32(&t.interrupt) > 0 { + return + } + t.callstack[len(t.callstack)-1].Logs = append(t.callstack[len(t.callstack)-1].Logs, callLog{Address: log.Address, Topics: log.Topics, Data: log.Data, Index: t.logIndex}) + t.logIndex++ +} + // GetResult returns the json-encoded nested list of call traces, and any // error arising from the encoding or forceful termination (via `Stop`). func (t *callTracer) GetResult() (json.RawMessage, error) { @@ -264,7 +252,7 @@ func (t *callTracer) GetResult() (json.RawMessage, error) { if err != nil { return nil, err } - return json.RawMessage(res), t.reason + return res, t.reason } // Stop terminates execution of the tracer at the first opportune moment. 
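Seen from the caller's side, the rework means nobody implements vm.EVMLogger any more: you obtain a *tracers.Tracer by name, hand its Hooks field to vm.Config, and read the result back through GetResult, exactly as the tests earlier in this diff do. A sketch under stated assumptions: runWithCallTracer is a hypothetical helper, the "callTracer" name and "withLog" option follow this file's usual registration and config fields, and the block/tx/state arguments are prepared elsewhere.

package example

import (
	"encoding/json"

	"github.com/ledgerwatch/erigon-lib/chain"
	"github.com/ledgerwatch/erigon/core/vm"
	"github.com/ledgerwatch/erigon/core/vm/evmtypes"
	"github.com/ledgerwatch/erigon/eth/tracers"
	_ "github.com/ledgerwatch/erigon/eth/tracers/native" // blank import: init() registers the native tracers
)

// runWithCallTracer wires the hook-based call tracer into an EVM instance.
func runWithCallTracer(blockCtx evmtypes.BlockContext, txCtx evmtypes.TxContext, ibs evmtypes.IntraBlockState, cc *chain.Config) (json.RawMessage, error) {
	tracer, err := tracers.New("callTracer", &tracers.Context{}, json.RawMessage(`{"withLog": true}`))
	if err != nil {
		return nil, err
	}
	evm := vm.NewEVM(blockCtx, txCtx, ibs, cc, vm.Config{Debug: true, Tracer: tracer.Hooks})
	_ = evm // apply the transaction here: the EVM fires OnTxStart, OnEnter/OnExit per frame, OnLog, then OnTxEnd
	return tracer.GetResult() // json-encoded nested call frames, or the Stop reason
}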
diff --git a/eth/tracers/native/mux.go b/eth/tracers/native/mux.go index e8a14bb4ad2..84ad42bdfe3 100644 --- a/eth/tracers/native/mux.go +++ b/eth/tracers/native/mux.go @@ -22,7 +22,8 @@ import ( "github.com/holiman/uint256" libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/core/tracing" + "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/tracers" ) @@ -34,18 +35,18 @@ func init() { // runs multiple tracers in one go. type muxTracer struct { names []string - tracers []tracers.Tracer + tracers []*tracers.Tracer } // newMuxTracer returns a new mux tracer. -func newMuxTracer(ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, error) { +func newMuxTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) { var config map[string]json.RawMessage if cfg != nil { if err := json.Unmarshal(cfg, &config); err != nil { return nil, err } } - objects := make([]tracers.Tracer, 0, len(config)) + objects := make([]*tracers.Tracer, 0, len(config)) names := make([]string, 0, len(config)) for k, v := range config { t, err := tracers.New(k, ctx, v) @@ -56,61 +57,124 @@ func newMuxTracer(ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, er names = append(names, k) } - return &muxTracer{names: names, tracers: objects}, nil + t := &muxTracer{names: names, tracers: objects} + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnTxStart: t.OnTxStart, + OnTxEnd: t.OnTxEnd, + OnEnter: t.OnEnter, + OnExit: t.OnExit, + OnOpcode: t.OnOpcode, + OnFault: t.OnFault, + OnGasChange: t.OnGasChange, + OnBalanceChange: t.OnBalanceChange, + OnNonceChange: t.OnNonceChange, + OnCodeChange: t.OnCodeChange, + OnStorageChange: t.OnStorageChange, + OnLog: t.OnLog, + }, + GetResult: t.GetResult, + Stop: t.Stop, + }, nil } -// CaptureStart implements the EVMLogger interface to initialize the tracing operation. -func (t *muxTracer) CaptureStart(env *vm.EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { +func (t *muxTracer) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) { for _, t := range t.tracers { - t.CaptureStart(env, from, to, precompile, create, input, gas, value, code) + if t.OnOpcode != nil { + t.OnOpcode(pc, op, gas, cost, scope, rData, depth, err) + } + } +} + +func (t *muxTracer) OnFault(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, depth int, err error) { + for _, t := range t.tracers { + if t.OnFault != nil { + t.OnFault(pc, op, gas, cost, scope, depth, err) + } + } +} + +func (t *muxTracer) OnGasChange(old, new uint64, reason tracing.GasChangeReason) { + for _, t := range t.tracers { + if t.OnGasChange != nil { + t.OnGasChange(old, new, reason) + } + } +} + +func (t *muxTracer) OnEnter(depth int, typ byte, from libcommon.Address, to libcommon.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) { + for _, t := range t.tracers { + if t.OnEnter != nil { + t.OnEnter(depth, typ, from, to, precompile, input, gas, value, code) + } + } +} + +func (t *muxTracer) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { + for _, t := range t.tracers { + if t.OnExit != nil { + t.OnExit(depth, output, gasUsed, err, reverted) + } } } -// CaptureEnd is called after the call finishes to finalize the tracing. 
-func (t *muxTracer) CaptureEnd(output []byte, gasUsed uint64, err error) { +func (t *muxTracer) OnTxStart(env *tracing.VMContext, tx types.Transaction, from libcommon.Address) { for _, t := range t.tracers { - t.CaptureEnd(output, gasUsed, err) + if t.OnTxStart != nil { + t.OnTxStart(env, tx, from) + } + } +} + +func (t *muxTracer) OnTxEnd(receipt *types.Receipt, err error) { + for _, t := range t.tracers { + if t.OnTxEnd != nil { + t.OnTxEnd(receipt, err) + } } } -// CaptureState implements the EVMLogger interface to trace a single step of VM execution. -func (t *muxTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { +func (t *muxTracer) OnBlockEnd(err error) { for _, t := range t.tracers { - t.CaptureState(pc, op, gas, cost, scope, rData, depth, err) + if t.OnBlockEnd != nil { + t.OnBlockEnd(err) + } } } -// CaptureFault implements the EVMLogger interface to trace an execution fault. -func (t *muxTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { +func (t *muxTracer) OnGenesisBlock(b *types.Block, alloc types.GenesisAlloc) { for _, t := range t.tracers { - t.CaptureFault(pc, op, gas, cost, scope, depth, err) + if t.OnGenesisBlock != nil { + t.OnGenesisBlock(b, alloc) + } } } -// CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct). -func (t *muxTracer) CaptureEnter(typ vm.OpCode, from libcommon.Address, to libcommon.Address, precompile, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { +func (t *muxTracer) OnBalanceChange(a libcommon.Address, prev, new *uint256.Int, reason tracing.BalanceChangeReason) { for _, t := range t.tracers { - t.CaptureEnter(typ, from, to, precompile, create, input, gas, value, code) + if t.OnBalanceChange != nil { + t.OnBalanceChange(a, prev, new, reason) + } } } -// CaptureExit is called when EVM exits a scope, even if the scope didn't -// execute any code. -func (t *muxTracer) CaptureExit(output []byte, gasUsed uint64, err error) { +func (t *muxTracer) OnNonceChange(a libcommon.Address, prev, new uint64) { for _, t := range t.tracers { - t.CaptureExit(output, gasUsed, err) + if t.OnNonceChange != nil { + t.OnNonceChange(a, prev, new) + } } } -func (t *muxTracer) CaptureTxStart(gasLimit uint64) { +func (t *muxTracer) OnCodeChange(a libcommon.Address, prevCodeHash libcommon.Hash, prev []byte, codeHash libcommon.Hash, code []byte) { for _, t := range t.tracers { - t.CaptureTxStart(gasLimit) + if t.OnCodeChange != nil { + t.OnCodeChange(a, prevCodeHash, prev, codeHash, code) + } } } -func (t *muxTracer) CaptureTxEnd(restGas uint64) { +func (t *muxTracer) OnStorageChange(addr libcommon.Address, slot *libcommon.Hash, prev uint256.Int, new uint256.Int) { for _, t := range t.tracers { - t.CaptureTxEnd(restGas) + if t.OnStorageChange != nil { + t.OnStorageChange(addr, slot, prev, new) + } } } @@ -131,6 +195,14 @@ func (t *muxTracer) GetResult() (json.RawMessage, error) { return res, nil } +func (t *muxTracer) OnLog(log *types.Log) { + for _, t := range t.tracers { + if t.OnLog != nil { + t.OnLog(log) + } + } +} + // Stop terminates execution of the tracer at the first opportune moment.
func (t *muxTracer) Stop(err error) { for _, t := range t.tracers { diff --git a/eth/tracers/native/noop.go b/eth/tracers/native/noop.go index 29365d00d86..814a5a9b4f3 100644 --- a/eth/tracers/native/noop.go +++ b/eth/tracers/native/noop.go @@ -20,59 +20,81 @@ import ( "encoding/json" "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/common" libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/core/tracing" + "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/tracers" ) func init() { - register("noopTracer", newNoopTracer) + // register("noopTracer", newNoopTracer) } -// noopTracer is a go implementation of the Tracer interface which +// NoopTracer is a go implementation of the Tracer interface which // performs no action. It's mostly useful for testing purposes. -type noopTracer struct{} +type NoopTracer struct{} // newNoopTracer returns a new noop tracer. -func newNoopTracer(ctx *tracers.Context, _ json.RawMessage) (tracers.Tracer, error) { - return &noopTracer{}, nil +func newNoopTracer(ctx *tracers.Context, _ json.RawMessage) (*tracers.Tracer, error) { + t := &NoopTracer{} + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnTxStart: t.OnTxStart, + OnTxEnd: t.OnTxEnd, + OnEnter: t.OnEnter, + OnExit: t.OnExit, + OnOpcode: t.OnOpcode, + OnFault: t.OnFault, + OnGasChange: t.OnGasChange, + OnBalanceChange: t.OnBalanceChange, + OnNonceChange: t.OnNonceChange, + OnCodeChange: t.OnCodeChange, + OnStorageChange: t.OnStorageChange, + OnLog: t.OnLog, + }, + GetResult: t.GetResult, + Stop: t.Stop, + }, nil } -// CaptureStart implements the EVMLogger interface to initialize the tracing operation. -func (t *noopTracer) CaptureStart(env *vm.EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { +func (t *NoopTracer) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) { } -// CaptureEnd is called after the call finishes to finalize the tracing. -func (t *noopTracer) CaptureEnd(output []byte, gasUsed uint64, err error) { +func (t *NoopTracer) OnFault(pc uint64, op byte, gas, cost uint64, _ tracing.OpContext, depth int, err error) { } -// CaptureState implements the EVMLogger interface to trace a single step of VM execution. -func (t *noopTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { +func (t *NoopTracer) OnGasChange(old, new uint64, reason tracing.GasChangeReason) {} + +func (t *NoopTracer) OnEnter(depth int, typ byte, from libcommon.Address, to libcommon.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) { } -// CaptureFault implements the EVMLogger interface to trace an execution fault. -func (t *noopTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, _ *vm.ScopeContext, depth int, err error) { +func (t *NoopTracer) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { } -// CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct). 
-func (t *noopTracer) CaptureEnter(typ vm.OpCode, from libcommon.Address, to libcommon.Address, precompile, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { +func (*NoopTracer) OnTxStart(env *tracing.VMContext, tx types.Transaction, from common.Address) { } -// CaptureExit is called when EVM exits a scope, even if the scope didn't -// execute any code. -func (t *noopTracer) CaptureExit(output []byte, gasUsed uint64, err error) { +func (*NoopTracer) OnTxEnd(receipt *types.Receipt, err error) {} + +func (*NoopTracer) OnBalanceChange(a common.Address, prev, new *uint256.Int, reason tracing.BalanceChangeReason) { +} + +func (*NoopTracer) OnNonceChange(a libcommon.Address, prev, new uint64) {} + +func (*NoopTracer) OnCodeChange(a libcommon.Address, prevCodeHash libcommon.Hash, prev []byte, codeHash libcommon.Hash, code []byte) { } -func (*noopTracer) CaptureTxStart(gasLimit uint64) {} +func (*NoopTracer) OnStorageChange(a libcommon.Address, k *libcommon.Hash, prev, new uint256.Int) {} -func (*noopTracer) CaptureTxEnd(restGas uint64) {} +func (*NoopTracer) OnLog(log *types.Log) {} // GetResult returns an empty json object. -func (t *noopTracer) GetResult() (json.RawMessage, error) { +func (t *NoopTracer) GetResult() (json.RawMessage, error) { return json.RawMessage(`{}`), nil } // Stop terminates execution of the tracer at the first opportune moment. -func (t *noopTracer) Stop(err error) { +func (t *NoopTracer) Stop(err error) { } diff --git a/eth/tracers/native/prestate.go b/eth/tracers/native/prestate.go index 6dff96a8950..72f62ec49cc 100644 --- a/eth/tracers/native/prestate.go +++ b/eth/tracers/native/prestate.go @@ -19,6 +19,7 @@ package native import ( "bytes" "encoding/json" + "fmt" "math/big" "sync/atomic" @@ -28,6 +29,9 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/hexutility" + + "github.com/ledgerwatch/erigon/core/tracing" + "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/eth/tracers" @@ -58,8 +62,7 @@ type accountMarshaling struct { } type prestateTracer struct { - noopTracer - env *vm.EVM + env *tracing.VMContext pre state post state create bool @@ -76,73 +79,37 @@ type prestateTracerConfig struct { DiffMode bool `json:"diffMode"` // If true, this tracer will return state modifications } -func newPrestateTracer(ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, error) { +func newPrestateTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) { var config prestateTracerConfig if cfg != nil { if err := json.Unmarshal(cfg, &config); err != nil { return nil, err } } - return &prestateTracer{ + t := &prestateTracer{ pre: state{}, post: state{}, config: config, created: make(map[libcommon.Address]bool), deleted: make(map[libcommon.Address]bool), - }, nil -} - -// CaptureStart implements the EVMLogger interface to initialize the tracing operation. -func (t *prestateTracer) CaptureStart(env *vm.EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { - t.env = env - t.create = create - t.to = to - - t.lookupAccount(from) - t.lookupAccount(to) - t.lookupAccount(env.Context.Coinbase) - - // The recipient balance includes the value transferred. - toBal := new(big.Int).Sub(t.pre[to].Balance, value.ToBig()) - t.pre[to].Balance = toBal - - // The sender balance is after reducing: value and gasLimit. 
- // We need to re-add them to get the pre-tx balance. - fromBal := new(big.Int).Set(t.pre[from].Balance) - gasPrice := env.GasPrice - consumedGas := new(big.Int).Mul(gasPrice.ToBig(), new(big.Int).SetUint64(t.gasLimit)) - fromBal.Add(fromBal, new(big.Int).Add(value.ToBig(), consumedGas)) - t.pre[from].Balance = fromBal - if t.pre[from].Nonce > 0 { - t.pre[from].Nonce-- - } - - if create && t.config.DiffMode { - t.created[to] = true - } -} - -// CaptureEnd is called after the call finishes to finalize the tracing. -func (t *prestateTracer) CaptureEnd(output []byte, gasUsed uint64, err error) { - if t.config.DiffMode { - return - } - - if t.create { - // Keep existing account prior to contract creation at that address - if s := t.pre[t.to]; s != nil && !s.exists() { - // Exclude newly created contract. - delete(t.pre, t.to) - } - } } + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnTxStart: t.OnTxStart, + OnTxEnd: t.OnTxEnd, + OnOpcode: t.OnOpcode, + }, + GetResult: t.GetResult, + Stop: t.Stop, + }, nil } -// CaptureState implements the EVMLogger interface to trace a single step of VM execution. -func (t *prestateTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { - stack := scope.Stack - stackData := stack.Data +// OnOpcode implements the EVMLogger interface to trace a single step of VM execution. +func (t *prestateTracer) OnOpcode(pc uint64, opcode byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) { + op := vm.OpCode(opcode) + stackData := scope.StackData() stackLen := len(stackData) - caller := scope.Contract.Address() + caller := scope.Address() switch { case stackLen >= 1 && (op == vm.SLOAD || op == vm.SSTORE): slot := libcommon.Hash(stackData[stackLen-1].Bytes32()) @@ -157,14 +124,18 @@ func (t *prestateTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, addr := libcommon.Address(stackData[stackLen-2].Bytes20()) t.lookupAccount(addr) case op == vm.CREATE: - nonce := t.env.IntraBlockState().GetNonce(caller) + nonce := t.env.IntraBlockState.GetNonce(caller) addr := crypto.CreateAddress(caller, nonce) t.lookupAccount(addr) t.created[addr] = true case stackLen >= 4 && op == vm.CREATE2: offset := stackData[stackLen-2] size := stackData[stackLen-3] - init := scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) + init, err := tracers.GetMemoryCopyPadded(scope.MemoryData(), int64(offset.Uint64()), int64(size.Uint64())) + if err != nil { + t.Stop(fmt.Errorf("failed to copy CREATE2 input in prestate tracer: %s", err)) + return + } inithash := crypto.Keccak256(init) salt := stackData[stackLen-4] addr := crypto.CreateAddress2(caller, salt.Bytes32(), inithash) @@ -173,15 +144,41 @@ func (t *prestateTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, } } -func (t *prestateTracer) CaptureTxStart(gasLimit uint64) { - t.gasLimit = gasLimit +func (t *prestateTracer) OnTxStart(env *tracing.VMContext, tx types.Transaction, from libcommon.Address) { + t.env = env + + signer := types.MakeSigner(env.ChainConfig, env.BlockNumber, env.Time) + from, err := tx.Sender(*signer) + if err != nil { + t.Stop(fmt.Errorf("could not recover sender address: %v", err)) + return + } + if tx.GetTo() == nil { + t.create = true + t.to = crypto.CreateAddress(from, env.IntraBlockState.GetNonce(from)) + } else { + t.to = *tx.GetTo() + t.create = false + } + + t.lookupAccount(from) + t.lookupAccount(t.to) + t.lookupAccount(env.Coinbase) + + if t.create && t.config.DiffMode { + 
t.created[t.to] = true + } } -func (t *prestateTracer) CaptureTxEnd(restGas uint64) { +func (t *prestateTracer) OnTxEnd(receipt *types.Receipt, err error) { if !t.config.DiffMode { return } + if err != nil { + return + } + for addr, state := range t.pre { // The deleted account's state is pruned from `post` but kept in `pre` if _, ok := t.deleted[addr]; ok { @@ -189,9 +186,9 @@ func (t *prestateTracer) CaptureTxEnd(restGas uint64) { } modified := false postAccount := &account{Storage: make(map[libcommon.Hash]libcommon.Hash)} - newBalance := t.env.IntraBlockState().GetBalance(addr).ToBig() - newNonce := t.env.IntraBlockState().GetNonce(addr) - newCode := t.env.IntraBlockState().GetCode(addr) + newBalance := t.env.IntraBlockState.GetBalance(addr).ToBig() + newNonce := t.env.IntraBlockState.GetNonce(addr) + newCode := t.env.IntraBlockState.GetCode(addr) if newBalance.Cmp(t.pre[addr].Balance) != 0 { modified = true @@ -213,7 +210,7 @@ func (t *prestateTracer) CaptureTxEnd(restGas uint64) { } var newVal uint256.Int - t.env.IntraBlockState().GetState(addr, &key, &newVal) + t.env.IntraBlockState.GetState(addr, &key, &newVal) if new(uint256.Int).SetBytes(val[:]).Eq(&newVal) { // Omit unchanged slots delete(t.pre[addr].Storage, key) @@ -274,9 +271,9 @@ func (t *prestateTracer) lookupAccount(addr libcommon.Address) { } t.pre[addr] = &account{ - Balance: t.env.IntraBlockState().GetBalance(addr).ToBig(), - Nonce: t.env.IntraBlockState().GetNonce(addr), - Code: t.env.IntraBlockState().GetCode(addr), + Balance: t.env.IntraBlockState.GetBalance(addr).ToBig(), + Nonce: t.env.IntraBlockState.GetNonce(addr), + Code: t.env.IntraBlockState.GetCode(addr), Storage: make(map[libcommon.Hash]libcommon.Hash), } } @@ -289,6 +286,6 @@ func (t *prestateTracer) lookupStorage(addr libcommon.Address, key libcommon.Has return } var val uint256.Int - t.env.IntraBlockState().GetState(addr, &key, &val) + t.env.IntraBlockState.GetState(addr, &key, &val) t.pre[addr].Storage[key] = val.Bytes32() } diff --git a/eth/tracers/native/tracer.go b/eth/tracers/native/tracer.go index bdcc31394ec..29bf07eebf4 100644 --- a/eth/tracers/native/tracer.go +++ b/eth/tracers/native/tracer.go @@ -43,7 +43,7 @@ func init() { } // ctorFn is the constructor signature of a native tracer. -type ctorFn = func(*tracers.Context, json.RawMessage) (tracers.Tracer, error) +type ctorFn = func(*tracers.Context, json.RawMessage) (*tracers.Tracer, error) /* ctors is a map of package-local tracer constructors. @@ -68,7 +68,7 @@ func register(name string, ctor ctorFn) { } // lookup returns a tracer, if one can be matched to the given name. -func lookup(name string, ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, error) { +func lookup(name string, ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, error) { if ctors == nil { ctors = make(map[string]ctorFn) } diff --git a/eth/tracers/tracers.go b/eth/tracers/tracers.go index 7bf41f5769b..53d885e2f4e 100644 --- a/eth/tracers/tracers.go +++ b/eth/tracers/tracers.go @@ -21,9 +21,9 @@ import ( "encoding/json" "errors" - libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/core/tracing" - "github.com/ledgerwatch/erigon/core/vm" + libcommon "github.com/ledgerwatch/erigon-lib/common" ) // Context contains some contextual infos for a transaction execution that is not @@ -36,14 +36,14 @@ type Context struct { // Tracer interface extends vm.EVMLogger and additionally // allows collecting the tracing result. 
-type Tracer interface { - vm.EVMLogger - GetResult() (json.RawMessage, error) +type Tracer struct { + *tracing.Hooks + GetResult func() (json.RawMessage, error) // Stop terminates execution of the tracer at the first opportune moment. - Stop(err error) + Stop func(err error) } -type lookupFunc func(string, *Context, json.RawMessage) (Tracer, error) +type lookupFunc func(string, *Context, json.RawMessage) (*Tracer, error) var ( lookups []lookupFunc @@ -63,7 +63,7 @@ func RegisterLookup(wildcard bool, lookup lookupFunc) { // New returns a new instance of a tracer, by iterating through the // registered lookups. -func New(code string, ctx *Context, cfg json.RawMessage) (Tracer, error) { +func New(code string, ctx *Context, cfg json.RawMessage) (*Tracer, error) { for _, lookup := range lookups { if tracer, err := lookup(code, ctx, cfg); err == nil { return tracer, nil diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go index 5d2b53c5984..d65537b84bf 100644 --- a/eth/tracers/tracers_test.go +++ b/eth/tracers/tracers_test.go @@ -20,10 +20,11 @@ import ( "crypto/ecdsa" "crypto/rand" "encoding/json" - "github.com/ledgerwatch/erigon-lib/common/hexutil" "math/big" "testing" + "github.com/ledgerwatch/erigon-lib/common/hexutil" + libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/types" @@ -109,16 +110,20 @@ func TestPrestateTracerCreate2(t *testing.T) { if err != nil { t.Fatalf("failed to prestate tracer: %v", err) } - evm := vm.NewEVM(context, txContext, statedb, params.AllProtocolChanges, vm.Config{Debug: true, Tracer: tracer}) + evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Debug: true, Tracer: tracer.Hooks}) msg, err := txn.AsMessage(*signer, nil, rules) if err != nil { t.Fatalf("failed to prepare transaction for tracing: %v", err) } + + tracer.OnTxStart(evm.GetVMContext(), txn, msg.From()) st := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(txn.GetGas()).AddBlobGas(txn.GetBlobGas())) - if _, err = st.TransitionDb(false, false); err != nil { + exeRes, err := st.TransitionDb(false, false) + if err != nil { t.Fatalf("failed to execute transaction: %v", err) } + tracer.OnTxEnd(&types.Receipt{GasUsed: exeRes.UsedGas}, nil) // Retrieve the trace result and compare against the etalon res, err := tracer.GetResult() if err != nil { diff --git a/eth/tracers/util.go b/eth/tracers/util.go new file mode 100644 index 00000000000..b115df34925 --- /dev/null +++ b/eth/tracers/util.go @@ -0,0 +1,80 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. +package tracers + +import ( + "errors" + "fmt" + + "github.com/holiman/uint256" +) + +const ( + memoryPadLimit = 1024 * 1024 +) + +// GetMemoryCopyPadded returns offset + size as a new slice. 
+// It zero-pads the slice if it extends beyond memory bounds. +func GetMemoryCopyPadded(m []byte, offset, size int64) ([]byte, error) { + if offset < 0 || size < 0 { + return nil, errors.New("offset or size must not be negative") + } + length := int64(len(m)) + if offset+size < length { // slice fully inside memory + return memoryCopy(m, offset, size), nil + } + paddingNeeded := offset + size - length + if paddingNeeded > memoryPadLimit { + return nil, fmt.Errorf("reached limit for padding memory slice: %d", paddingNeeded) + } + cpy := make([]byte, size) + if overlap := length - offset; overlap > 0 { + copy(cpy, MemoryPtr(m, offset, overlap)) + } + return cpy, nil +} + +func memoryCopy(m []byte, offset, size int64) (cpy []byte) { + if size == 0 { + return nil + } + + if len(m) > int(offset) { + cpy = make([]byte, size) + copy(cpy, m[offset:offset+size]) + + return + } + + return +} + +func MemoryPtr(m []byte, offset, size int64) []byte { + if size == 0 { + return nil + } + + if len(m) > int(offset) { + return m[offset : offset+size] + } + + return nil +} + +// StackBack returns the n'th item in the stack +func StackBack(st []uint256.Int, n int) *uint256.Int { + return &st[len(st)-n-1] +} diff --git a/eth/tracers/util_test.go b/eth/tracers/util_test.go new file mode 100644 index 00000000000..965c135b742 --- /dev/null +++ b/eth/tracers/util_test.go @@ -0,0 +1,59 @@ +// Copyright 2023 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 
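The padding behaviour of GetMemoryCopyPadded is easiest to see with concrete values. A minimal sketch, assuming only the exported helpers from the util.go hunk above and the module path used elsewhere in this diff; the byte values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/holiman/uint256"

	"github.com/ledgerwatch/erigon/eth/tracers"
)

func main() {
	mem := []byte{0x01, 0x02, 0x03}

	// Ask for bytes [2, 8): only mem[2] exists, so the result is
	// zero-padded up to the requested size (bounded by memoryPadLimit).
	cpy, err := tracers.GetMemoryCopyPadded(mem, 2, 6)
	fmt.Println(cpy, err) // [3 0 0 0 0 0] <nil>

	// StackBack indexes from the top of the stack: n=0 is the top item.
	st := []uint256.Int{*uint256.NewInt(10), *uint256.NewInt(20)}
	fmt.Println(tracers.StackBack(st, 0).Uint64()) // 20
	fmt.Println(tracers.StackBack(st, 1).Uint64()) // 10
}
```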
+package tracers + +import ( + "testing" + + "github.com/ledgerwatch/erigon/core/vm" +) + +func TestMemCopying(t *testing.T) { + for i, tc := range []struct { + memsize int64 + offset int64 + size int64 + wantErr string + wantSize int + }{ + {0, 0, 100, "", 100}, // Should pad up to 100 + {0, 100, 0, "", 0}, // No need to pad (0 size) + {100, 50, 100, "", 100}, // Should pad 100-150 + {100, 50, 5, "", 5}, // Wanted range fully within memory + {100, -50, 0, "offset or size must not be negative", 0}, // Error + {0, 1, 1024*1024 + 1, "reached limit for padding memory slice: 1048578", 0}, // Error + {10, 0, 1024*1024 + 100, "reached limit for padding memory slice: 1048666", 0}, // Error + } { + mem := vm.NewMemory() + mem.Resize(uint64(tc.memsize)) + cpy, err := GetMemoryCopyPadded(mem.Data(), tc.offset, tc.size) + if want := tc.wantErr; want != "" { + if err == nil { + t.Fatalf("test %d: want '%v' have no error", i, want) + } + if have := err.Error(); want != have { + t.Fatalf("test %d: want '%v' have '%v'", i, want, have) + } + continue + } + if err != nil { + t.Fatalf("test %d: unexpected error: %v", i, err) + } + if want, have := tc.wantSize, len(cpy); have != want { + t.Fatalf("test %d: want %v have %v", i, want, have) + } + } +} diff --git a/ethdb/prune/storage_mode.go b/ethdb/prune/storage_mode.go index 616f7d98240..c1fc3be5242 100644 --- a/ethdb/prune/storage_mode.go +++ b/ethdb/prune/storage_mode.go @@ -20,15 +20,14 @@ var DefaultMode = Mode{ Receipts: Distance(math.MaxUint64), TxIndex: Distance(math.MaxUint64), CallTraces: Distance(math.MaxUint64), - Blocks: Distance(math.MaxUint64), Experiments: Experiments{}, // all off } type Experiments struct { } -func FromCli(chainId uint64, flags string, exactBlocks, exactHistory, exactReceipts, exactTxIndex, exactCallTraces, - beforeB, beforeH, beforeR, beforeT, beforeC uint64, experiments []string) (Mode, error) { +func FromCli(chainId uint64, flags string, exactHistory, exactReceipts, exactTxIndex, exactCallTraces, + beforeH, beforeR, beforeT, beforeC uint64, experiments []string) (Mode, error) { mode := DefaultMode if flags != "default" && flags != "disabled" { @@ -42,17 +41,12 @@ func FromCli(chainId uint64, flags string, exactBlocks, exactHistory, exactRecei mode.TxIndex = Distance(params.FullImmutabilityThreshold) case 'c': mode.CallTraces = Distance(params.FullImmutabilityThreshold) - case 'b': - mode.Blocks = Distance(params.FullImmutabilityThreshold) default: return DefaultMode, fmt.Errorf("unexpected flag found: %c", flag) } } } - if exactBlocks > 0 { - mode.Blocks = Distance(exactBlocks) - } if exactHistory > 0 { mode.History = Distance(exactHistory) } @@ -78,9 +72,6 @@ func FromCli(chainId uint64, flags string, exactBlocks, exactHistory, exactRecei if beforeC > 0 { mode.CallTraces = Before(beforeC) } - if beforeB > 0 { - mode.Blocks = Before(beforeB) - } for _, ex := range experiments { switch ex { @@ -129,14 +120,6 @@ func Get(db kv.Getter) (Mode, error) { prune.CallTraces = blockAmount } - blockAmount, err = get(db, kv.PruneBlocks) - if err != nil { - return prune, err - } - if blockAmount != nil { - prune.Blocks = blockAmount - } - return prune, nil } @@ -146,7 +129,6 @@ type Mode struct { Receipts BlockAmount TxIndex BlockAmount CallTraces BlockAmount - Blocks BlockAmount Experiments Experiments } @@ -212,13 +194,6 @@ func (m Mode) String() string { long += fmt.Sprintf(" --prune.h.%s=%d", m.History.dbType(), m.History.toValue()) } } - if m.Blocks.Enabled() { - if m.Blocks.useDefaultValue() { - short += fmt.Sprintf(" 
--prune.b.older=%d", defaultVal) - } else { - long += fmt.Sprintf(" --prune.b.%s=%d", m.Blocks.dbType(), m.Blocks.toValue()) - } - } if m.Receipts.Enabled() { if m.Receipts.useDefaultValue() { short += fmt.Sprintf(" --prune.r.older=%d", defaultVal) @@ -269,11 +244,6 @@ func Override(db kv.RwTx, sm Mode) error { return err } - err = set(db, kv.PruneBlocks, sm.Blocks) - if err != nil { - return err - } - return nil } @@ -320,7 +290,6 @@ func setIfNotExist(db kv.GetPut, pm Mode) error { string(kv.PruneReceipts): pm.Receipts, string(kv.PruneTxIndex): pm.TxIndex, string(kv.PruneCallTraces): pm.CallTraces, - string(kv.PruneBlocks): pm.Blocks, } for key, value := range pruneDBData { diff --git a/ethdb/prune/storage_mode_test.go b/ethdb/prune/storage_mode_test.go index bdddb99e93d..a5aeca248ac 100644 --- a/ethdb/prune/storage_mode_test.go +++ b/ethdb/prune/storage_mode_test.go @@ -15,16 +15,16 @@ func TestSetStorageModeIfNotExist(t *testing.T) { prune, err := Get(tx) assert.NoError(t, err) assert.Equal(t, Mode{true, Distance(math.MaxUint64), Distance(math.MaxUint64), - Distance(math.MaxUint64), Distance(math.MaxUint64), Distance(math.MaxUint64), Experiments{}}, prune) + Distance(math.MaxUint64), Distance(math.MaxUint64), Experiments{}}, prune) err = setIfNotExist(tx, Mode{true, Distance(1), Distance(2), - Before(3), Before(4), Before(100), Experiments{}}) + Before(3), Before(4), Experiments{}}) assert.NoError(t, err) prune, err = Get(tx) assert.NoError(t, err) assert.Equal(t, Mode{true, Distance(1), Distance(2), - Before(3), Before(4), Before(100), Experiments{}}, prune) + Before(3), Before(4), Experiments{}}, prune) } var distanceTests = []struct { diff --git a/ethdb/walk.go b/ethdb/walk.go index d43135643d9..006b1785fce 100644 --- a/ethdb/walk.go +++ b/ethdb/walk.go @@ -27,7 +27,7 @@ import ( // of composite storage key, but without // reconstructing the key // Instead, the key is split into two parts and -// functions `seekInFiles` and `Next` deliver both +// functions `Seek` and `Next` deliver both // parts as well as the corresponding value type splitCursor struct { c kv.Cursor // Unlerlying cursor diff --git a/go.mod b/go.mod index bbb06c8ded4..542c7d25533 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/ledgerwatch/erigon -go 1.21.5 +go 1.21 require ( github.com/erigontech/mdbx-go v0.38.0 @@ -20,6 +20,7 @@ require ( github.com/VictoriaMetrics/fastcache v1.12.2 github.com/alecthomas/atomic v0.1.0-alpha2 github.com/alecthomas/kong v0.8.1 + github.com/anacrolix/log v0.15.2 github.com/anacrolix/sync v0.5.1 github.com/anacrolix/torrent v1.52.6-0.20231201115409-7ea994b6bbd8 github.com/benesch/cgosymbolizer v0.0.0-20190515212042-bec6fe6e597b @@ -128,7 +129,6 @@ require ( github.com/anacrolix/envpprof v1.3.0 // indirect github.com/anacrolix/generics v0.0.0-20230816105729-c755655aee45 // indirect github.com/anacrolix/go-libutp v1.3.1 // indirect - github.com/anacrolix/log v0.15.2 // indirect github.com/anacrolix/missinggo v1.3.0 // indirect github.com/anacrolix/missinggo/perf v1.0.0 // indirect github.com/anacrolix/missinggo/v2 v2.7.2-0.20230527121029-a582b4f397b9 // indirect @@ -161,7 +161,7 @@ require ( github.com/elastic/gosigar v0.14.2 // indirect github.com/flynn/noise v1.0.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/garslo/gogen v0.0.0-20170307003452-d6ebae628c7c // indirect github.com/go-llsqlite/adapter v0.0.0-20230927005056-7f5ce7f0c916 // 
indirect github.com/go-llsqlite/crawshaw v0.4.0 // indirect @@ -186,8 +186,7 @@ require ( github.com/koron/go-ssdp v0.0.4 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect - github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510125329-7b93b3d7e99f // indirect - github.com/ledgerwatch/erigonwatch v0.1.0 + github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240505022337-08dce201593b // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect @@ -263,11 +262,11 @@ require ( github.com/shirou/gopsutil/v3 v3.24.3 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/shopspring/decimal v1.2.0 // indirect - github.com/showwin/speedtest-go v1.7.5 // indirect + github.com/showwin/speedtest-go v1.6.12 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sosodev/duration v1.1.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/cast v1.3.1 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect github.com/supranational/blst v0.3.11 // indirect github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect @@ -297,6 +296,6 @@ require ( ) replace ( - github.com/anacrolix/torrent => github.com/erigontech/torrent v1.54.2-alpha-10 + github.com/anacrolix/torrent => github.com/erigontech/torrent v1.54.2-alpha-8 github.com/holiman/bloomfilter/v2 => github.com/AskAlexSharov/bloomfilter/v2 v2.0.8 ) diff --git a/go.sum b/go.sum index 417c8efa6fd..d48bd3cf332 100644 --- a/go.sum +++ b/go.sum @@ -273,8 +273,8 @@ github.com/erigontech/mdbx-go v0.38.0 h1:K64h6YHc2biN081DPEp/KP1TE+X0Jmxu8T+RJad github.com/erigontech/mdbx-go v0.38.0/go.mod h1:FAMxbOgqOnRDx51j8HjuJZIgznbDwjX7LItd+/UWyA4= github.com/erigontech/silkworm-go v0.18.0 h1:j56p61xZHBFhZGH1OixlGU8KcfjHzcw9pjAfjmVsOZA= github.com/erigontech/silkworm-go v0.18.0/go.mod h1:O50ux0apICEVEGyRWiE488K8qz8lc3PA/SXbQQAc8SU= -github.com/erigontech/torrent v1.54.2-alpha-10 h1:MqEorLDG5n2jsNAsSC+TKuZUyExO/KfGumHxh7GHG3o= -github.com/erigontech/torrent v1.54.2-alpha-10/go.mod h1:nYNeuR4xPlEl4CturFD9/KRXBRJEcJGqjegDNWakwG4= +github.com/erigontech/torrent v1.54.2-alpha-8 h1:MQobu6sUZCFbmWpsB7GqAh0IWs7VAZ370POaVxlApIk= +github.com/erigontech/torrent v1.54.2-alpha-8/go.mod h1:nYNeuR4xPlEl4CturFD9/KRXBRJEcJGqjegDNWakwG4= github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c h1:CndMRAH4JIwxbW8KYq6Q+cGWcGHz0FjGR3QqcInWcW0= github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -288,8 +288,8 @@ github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7z github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/garslo/gogen 
v0.0.0-20170307003452-d6ebae628c7c h1:uYNKzPntb8c6DKvP9EfrBjkLkU7pM4lM+uuHSIa8UtU= github.com/garslo/gogen v0.0.0-20170307003452-d6ebae628c7c/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8= github.com/gballet/go-verkle v0.0.0-20221121182333-31427a1f2d35 h1:I8QswD9gf3VEpr7bpepKKOm7ChxFITIG+oc1I5/S0no= @@ -539,10 +539,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510125329-7b93b3d7e99f h1:vOUz9rYvrFWc84nuPUxReQj7OhU7QYWJCNXbH0NMPvI= -github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240510125329-7b93b3d7e99f/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= -github.com/ledgerwatch/erigonwatch v0.1.0 h1:TrCjklOu9ZI9/uiMigo1Jnknnk1I/dXUxXymA3xHfzo= -github.com/ledgerwatch/erigonwatch v0.1.0/go.mod h1:uYq4hs3RL1OtIYRXAxYq02tpdGkx6rtXlpzdazDDbWI= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240505022337-08dce201593b h1:lfllTgrcwFzFXX7c/L4i/xAj/8noP/yHNSmC8dDi08s= +github.com/ledgerwatch/erigon-snapshot v1.3.1-0.20240505022337-08dce201593b/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.9.0 h1:iDwrXe0PVwBC68Dd94YSsHbMgQ3ufsgjzXtFNFVZFRk= github.com/ledgerwatch/log/v3 v3.9.0/go.mod h1:EiAY6upmI/6LkNhOVxb4eVsmsP11HZCnZ3PlJMjYiqE= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= @@ -825,8 +823,8 @@ github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/showwin/speedtest-go v1.7.5 h1:FQ3EdM2vnfw5BRCRzGCYe8aWu70rr21Az5ZFHiW9CdE= -github.com/showwin/speedtest-go v1.7.5/go.mod h1:uLgdWCNarXxlYsL2E5TOZpCIwpgSWnEANZp7gfHXHu0= +github.com/showwin/speedtest-go v1.6.12 h1:q+hWNn2cM35KkqtXGGbSmuJgd67gTP8+VlneY2hq9vU= +github.com/showwin/speedtest-go v1.6.12/go.mod h1:uLgdWCNarXxlYsL2E5TOZpCIwpgSWnEANZp7gfHXHu0= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= @@ -868,9 +866,8 @@ github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0b github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra 
v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -1188,12 +1185,12 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/migrations/commitment.go b/migrations/commitment.go index 2b9a7d1fb8e..8b8c3ef4149 100644 --- a/migrations/commitment.go +++ b/migrations/commitment.go @@ -9,6 +9,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" libstate "github.com/ledgerwatch/erigon-lib/state" ) @@ -18,8 +19,7 @@ var SqueezeCommitmentFiles = Migration{ Name: "squeeze_commit_files", Up: func(db kv.RwDB, dirs datadir.Dirs, progress []byte, BeforeCommit Callback, logger log.Logger) (err error) { ctx := context.Background() - - if !EnableSqueezeCommitmentFiles || !libstate.AggregatorSqueezeCommitmentValues { //nolint:staticcheck + if !EnableSqueezeCommitmentFiles || !libstate.AggregatorSqueezeCommitmentValues || !kvcfg.HistoryV3.FromDB(db) { //nolint:staticcheck return db.Update(ctx, func(tx kv.RwTx) error { return BeforeCommit(tx, nil, true) }) diff --git a/migrations/prohibit_new_downloads2.go b/migrations/prohibit_new_downloads2.go index e278fa71113..22cd00372a9 100644 --- a/migrations/prohibit_new_downloads2.go +++ b/migrations/prohibit_new_downloads2.go @@ -45,7 +45,7 @@ var ProhibitNewDownloadsLock2 = Migration{ locked = append(locked, t.Name()) } - for _, t := range borsnaptype.BorSnapshotTypes() { + for _, t := range borsnaptype.BorSnapshotTypes { locked = append(locked, t.Name()) } diff --git a/node/node.go b/node/node.go index b2d79467ac3..ce5f804f7e1 100644 --- a/node/node.go +++ b/node/node.go @@ -345,7 +345,7 @@ func OpenDatabase(ctx context.Context, config *nodecfg.Config, label kv.Label, n if config.MdbxGrowthStep > 0 { opts = opts.GrowthStep(config.MdbxGrowthStep) } - opts = opts.DirtySpace(uint64(512 * datasize.MB)) + opts = opts.DirtySpace(uint64(128 * datasize.MB)) case kv.ConsensusDB: if config.MdbxPageSize.Bytes() > 0 { opts = opts.PageSize(config.MdbxPageSize.Bytes()) diff --git a/p2p/enode/nodedb.go b/p2p/enode/nodedb.go index 15498b15686..1b9e8e2d5e8 100644 --- 
a/p2p/enode/nodedb.go +++ b/p2p/enode/nodedb.go @@ -564,7 +564,7 @@ func (db *DB) QuerySeeds(n int, maxAge time.Duration) []*Node { } seek: for seeks := 0; len(nodes) < n && seeks < n*5; seeks++ { - // seekInFiles to a random entry. The first byte is incremented by a + // Seek to a random entry. The first byte is incremented by a // random amount each time in order to increase the likelihood // of hitting all existing nodes in very small databases. ctr := id[0] diff --git a/p2p/sentry/sentry_grpc_server_test.go b/p2p/sentry/sentry_grpc_server_test.go index 43cefaa98b0..b6b05c322de 100644 --- a/p2p/sentry/sentry_grpc_server_test.go +++ b/p2p/sentry/sentry_grpc_server_test.go @@ -83,14 +83,14 @@ func testForkIDSplit(t *testing.T, protocol uint) { SpuriousDragonBlock: big.NewInt(2), ByzantiumBlock: big.NewInt(3), } - dbNoFork, _ = temporaltest.NewTestDB(t, datadir.New(t.TempDir())) - dbProFork, _ = temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + _, dbNoFork, _ = temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + _, dbProFork, _ = temporaltest.NewTestDB(t, datadir.New(t.TempDir())) gspecNoFork = &types.Genesis{Config: configNoFork} gspecProFork = &types.Genesis{Config: configProFork} - genesisNoFork = core.MustCommitGenesis(gspecNoFork, dbNoFork, "", log.Root()) - genesisProFork = core.MustCommitGenesis(gspecProFork, dbProFork, "", log.Root()) + genesisNoFork = core.MustCommitGenesis(gspecNoFork, dbNoFork, "", log.Root(), nil) + genesisProFork = core.MustCommitGenesis(gspecProFork, dbProFork, "", log.Root(), nil) ) var s1, s2 *GrpcServer @@ -176,9 +176,9 @@ func TestSentryServerImpl_SetStatusInitPanic(t *testing.T) { }() configNoFork := &chain.Config{HomesteadBlock: big.NewInt(1), ChainID: big.NewInt(1)} - dbNoFork, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + _, dbNoFork, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) gspecNoFork := &types.Genesis{Config: configNoFork} - genesisNoFork := core.MustCommitGenesis(gspecNoFork, dbNoFork, "", log.Root()) + genesisNoFork := core.MustCommitGenesis(gspecNoFork, dbNoFork, "", log.Root(), nil) ss := &GrpcServer{p2p: &p2p.Config{}} _, err := ss.SetStatus(context.Background(), &proto_sentry.StatusData{ diff --git a/p2p/sentry/sentry_multi_client/sentry_multi_client.go b/p2p/sentry/sentry_multi_client/sentry_multi_client.go index f0ba178135d..d00ee47cedf 100644 --- a/p2p/sentry/sentry_multi_client/sentry_multi_client.go +++ b/p2p/sentry/sentry_multi_client/sentry_multi_client.go @@ -26,6 +26,8 @@ import ( proto_sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentryproto" proto_types "github.com/ledgerwatch/erigon-lib/gointerfaces/typesproto" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" + "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/ethconfig" @@ -276,7 +278,8 @@ type MultiClient struct { // decouple sentry multi client from header and body downloading logic is done disableBlockDownload bool - logger log.Logger + historyV3 bool + logger log.Logger } func NewMultiClient( @@ -339,6 +342,7 @@ func NewMultiClient( logPeerInfo: logPeerInfo, sendHeaderRequestsToMultiplePeers: chainConfig.TerminalTotalDifficultyPassed, maxBlockBroadcastPeers: maxBlockBroadcastPeers, + historyV3: kvcfg.HistoryV3.FromDB(db), disableBlockDownload: disableBlockDownload, logger: logger, } @@ -682,44 +686,47 @@ func (cs *MultiClient) getBlockBodies66(ctx context.Context, inreq *proto_sentry } func (cs *MultiClient) getReceipts66(ctx 
context.Context, inreq *proto_sentry.InboundMessage, sentry direct.SentryClient) error { - return nil //TODO: https://github.com/ledgerwatch/erigon/issues/10320 - //var query eth.GetReceiptsPacket66 - //if err := rlp.DecodeBytes(inreq.Data, &query); err != nil { - // return fmt.Errorf("decoding getReceipts66: %w, data: %x", err, inreq.Data) - //} - //tx, err := cs.db.BeginRo(ctx) - //if err != nil { - // return err - //} - //defer tx.Rollback() - //receipts, err := eth.AnswerGetReceiptsQuery(cs.blockReader, tx, query.GetReceiptsPacket) - //if err != nil { - // return err - //} - //tx.Rollback() - //b, err := rlp.EncodeToBytes(&eth.ReceiptsRLPPacket66{ - // RequestId: query.RequestId, - // ReceiptsRLPPacket: receipts, - //}) - //if err != nil { - // return fmt.Errorf("encode header response: %w", err) - //} - //outreq := proto_sentry.SendMessageByIdRequest{ - // PeerId: inreq.PeerId, - // Data: &proto_sentry.OutboundMessageData{ - // Id: proto_sentry.MessageId_RECEIPTS_66, - // Data: b, - // }, - //} - //_, err = sentry.SendMessageById(ctx, &outreq, &grpc.EmptyCallOption{}) - //if err != nil { - // if isPeerNotFoundErr(err) { - // return nil - // } - // return fmt.Errorf("send bodies response: %w", err) - //} - ////cs.logger.Info(fmt.Sprintf("[%s] GetReceipts responseLen %d", ConvertH512ToPeerID(inreq.PeerId), len(b))) - //return nil + if cs.historyV3 { // historyV3 doesn't store receipts in DB + return nil + } + + var query eth.GetReceiptsPacket66 + if err := rlp.DecodeBytes(inreq.Data, &query); err != nil { + return fmt.Errorf("decoding getReceipts66: %w, data: %x", err, inreq.Data) + } + tx, err := cs.db.BeginRo(ctx) + if err != nil { + return err + } + defer tx.Rollback() + receipts, err := eth.AnswerGetReceiptsQuery(cs.blockReader, tx, query.GetReceiptsPacket) + if err != nil { + return err + } + tx.Rollback() + b, err := rlp.EncodeToBytes(&eth.ReceiptsRLPPacket66{ + RequestId: query.RequestId, + ReceiptsRLPPacket: receipts, + }) + if err != nil { + return fmt.Errorf("encode receipts response: %w", err) + } + outreq := proto_sentry.SendMessageByIdRequest{ + PeerId: inreq.PeerId, + Data: &proto_sentry.OutboundMessageData{ + Id: proto_sentry.MessageId_RECEIPTS_66, + Data: b, + }, + } + _, err = sentry.SendMessageById(ctx, &outreq, &grpc.EmptyCallOption{}) + if err != nil { + if isPeerNotFoundErr(err) { + return nil + } + return fmt.Errorf("send receipts response: %w", err) + } + //cs.logger.Info(fmt.Sprintf("[%s] GetReceipts responseLen %d", ConvertH512ToPeerID(inreq.PeerId), len(b))) + return nil } func MakeInboundMessage() *proto_sentry.InboundMessage { diff --git a/p2p/server.go b/p2p/server.go index 1897da93c45..cbc8daf5b73 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -1221,7 +1221,7 @@ func (srv *Server) listErrors() []interface{} { srv.errorsMu.Lock() defer srv.errorsMu.Unlock() - list := make([]interface{}, 0, len(srv.errors)*2) + list := make([]interface{}, len(srv.errors)*2) for err, count := range srv.errors { list = append(list, err, count) } diff --git a/params/network_params.go b/params/network_params.go index d79192516e8..e914ff51a67 100644 --- a/params/network_params.go +++ b/params/network_params.go @@ -57,5 +57,5 @@ const ( // considered immutable (i.e. soft finality). It is used by the downloader as a // hard limit against deep ancestors, by the blockchain against deep reorgs, by // the freezer as the cutoff threshold and by clique as the snapshot trust limit. 
- FullImmutabilityThreshold = 100_000 + FullImmutabilityThreshold = 90000 ) diff --git a/params/protocol_params.go b/params/protocol_params.go index 05e4fe52d9f..d760de8658d 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -171,18 +171,11 @@ const ( // PIP-27: secp256r1 elliptic curve signature verifier gas price P256VerifyGas uint64 = 3450 - - // EIP-2935: Historical block hashes in state - BlockHashHistoryServeWindow uint64 = 8192 - BlockHashOldWindow uint64 = 256 ) // EIP-4788: Beacon block root in the EVM var BeaconRootsAddress = common.HexToAddress("0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02") -// EIP-2935: Historical block hashes in state -var HistoryStorageAddress = common.HexToAddress("0x25a219378dad9b3503c8268c9ca836a52427a4fb") - // Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations var Bls12381MultiExpDiscountTable = [128]uint64{1200, 888, 764, 641, 594, 547, 500, 453, 438, 423, 408, 394, 379, 364, 349, 334, 330, 326, 322, 318, 314, 310, 306, 302, 298, 294, 289, 285, 281, 277, 273, 269, 268, 266, 265, 263, 262, 260, 259, 257, 256, 254, 253, 251, 250, 248, 247, 245, 244, 242, 241, 239, 238, 236, 235, 233, 232, 231, 229, 228, 226, 225, 223, 222, 221, 220, 219, 219, 218, 217, 216, 216, 215, 214, 213, 213, 212, 211, 211, 210, 209, 208, 208, 207, 206, 205, 205, 204, 203, 202, 202, 201, 200, 199, 199, 198, 197, 196, 196, 195, 194, 193, 193, 192, 191, 191, 190, 189, 188, 188, 187, 186, 185, 185, 184, 183, 182, 182, 181, 180, 179, 179, 178, 177, 176, 176, 175, 174} diff --git a/polygon/bor/bor.go b/polygon/bor/bor.go index 05525941386..ed395983ed9 100644 --- a/polygon/bor/bor.go +++ b/polygon/bor/bor.go @@ -31,6 +31,7 @@ import ( "github.com/ledgerwatch/erigon/consensus/misc" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/crypto" @@ -974,7 +975,7 @@ func (c *Bor) CalculateRewards(config *chain.Config, header *types.Header, uncle // Finalize implements consensus.Engine, ensuring no uncles are set, nor block // rewards given. func (c *Bor) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState, - txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, + txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger, ) (types.Transactions, types.Receipts, error) { headerNumber := header.Number.Uint64() @@ -1038,7 +1039,7 @@ func (c *Bor) changeContractCodeIfNeeded(headerNumber uint64, state *state.Intra // FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set, // nor block rewards given, and returns the final block. 
func (c *Bor) FinalizeAndAssemble(chainConfig *chain.Config, header *types.Header, state *state.IntraBlockState, - txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, + txs types.Transactions, uncles []*types.Header, receipts types.Receipts, withdrawals []*types.Withdrawal, chain consensus.ChainReader, syscall consensus.SystemCall, call consensus.Call, logger log.Logger, ) (*types.Block, types.Transactions, types.Receipts, error) { // stateSyncData := []*types.StateSyncData{} @@ -1078,7 +1079,7 @@ func (c *Bor) FinalizeAndAssemble(chainConfig *chain.Config, header *types.Heade header.UncleHash = types.CalcUncleHash(nil) // Assemble block - block := types.NewBlock(header, txs, nil, receipts, withdrawals, requests) + block := types.NewBlock(header, txs, nil, receipts, withdrawals) // set state sync // bc := chain.(*core.BlockChain) @@ -1093,7 +1094,7 @@ func (c *Bor) GenerateSeal(chain consensus.ChainHeaderReader, currnt, parent *ty } func (c *Bor) Initialize(config *chain.Config, chain consensus.ChainHeaderReader, header *types.Header, - state *state.IntraBlockState, syscall consensus.SysCallCustom, logger log.Logger) { + state *state.IntraBlockState, syscall consensus.SysCallCustom, logger log.Logger, eLogger *tracing.Hooks) { } // Authorize injects a private key into the consensus engine to mint new blocks diff --git a/polygon/bor/bor_test.go b/polygon/bor/bor_test.go index 3e7dfcb8b5d..2905f790012 100644 --- a/polygon/bor/bor_test.go +++ b/polygon/bor/bor_test.go @@ -116,7 +116,7 @@ func (h test_heimdall) FetchCheckpointCount(ctx context.Context) (int64, error) return 0, fmt.Errorf("TODO") } -func (h *test_heimdall) FetchCheckpoints(ctx context.Context, page uint64, limit uint64) ([]*heimdall.Checkpoint, error) { +func (h *test_heimdall) FetchCheckpoints(ctx context.Context, page uint64, limit uint64) (heimdall.Checkpoints, error) { return nil, fmt.Errorf("TODO") } @@ -172,6 +172,14 @@ func (r headerReader) CurrentHeader() *types.Header { return nil } +func (cr headerReader) CurrentFinalizedHeader() *types.Header { + return nil +} + +func (cr headerReader) CurrentSafeHeader() *types.Header { + return nil +} + func (r headerReader) GetHeader(_ libcommon.Hash, blockNo uint64) *types.Header { return r.GetHeaderByNumber(blockNo) } diff --git a/polygon/bor/fake.go b/polygon/bor/fake.go index fc3485eef54..fb79b7642da 100644 --- a/polygon/bor/fake.go +++ b/polygon/bor/fake.go @@ -21,8 +21,8 @@ func NewFaker() *FakeBor { } func (f *FakeBor) Finalize(config *chain.Config, header *types.Header, state *state.IntraBlockState, - txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, requests []*types.Request, + txs types.Transactions, uncles []*types.Header, r types.Receipts, withdrawals []*types.Withdrawal, chain consensus.ChainReader, syscall consensus.SystemCall, logger log.Logger, ) (types.Transactions, types.Receipts, error) { - return f.FakeEthash.Finalize(config, header, state, txs, uncles, r, withdrawals, requests, chain, syscall, logger) + return f.FakeEthash.Finalize(config, header, state, txs, uncles, r, withdrawals, chain, syscall, logger) } diff --git a/polygon/bor/snaptype/types.go b/polygon/bor/snaptype/types.go index c3abcde1a70..f0f994ec68f 100644 --- a/polygon/bor/snaptype/types.go +++ b/polygon/bor/snaptype/types.go @@ -31,11 +31,7 @@ import ( ) func init() { - initTypes() -} - -func initTypes() { - borTypes := append(coresnaptype.BlockSnapshotTypes, 
BorSnapshotTypes()...) + borTypes := append(coresnaptype.BlockSnapshotTypes, BorSnapshotTypes...) snapcfg.RegisterKnownTypes(networkname.MumbaiChainName, borTypes) snapcfg.RegisterKnownTypes(networkname.AmoyChainName, borTypes) @@ -406,22 +402,9 @@ var ( return buildValueIndex(ctx, sn, salt, d, firstMilestoneId, tmpDir, p, lvl, logger) }), ) -) - -var recordWaypoints bool - -func RecordWayPoints(value bool) { - recordWaypoints = value - initTypes() -} - -func BorSnapshotTypes() []snaptype.Type { - if recordWaypoints { - return []snaptype.Type{BorEvents, BorSpans, BorCheckpoints, BorMilestones} - } - return []snaptype.Type{BorEvents, BorSpans} -} + BorSnapshotTypes = []snaptype.Type{BorEvents, BorSpans, BorCheckpoints, BorMilestones} +) func extractValueRange(ctx context.Context, table string, valueFrom, valueTo uint64, db kv.RoDB, collect func([]byte) error, workers int, lvl log.Lvl, logger log.Logger) (uint64, error) { logEvery := time.NewTicker(20 * time.Second) diff --git a/polygon/heimdall/checkpoint.go b/polygon/heimdall/checkpoint.go index 37ba2baa999..a1d7a763c54 100644 --- a/polygon/heimdall/checkpoint.go +++ b/polygon/heimdall/checkpoint.go @@ -20,10 +20,6 @@ type Checkpoint struct { Fields WaypointFields } -func (c Checkpoint) RawId() uint64 { - return uint64(c.Id) -} - func (c Checkpoint) StartBlock() *big.Int { return c.Fields.StartBlock } @@ -32,13 +28,6 @@ func (c Checkpoint) EndBlock() *big.Int { return c.Fields.EndBlock } -func (c Checkpoint) BlockNumRange() ClosedRange { - return ClosedRange{ - Start: c.StartBlock().Uint64(), - End: c.EndBlock().Uint64(), - } -} - func (c Checkpoint) RootHash() libcommon.Hash { return c.Fields.RootHash } diff --git a/polygon/heimdall/client.go b/polygon/heimdall/client.go index 4eee9e64d19..df33539aa60 100644 --- a/polygon/heimdall/client.go +++ b/polygon/heimdall/client.go @@ -47,7 +47,7 @@ type HeimdallClient interface { FetchCheckpoint(ctx context.Context, number int64) (*Checkpoint, error) FetchCheckpointCount(ctx context.Context) (int64, error) - FetchCheckpoints(ctx context.Context, page uint64, limit uint64) ([]*Checkpoint, error) + FetchCheckpoints(ctx context.Context, page uint64, limit uint64) (Checkpoints, error) FetchMilestone(ctx context.Context, number int64) (*Milestone, error) FetchMilestoneCount(ctx context.Context) (int64, error) @@ -250,7 +250,7 @@ func (c *Client) FetchCheckpoint(ctx context.Context, number int64) (*Checkpoint return &response.Result, nil } -func (c *Client) FetchCheckpoints(ctx context.Context, page uint64, limit uint64) ([]*Checkpoint, error) { +func (c *Client) FetchCheckpoints(ctx context.Context, page uint64, limit uint64) (Checkpoints, error) { url, err := checkpointListURL(c.urlString, page, limit) if err != nil { return nil, err diff --git a/polygon/heimdall/client_mock.go b/polygon/heimdall/client_mock.go index 1b1718b47f7..7d3d81c2b06 100644 --- a/polygon/heimdall/client_mock.go +++ b/polygon/heimdall/client_mock.go @@ -155,10 +155,10 @@ func (c *MockHeimdallClientFetchCheckpointCountCall) DoAndReturn(f func(context. } // FetchCheckpoints mocks base method. 
-func (m *MockHeimdallClient) FetchCheckpoints(arg0 context.Context, arg1, arg2 uint64) ([]*Checkpoint, error) { +func (m *MockHeimdallClient) FetchCheckpoints(arg0 context.Context, arg1, arg2 uint64) (Checkpoints, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "FetchCheckpoints", arg0, arg1, arg2) - ret0, _ := ret[0].([]*Checkpoint) + ret0, _ := ret[0].(Checkpoints) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -176,19 +176,19 @@ type MockHeimdallClientFetchCheckpointsCall struct { } // Return rewrite *gomock.Call.Return -func (c *MockHeimdallClientFetchCheckpointsCall) Return(arg0 []*Checkpoint, arg1 error) *MockHeimdallClientFetchCheckpointsCall { +func (c *MockHeimdallClientFetchCheckpointsCall) Return(arg0 Checkpoints, arg1 error) *MockHeimdallClientFetchCheckpointsCall { c.Call = c.Call.Return(arg0, arg1) return c } // Do rewrite *gomock.Call.Do -func (c *MockHeimdallClientFetchCheckpointsCall) Do(f func(context.Context, uint64, uint64) ([]*Checkpoint, error)) *MockHeimdallClientFetchCheckpointsCall { +func (c *MockHeimdallClientFetchCheckpointsCall) Do(f func(context.Context, uint64, uint64) (Checkpoints, error)) *MockHeimdallClientFetchCheckpointsCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockHeimdallClientFetchCheckpointsCall) DoAndReturn(f func(context.Context, uint64, uint64) ([]*Checkpoint, error)) *MockHeimdallClientFetchCheckpointsCall { +func (c *MockHeimdallClientFetchCheckpointsCall) DoAndReturn(f func(context.Context, uint64, uint64) (Checkpoints, error)) *MockHeimdallClientFetchCheckpointsCall { c.Call = c.Call.DoAndReturn(f) return c } diff --git a/polygon/heimdall/closed_range.go b/polygon/heimdall/closed_range.go deleted file mode 100644 index 1c14986df26..00000000000 --- a/polygon/heimdall/closed_range.go +++ /dev/null @@ -1,29 +0,0 @@ -package heimdall - -type ClosedRange struct { - Start uint64 - End uint64 -} - -func (r ClosedRange) Len() uint64 { - return r.End + 1 - r.Start -} - -func ClosedRangeMap[TResult any](r ClosedRange, projection func(i uint64) (TResult, error)) ([]TResult, error) { - results := make([]TResult, 0, r.Len()) - - for i := r.Start; i <= r.End; i++ { - entity, err := projection(i) - if err != nil { - return nil, err - } - - results = append(results, entity) - } - - return results, nil -} - -func (r ClosedRange) Map(projection func(i uint64) (any, error)) ([]any, error) { - return ClosedRangeMap(r, projection) -} diff --git a/polygon/heimdall/entity.go b/polygon/heimdall/entity.go deleted file mode 100644 index b6dcfb38e8f..00000000000 --- a/polygon/heimdall/entity.go +++ /dev/null @@ -1,6 +0,0 @@ -package heimdall - -type Entity interface { - RawId() uint64 - BlockNumRange() ClosedRange -} diff --git a/polygon/heimdall/entity_fetcher.go b/polygon/heimdall/entity_fetcher.go deleted file mode 100644 index bb5bad50ab3..00000000000 --- a/polygon/heimdall/entity_fetcher.go +++ /dev/null @@ -1,122 +0,0 @@ -package heimdall - -import ( - "cmp" - "context" - "fmt" - "slices" - "time" - - "github.com/ledgerwatch/log/v3" -) - -type entityFetcher interface { - FetchLastEntityId(ctx context.Context) (uint64, error) - FetchEntitiesRange(ctx context.Context, idRange ClosedRange) ([]Entity, error) -} - -type entityFetcherImpl struct { - name string - - fetchLastEntityId func(ctx context.Context) (int64, error) - fetchEntity func(ctx context.Context, id int64) (Entity, error) - fetchEntitiesPage func(ctx context.Context, page uint64, limit uint64) ([]Entity, error) - - logger log.Logger -} - -func 
newEntityFetcher( - name string, - fetchLastEntityId func(ctx context.Context) (int64, error), - fetchEntity func(ctx context.Context, id int64) (Entity, error), - fetchEntitiesPage func(ctx context.Context, page uint64, limit uint64) ([]Entity, error), - logger log.Logger, -) entityFetcher { - return &entityFetcherImpl{ - name: name, - fetchLastEntityId: fetchLastEntityId, - fetchEntity: fetchEntity, - fetchEntitiesPage: fetchEntitiesPage, - logger: logger, - } -} - -func (f *entityFetcherImpl) FetchLastEntityId(ctx context.Context) (uint64, error) { - id, err := f.fetchLastEntityId(ctx) - return uint64(id), err -} - -func (f *entityFetcherImpl) FetchEntitiesRange(ctx context.Context, idRange ClosedRange) ([]Entity, error) { - count := idRange.Len() - - const batchFetchThreshold = 100 - if (count > batchFetchThreshold) && (f.fetchEntitiesPage != nil) { - allEntities, err := f.FetchAllEntities(ctx) - if err != nil { - return nil, err - } - startIndex := idRange.Start - 1 - return allEntities[startIndex : startIndex+count], nil - } - - return f.FetchEntitiesRangeSequentially(ctx, idRange) -} - -func (f *entityFetcherImpl) FetchEntitiesRangeSequentially(ctx context.Context, idRange ClosedRange) ([]Entity, error) { - return ClosedRangeMap(idRange, func(id uint64) (Entity, error) { - return f.fetchEntity(ctx, int64(id)) - }) -} - -func (f *entityFetcherImpl) FetchAllEntities(ctx context.Context) ([]Entity, error) { - // TODO: once heimdall API is fixed to return sorted items in pages we can only fetch - // - // the new pages after lastStoredCheckpointId using the checkpoints/list paging API - // (for now we have to fetch all of them) - // and also remove sorting we do after fetching - - var entities []Entity - - fetchStartTime := time.Now() - progressLogTicker := time.NewTicker(30 * time.Second) - defer progressLogTicker.Stop() - - for page := uint64(1); ; page++ { - entitiesPage, err := f.fetchEntitiesPage(ctx, page, 10_000) - if err != nil { - return nil, err - } - if len(entitiesPage) == 0 { - break - } - - for _, entity := range entitiesPage { - entities = append(entities, entity) - } - - select { - case <-progressLogTicker.C: - f.logger.Debug( - heimdallLogPrefix(fmt.Sprintf("%s progress", f.name)), - "page", page, - "len", len(entities), - ) - default: - // carry-on - } - } - - slices.SortFunc(entities, func(e1, e2 Entity) int { - n1 := e1.BlockNumRange().Start - n2 := e2.BlockNumRange().Start - return cmp.Compare(n1, n2) - }) - - f.logger.Debug( - heimdallLogPrefix(fmt.Sprintf("%s done", f.name)), - "len", len(entities), - "duration", time.Since(fetchStartTime), - ) - - return entities, nil -} diff --git a/polygon/heimdall/entity_store.go b/polygon/heimdall/entity_store.go deleted file mode 100644 index 1064dfe025d..00000000000 --- a/polygon/heimdall/entity_store.go +++ /dev/null @@ -1,156 +0,0 @@ -package heimdall - -import ( - "context" - "encoding/binary" - "encoding/json" - "sync" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/iter" -) - -type entityStore interface { - Prepare(ctx context.Context) error - Close() - GetLastEntityId(ctx context.Context) (uint64, bool, error) - GetEntity(ctx context.Context, id uint64) (Entity, error) - PutEntity(ctx context.Context, id uint64, entity Entity) error - FindByBlockNum(ctx context.Context, blockNum uint64) (Entity, error) -} - -type entityStoreImpl struct { - tx kv.RwTx - table string - - makeEntity func() Entity - getLastEntityId func(ctx context.Context, tx kv.Tx) (uint64, bool, error) - 
loadEntityBytes func(ctx context.Context, tx kv.Getter, id uint64) ([]byte, error) - - blockNumToIdIndex *RangeIndex - prepareOnce sync.Once -} - -func newEntityStore( - tx kv.RwTx, - table string, - makeEntity func() Entity, - getLastEntityId func(ctx context.Context, tx kv.Tx) (uint64, bool, error), - loadEntityBytes func(ctx context.Context, tx kv.Getter, id uint64) ([]byte, error), - blockNumToIdIndex *RangeIndex, -) entityStore { - return &entityStoreImpl{ - tx: tx, - table: table, - - makeEntity: makeEntity, - getLastEntityId: getLastEntityId, - loadEntityBytes: loadEntityBytes, - - blockNumToIdIndex: blockNumToIdIndex, - } -} - -func (s *entityStoreImpl) Prepare(ctx context.Context) error { - var err error - s.prepareOnce.Do(func() { - iteratorFactory := func() (iter.KV, error) { return s.tx.Range(s.table, nil, nil) } - err = buildBlockNumToIdIndex(ctx, s.blockNumToIdIndex, iteratorFactory, s.entityUnmarshalJSON) - }) - return err -} - -func (s *entityStoreImpl) Close() { - s.blockNumToIdIndex.Close() -} - -func (s *entityStoreImpl) GetLastEntityId(ctx context.Context) (uint64, bool, error) { - return s.getLastEntityId(ctx, s.tx) -} - -func entityStoreKey(id uint64) [8]byte { - var key [8]byte - binary.BigEndian.PutUint64(key[:], id) - return key -} - -func (s *entityStoreImpl) entityUnmarshalJSON(jsonBytes []byte) (Entity, error) { - entity := s.makeEntity() - if err := json.Unmarshal(jsonBytes, entity); err != nil { - return nil, err - } - return entity, nil -} - -func (s *entityStoreImpl) GetEntity(ctx context.Context, id uint64) (Entity, error) { - jsonBytes, err := s.loadEntityBytes(ctx, s.tx, id) - if err != nil { - return nil, err - } - // not found - if jsonBytes == nil { - return nil, nil - } - - return s.entityUnmarshalJSON(jsonBytes) -} - -func (s *entityStoreImpl) PutEntity(ctx context.Context, id uint64, entity Entity) error { - jsonBytes, err := json.Marshal(entity) - if err != nil { - return err - } - - key := entityStoreKey(id) - err = s.tx.Put(s.table, key[:], jsonBytes) - if err != nil { - return err - } - - // update blockNumToIdIndex - return s.blockNumToIdIndex.Put(ctx, entity.BlockNumRange(), id) -} - -func (s *entityStoreImpl) FindByBlockNum(ctx context.Context, blockNum uint64) (Entity, error) { - id, err := s.blockNumToIdIndex.Lookup(ctx, blockNum) - if err != nil { - return nil, err - } - // not found - if id == 0 { - return nil, nil - } - - return s.GetEntity(ctx, id) -} - -func buildBlockNumToIdIndex( - ctx context.Context, - index *RangeIndex, - iteratorFactory func() (iter.KV, error), - entityUnmarshalJSON func([]byte) (Entity, error), -) error { - it, err := iteratorFactory() - if err != nil { - return err - } - defer it.Close() - - for it.HasNext() { - _, jsonBytes, err := it.Next() - if err != nil { - return err - } - - entity, err := entityUnmarshalJSON(jsonBytes) - if err != nil { - return err - } - - if err = index.Put(ctx, entity.BlockNumRange(), entity.RawId()); err != nil { - return err - } - } - - return nil -} diff --git a/polygon/heimdall/heimdall.go b/polygon/heimdall/heimdall.go index d560994c5cc..4d2b12c0554 100644 --- a/polygon/heimdall/heimdall.go +++ b/polygon/heimdall/heimdall.go @@ -15,11 +15,20 @@ import ( // //go:generate mockgen -typed=true -destination=./heimdall_mock.go -package=heimdall . 
Heimdall type Heimdall interface { + LastCheckpointId(ctx context.Context) (CheckpointId, bool, error) + LastMilestoneId(ctx context.Context) (MilestoneId, bool, error) + LastSpanId(ctx context.Context) (SpanId, bool, error) FetchLatestSpan(ctx context.Context) (*Span, error) + FetchCheckpoints(ctx context.Context, start CheckpointId, end CheckpointId) ([]*Checkpoint, error) + FetchMilestones(ctx context.Context, start MilestoneId, end MilestoneId) ([]*Milestone, error) + FetchSpans(ctx context.Context, start SpanId, end SpanId) ([]*Span, error) + FetchCheckpointsFromBlock(ctx context.Context, startBlock uint64) (Waypoints, error) FetchMilestonesFromBlock(ctx context.Context, startBlock uint64) (Waypoints, error) + FetchSpansFromBlock(ctx context.Context, startBlock uint64) ([]*Span, error) + OnCheckpointEvent(ctx context.Context, callback func(*Checkpoint)) error OnMilestoneEvent(ctx context.Context, callback func(*Milestone)) error OnSpanEvent(ctx context.Context, callback func(*Span)) error } @@ -27,6 +36,7 @@ type Heimdall interface { // ErrIncompleteMilestoneRange happens when FetchMilestones is called with an old start block because old milestones are evicted var ErrIncompleteMilestoneRange = errors.New("milestone range doesn't contain the start block") var ErrIncompleteCheckpointRange = errors.New("checkpoint range doesn't contain the start block") +var ErrIncompleteSpanRange = errors.New("span range doesn't contain the start block") const checkpointsBatchFetchThreshold = 100 @@ -60,6 +70,18 @@ type heimdall struct { store Store } +func (h *heimdall) LastCheckpointId(ctx context.Context) (CheckpointId, bool, error) { + // todo get this from store if its likely not changed (need timeout) + + count, err := h.client.FetchCheckpointCount(ctx) + + if err != nil { + return 0, false, err + } + + return CheckpointId(count), true, nil +} + func (h *heimdall) FetchCheckpointsFromBlock(ctx context.Context, startBlock uint64) (Waypoints, error) { h.logger.Debug(heimdallLogPrefix("fetching checkpoints from block"), "start", startBlock) startFetchTime := time.Now() @@ -339,6 +361,43 @@ func (h *heimdall) FetchLatestSpan(ctx context.Context) (*Span, error) { return h.client.FetchLatestSpan(ctx) } +func (h *heimdall) FetchSpansFromBlock(ctx context.Context, startBlock uint64) ([]*Span, error) { + last, _, err := h.LastSpanId(ctx) + + if err != nil { + return nil, err + } + + var spans []*Span + + for i := last; i >= 1; i-- { + m, err := h.FetchSpans(ctx, i, i) + if err != nil { + if errors.Is(err, ErrNotInSpanList) { + common.SliceReverse(spans) + return spans, ErrIncompleteSpanRange + } + return nil, err + } + + cmpResult := m[0].CmpRange(startBlock) + // the start block is past the last span + if cmpResult > 0 { + return nil, nil + } + + spans = append(spans, m...) 
+ + // the span contains the start block + if cmpResult == 0 { + break + } + } + + common.SliceReverse(spans) + return spans, nil +} + func (h *heimdall) FetchSpans(ctx context.Context, start SpanId, end SpanId) ([]*Span, error) { var spans []*Span @@ -408,7 +467,7 @@ func (h *heimdall) pollSpans(ctx context.Context, tip SpanId, cb func(*Span)) { latestSpan, err := h.client.FetchLatestSpan(ctx) if err != nil { h.logger.Warn( - heimdallLogPrefix("heimdall.OnSpanEvent FetchSpanCount failed"), + heimdallLogPrefix("heimdall.OnSpanEvent FetchLatestSpan failed"), "err", err, ) @@ -425,7 +484,7 @@ func (h *heimdall) pollSpans(ctx context.Context, tip SpanId, cb func(*Span)) { m, err := h.FetchSpans(ctx, tip+1, latestSpan.Id) if err != nil { h.logger.Warn( - heimdallLogPrefix("heimdall.OnSpanEvent FetchSpan failed"), + heimdallLogPrefix("heimdall.OnSpanEvent FetchSpans failed"), "err", err, ) @@ -439,6 +498,60 @@ func (h *heimdall) pollSpans(ctx context.Context, tip SpanId, cb func(*Span)) { } } +func (h *heimdall) OnCheckpointEvent(ctx context.Context, cb func(*Checkpoint)) error { + tip, ok, err := h.store.LastCheckpointId(ctx) + if err != nil { + return err + } + + if !ok { + tip, _, err = h.LastCheckpointId(ctx) + if err != nil { + return err + } + } + + go h.pollCheckpoints(ctx, tip, cb) + + return nil +} + +func (h *heimdall) pollCheckpoints(ctx context.Context, tip CheckpointId, cb func(*Checkpoint)) { + for ctx.Err() == nil { + count, err := h.client.FetchCheckpointCount(ctx) + if err != nil { + h.logger.Warn( + heimdallLogPrefix("heimdall.OnCheckpointEvent FetchCheckpointCount failed"), + "err", err, + ) + + h.waitPollingDelay(ctx) + // keep background goroutine alive in case of heimdall errors + continue + } + + if count <= int64(tip) { + h.waitPollingDelay(ctx) + continue + } + + m, err := h.FetchCheckpoints(ctx, tip+1, CheckpointId(count)) + if err != nil { + h.logger.Warn( + heimdallLogPrefix("heimdall.OnCheckpointEvent FetchCheckpoints failed"), + "err", err, + ) + + h.waitPollingDelay(ctx) + // keep background goroutine alive in case of heimdall errors + continue + } + + tip = CheckpointId(count) + go cb(m[len(m)-1]) + } +} + func (h *heimdall) OnMilestoneEvent(ctx context.Context, cb func(*Milestone)) error { tip, ok, err := h.store.LastMilestoneId(ctx) if err != nil { @@ -479,7 +592,7 @@ func (h *heimdall) pollMilestones(ctx context.Context, tip MilestoneId, cb func( m, err := h.FetchMilestones(ctx, tip+1, MilestoneId(count)) if err != nil { h.logger.Warn( - heimdallLogPrefix("heimdall.OnMilestoneEvent FetchMilestone failed"), + heimdallLogPrefix("heimdall.OnMilestoneEvent FetchMilestones failed"), "err", err, ) @@ -554,5 +667,13 @@ } func (h *heimdall) waitPollingDelay(ctx context.Context) { - common.Sleep(ctx, h.pollDelay) + pollDelayTimer := time.NewTimer(h.pollDelay) + defer pollDelayTimer.Stop() + + select { + case <-ctx.Done(): + return + case <-pollDelayTimer.C: + return + } } diff --git a/polygon/heimdall/heimdall_mock.go b/polygon/heimdall/heimdall_mock.go index 1b037cd8aa2..646cd97debb 100644 --- a/polygon/heimdall/heimdall_mock.go +++ b/polygon/heimdall/heimdall_mock.go @@ -39,6 +39,45 @@ func (m *MockHeimdall) EXPECT() *MockHeimdallMockRecorder { return m.recorder } +// FetchCheckpoints mocks base method.
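The waitPollingDelay rewrite above swaps common.Sleep for an explicit timer plus select, so the poll loops wake up as soon as their context is canceled. A standalone sketch of the same pattern (sleepCtx is an illustrative name, not an erigon API):

```go
// Context-aware delay, as in the new waitPollingDelay: returns when either
// the timer fires or ctx is canceled.
package main

import (
	"context"
	"fmt"
	"time"
)

func sleepCtx(ctx context.Context, d time.Duration) {
	t := time.NewTimer(d)
	defer t.Stop() // release the timer if we leave early

	select {
	case <-ctx.Done(): // caller canceled: stop waiting immediately
	case <-t.C: // full delay elapsed
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	start := time.Now()
	sleepCtx(ctx, time.Hour) // unblocks after ~50ms, not an hour
	fmt.Println("waited", time.Since(start).Round(10*time.Millisecond))
}
```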
+func (m *MockHeimdall) FetchCheckpoints(arg0 context.Context, arg1, arg2 CheckpointId) ([]*Checkpoint, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchCheckpoints", arg0, arg1, arg2) + ret0, _ := ret[0].([]*Checkpoint) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchCheckpoints indicates an expected call of FetchCheckpoints. +func (mr *MockHeimdallMockRecorder) FetchCheckpoints(arg0, arg1, arg2 any) *MockHeimdallFetchCheckpointsCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchCheckpoints", reflect.TypeOf((*MockHeimdall)(nil).FetchCheckpoints), arg0, arg1, arg2) + return &MockHeimdallFetchCheckpointsCall{Call: call} +} + +// MockHeimdallFetchCheckpointsCall wrap *gomock.Call +type MockHeimdallFetchCheckpointsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockHeimdallFetchCheckpointsCall) Return(arg0 []*Checkpoint, arg1 error) *MockHeimdallFetchCheckpointsCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockHeimdallFetchCheckpointsCall) Do(f func(context.Context, CheckpointId, CheckpointId) ([]*Checkpoint, error)) *MockHeimdallFetchCheckpointsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockHeimdallFetchCheckpointsCall) DoAndReturn(f func(context.Context, CheckpointId, CheckpointId) ([]*Checkpoint, error)) *MockHeimdallFetchCheckpointsCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // FetchCheckpointsFromBlock mocks base method. func (m *MockHeimdall) FetchCheckpointsFromBlock(arg0 context.Context, arg1 uint64) (Waypoints, error) { m.ctrl.T.Helper() @@ -117,6 +156,45 @@ func (c *MockHeimdallFetchLatestSpanCall) DoAndReturn(f func(context.Context) (* return c } +// FetchMilestones mocks base method. +func (m *MockHeimdall) FetchMilestones(arg0 context.Context, arg1, arg2 MilestoneId) ([]*Milestone, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchMilestones", arg0, arg1, arg2) + ret0, _ := ret[0].([]*Milestone) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchMilestones indicates an expected call of FetchMilestones. +func (mr *MockHeimdallMockRecorder) FetchMilestones(arg0, arg1, arg2 any) *MockHeimdallFetchMilestonesCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchMilestones", reflect.TypeOf((*MockHeimdall)(nil).FetchMilestones), arg0, arg1, arg2) + return &MockHeimdallFetchMilestonesCall{Call: call} +} + +// MockHeimdallFetchMilestonesCall wrap *gomock.Call +type MockHeimdallFetchMilestonesCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockHeimdallFetchMilestonesCall) Return(arg0 []*Milestone, arg1 error) *MockHeimdallFetchMilestonesCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockHeimdallFetchMilestonesCall) Do(f func(context.Context, MilestoneId, MilestoneId) ([]*Milestone, error)) *MockHeimdallFetchMilestonesCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockHeimdallFetchMilestonesCall) DoAndReturn(f func(context.Context, MilestoneId, MilestoneId) ([]*Milestone, error)) *MockHeimdallFetchMilestonesCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // FetchMilestonesFromBlock mocks base method. 
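These generated wrappers come from mockgen's -typed=true mode: each expectation returns a per-method call type whose Return/Do/DoAndReturn are compile-time checked against the interface. A hypothetical test sketch showing how they are consumed (TestLastSpanIdMock is illustrative and not part of this diff):

```go
// Hypothetical consumer of the typed mock above; not part of this diff.
package heimdall

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	"go.uber.org/mock/gomock"
)

func TestLastSpanIdMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	mock := NewMockHeimdall(ctrl)

	// Return is checked against (SpanId, bool, error) at compile time,
	// unlike the untyped gomock API which accepts ...any.
	mock.EXPECT().
		LastSpanId(gomock.Any()).
		Return(SpanId(42), true, nil)

	id, ok, err := mock.LastSpanId(context.Background())
	require.NoError(t, err)
	require.True(t, ok)
	require.Equal(t, SpanId(42), id)
}
```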
func (m *MockHeimdall) FetchMilestonesFromBlock(arg0 context.Context, arg1 uint64) (Waypoints, error) { m.ctrl.T.Helper() @@ -156,6 +234,242 @@ func (c *MockHeimdallFetchMilestonesFromBlockCall) DoAndReturn(f func(context.Co return c } +// FetchSpans mocks base method. +func (m *MockHeimdall) FetchSpans(arg0 context.Context, arg1, arg2 SpanId) ([]*Span, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchSpans", arg0, arg1, arg2) + ret0, _ := ret[0].([]*Span) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchSpans indicates an expected call of FetchSpans. +func (mr *MockHeimdallMockRecorder) FetchSpans(arg0, arg1, arg2 any) *MockHeimdallFetchSpansCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchSpans", reflect.TypeOf((*MockHeimdall)(nil).FetchSpans), arg0, arg1, arg2) + return &MockHeimdallFetchSpansCall{Call: call} +} + +// MockHeimdallFetchSpansCall wrap *gomock.Call +type MockHeimdallFetchSpansCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockHeimdallFetchSpansCall) Return(arg0 []*Span, arg1 error) *MockHeimdallFetchSpansCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockHeimdallFetchSpansCall) Do(f func(context.Context, SpanId, SpanId) ([]*Span, error)) *MockHeimdallFetchSpansCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockHeimdallFetchSpansCall) DoAndReturn(f func(context.Context, SpanId, SpanId) ([]*Span, error)) *MockHeimdallFetchSpansCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// FetchSpansFromBlock mocks base method. +func (m *MockHeimdall) FetchSpansFromBlock(arg0 context.Context, arg1 uint64) ([]*Span, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchSpansFromBlock", arg0, arg1) + ret0, _ := ret[0].([]*Span) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchSpansFromBlock indicates an expected call of FetchSpansFromBlock. +func (mr *MockHeimdallMockRecorder) FetchSpansFromBlock(arg0, arg1 any) *MockHeimdallFetchSpansFromBlockCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchSpansFromBlock", reflect.TypeOf((*MockHeimdall)(nil).FetchSpansFromBlock), arg0, arg1) + return &MockHeimdallFetchSpansFromBlockCall{Call: call} +} + +// MockHeimdallFetchSpansFromBlockCall wrap *gomock.Call +type MockHeimdallFetchSpansFromBlockCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockHeimdallFetchSpansFromBlockCall) Return(arg0 []*Span, arg1 error) *MockHeimdallFetchSpansFromBlockCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockHeimdallFetchSpansFromBlockCall) Do(f func(context.Context, uint64) ([]*Span, error)) *MockHeimdallFetchSpansFromBlockCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockHeimdallFetchSpansFromBlockCall) DoAndReturn(f func(context.Context, uint64) ([]*Span, error)) *MockHeimdallFetchSpansFromBlockCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// LastCheckpointId mocks base method. 
+func (m *MockHeimdall) LastCheckpointId(arg0 context.Context) (CheckpointId, bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LastCheckpointId", arg0) + ret0, _ := ret[0].(CheckpointId) + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// LastCheckpointId indicates an expected call of LastCheckpointId. +func (mr *MockHeimdallMockRecorder) LastCheckpointId(arg0 any) *MockHeimdallLastCheckpointIdCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastCheckpointId", reflect.TypeOf((*MockHeimdall)(nil).LastCheckpointId), arg0) + return &MockHeimdallLastCheckpointIdCall{Call: call} +} + +// MockHeimdallLastCheckpointIdCall wrap *gomock.Call +type MockHeimdallLastCheckpointIdCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockHeimdallLastCheckpointIdCall) Return(arg0 CheckpointId, arg1 bool, arg2 error) *MockHeimdallLastCheckpointIdCall { + c.Call = c.Call.Return(arg0, arg1, arg2) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockHeimdallLastCheckpointIdCall) Do(f func(context.Context) (CheckpointId, bool, error)) *MockHeimdallLastCheckpointIdCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockHeimdallLastCheckpointIdCall) DoAndReturn(f func(context.Context) (CheckpointId, bool, error)) *MockHeimdallLastCheckpointIdCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// LastMilestoneId mocks base method. +func (m *MockHeimdall) LastMilestoneId(arg0 context.Context) (MilestoneId, bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LastMilestoneId", arg0) + ret0, _ := ret[0].(MilestoneId) + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// LastMilestoneId indicates an expected call of LastMilestoneId. +func (mr *MockHeimdallMockRecorder) LastMilestoneId(arg0 any) *MockHeimdallLastMilestoneIdCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastMilestoneId", reflect.TypeOf((*MockHeimdall)(nil).LastMilestoneId), arg0) + return &MockHeimdallLastMilestoneIdCall{Call: call} +} + +// MockHeimdallLastMilestoneIdCall wrap *gomock.Call +type MockHeimdallLastMilestoneIdCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockHeimdallLastMilestoneIdCall) Return(arg0 MilestoneId, arg1 bool, arg2 error) *MockHeimdallLastMilestoneIdCall { + c.Call = c.Call.Return(arg0, arg1, arg2) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockHeimdallLastMilestoneIdCall) Do(f func(context.Context) (MilestoneId, bool, error)) *MockHeimdallLastMilestoneIdCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockHeimdallLastMilestoneIdCall) DoAndReturn(f func(context.Context) (MilestoneId, bool, error)) *MockHeimdallLastMilestoneIdCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// LastSpanId mocks base method. +func (m *MockHeimdall) LastSpanId(arg0 context.Context) (SpanId, bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LastSpanId", arg0) + ret0, _ := ret[0].(SpanId) + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// LastSpanId indicates an expected call of LastSpanId. 
+func (mr *MockHeimdallMockRecorder) LastSpanId(arg0 any) *MockHeimdallLastSpanIdCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastSpanId", reflect.TypeOf((*MockHeimdall)(nil).LastSpanId), arg0) + return &MockHeimdallLastSpanIdCall{Call: call} +} + +// MockHeimdallLastSpanIdCall wrap *gomock.Call +type MockHeimdallLastSpanIdCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockHeimdallLastSpanIdCall) Return(arg0 SpanId, arg1 bool, arg2 error) *MockHeimdallLastSpanIdCall { + c.Call = c.Call.Return(arg0, arg1, arg2) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockHeimdallLastSpanIdCall) Do(f func(context.Context) (SpanId, bool, error)) *MockHeimdallLastSpanIdCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockHeimdallLastSpanIdCall) DoAndReturn(f func(context.Context) (SpanId, bool, error)) *MockHeimdallLastSpanIdCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// OnCheckpointEvent mocks base method. +func (m *MockHeimdall) OnCheckpointEvent(arg0 context.Context, arg1 func(*Checkpoint)) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OnCheckpointEvent", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// OnCheckpointEvent indicates an expected call of OnCheckpointEvent. +func (mr *MockHeimdallMockRecorder) OnCheckpointEvent(arg0, arg1 any) *MockHeimdallOnCheckpointEventCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnCheckpointEvent", reflect.TypeOf((*MockHeimdall)(nil).OnCheckpointEvent), arg0, arg1) + return &MockHeimdallOnCheckpointEventCall{Call: call} +} + +// MockHeimdallOnCheckpointEventCall wrap *gomock.Call +type MockHeimdallOnCheckpointEventCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockHeimdallOnCheckpointEventCall) Return(arg0 error) *MockHeimdallOnCheckpointEventCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockHeimdallOnCheckpointEventCall) Do(f func(context.Context, func(*Checkpoint)) error) *MockHeimdallOnCheckpointEventCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockHeimdallOnCheckpointEventCall) DoAndReturn(f func(context.Context, func(*Checkpoint)) error) *MockHeimdallOnCheckpointEventCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // OnMilestoneEvent mocks base method. func (m *MockHeimdall) OnMilestoneEvent(arg0 context.Context, arg1 func(*Milestone)) error { m.ctrl.T.Helper() diff --git a/polygon/heimdall/heimdall_test.go b/polygon/heimdall/heimdall_test.go index 74192bcd1df..43734e264bd 100644 --- a/polygon/heimdall/heimdall_test.go +++ b/polygon/heimdall/heimdall_test.go @@ -89,7 +89,7 @@ func (test heimdallTest) setupCheckpoints(count int) []*Checkpoint { } else { client.EXPECT(). FetchCheckpoints(gomock.Any(), gomock.Any(), gomock.Any()). 
- DoAndReturn(func(ctx context.Context, page uint64, limit uint64) ([]*Checkpoint, error) { + DoAndReturn(func(ctx context.Context, page uint64, limit uint64) (Checkpoints, error) { if page == 0 { return nil, nil } diff --git a/polygon/heimdall/milestone.go b/polygon/heimdall/milestone.go index 3d74dac7fcc..12e64c00243 100644 --- a/polygon/heimdall/milestone.go +++ b/polygon/heimdall/milestone.go @@ -20,10 +20,6 @@ type Milestone struct { Fields WaypointFields } -func (m Milestone) RawId() uint64 { - return uint64(m.Id) -} - func (m Milestone) StartBlock() *big.Int { return m.Fields.StartBlock } @@ -32,13 +28,6 @@ func (m Milestone) EndBlock() *big.Int { return m.Fields.EndBlock } -func (m Milestone) BlockNumRange() ClosedRange { - return ClosedRange{ - Start: m.StartBlock().Uint64(), - End: m.EndBlock().Uint64(), - } -} - func (m Milestone) RootHash() libcommon.Hash { return m.Fields.RootHash } diff --git a/polygon/heimdall/range_index.go b/polygon/heimdall/range_index.go deleted file mode 100644 index 7919dae7043..00000000000 --- a/polygon/heimdall/range_index.go +++ /dev/null @@ -1,93 +0,0 @@ -package heimdall - -import ( - "context" - "encoding/binary" - - "github.com/c2h5oh/datasize" - "github.com/ledgerwatch/log/v3" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/mdbx" -) - -type RangeIndex struct { - db kv.RwDB -} - -const rangeIndexTableName = "Index" - -func NewRangeIndex(ctx context.Context, tmpDir string, logger log.Logger) (*RangeIndex, error) { - db, err := mdbx.NewMDBX(logger). - InMem(tmpDir). - WithTableCfg(func(_ kv.TableCfg) kv.TableCfg { return kv.TableCfg{rangeIndexTableName: {}} }). - MapSize(1 * datasize.GB). - Open(ctx) - if err != nil { - return nil, err - } - - return &RangeIndex{db}, nil -} - -func (i *RangeIndex) Close() { - i.db.Close() -} - -func rangeIndexKey(blockNum uint64) [8]byte { - var key [8]byte - binary.BigEndian.PutUint64(key[:], blockNum) - return key -} - -func rangeIndexValue(id uint64) [8]byte { - var value [8]byte - binary.BigEndian.PutUint64(value[:], id) - return value -} - -func rangeIndexValueParse(value []byte) uint64 { - return binary.BigEndian.Uint64(value) -} - -// Put a mapping from a range to an id. -func (i *RangeIndex) Put(ctx context.Context, r ClosedRange, id uint64) error { - tx, err := i.db.BeginRw(ctx) - if err != nil { - return err - } - defer tx.Rollback() - - key := rangeIndexKey(r.End) - value := rangeIndexValue(id) - if err = tx.Put(rangeIndexTableName, key[:], value[:]); err != nil { - return err - } - return tx.Commit() -} - -// Lookup an id of a range by a blockNum within that range. 
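The deleted RangeIndex keys each closed range by its big-endian End block, so a cursor Seek(blockNum) lands on the first stored range whose End >= blockNum; with contiguous ranges (as in the deleted test data) that is the containing range. An in-memory sketch of the same lookup idea, assuming contiguous ranges and no MDBX:

```go
// In-memory model of the deleted RangeIndex lookup: store each range under
// its End block and "Seek" to the smallest End >= blockNum. Assumes the
// contiguous ranges used by the deleted test; names are illustrative.
package main

import (
	"fmt"
	"sort"
)

type closedRange struct{ start, end uint64 }

type rangeIndex struct {
	ends []uint64          // sorted range ends, mirroring ordered MDBX keys
	ids  map[uint64]uint64 // range end -> id
}

func (ri *rangeIndex) put(r closedRange, id uint64) {
	ri.ends = append(ri.ends, r.end)
	sort.Slice(ri.ends, func(i, j int) bool { return ri.ends[i] < ri.ends[j] })
	ri.ids[r.end] = id
}

// lookup mimics cursor.Seek on big-endian keys: first stored End >= blockNum.
func (ri *rangeIndex) lookup(blockNum uint64) uint64 {
	i := sort.Search(len(ri.ends), func(i int) bool { return ri.ends[i] >= blockNum })
	if i == len(ri.ends) {
		return 0 // past the last range: not found
	}
	return ri.ids[ri.ends[i]]
}

func main() {
	ri := &rangeIndex{ids: map[uint64]uint64{}}
	ri.put(closedRange{100, 199}, 1)
	ri.put(closedRange{200, 499}, 2)
	fmt.Println(ri.lookup(150), ri.lookup(200), ri.lookup(500)) // 1 2 0
}
```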
-func (i *RangeIndex) Lookup(ctx context.Context, blockNum uint64) (uint64, error) { - var id uint64 - err := i.db.View(ctx, func(tx kv.Tx) error { - cursor, err := tx.Cursor(rangeIndexTableName) - if err != nil { - return err - } - defer cursor.Close() - - key := rangeIndexKey(blockNum) - _, value, err := cursor.Seek(key[:]) - if err != nil { - return err - } - // not found - if value == nil { - return nil - } - - id = rangeIndexValueParse(value) - return nil - }) - return id, err -} diff --git a/polygon/heimdall/range_index_test.go b/polygon/heimdall/range_index_test.go deleted file mode 100644 index f9094f67671..00000000000 --- a/polygon/heimdall/range_index_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package heimdall - -import ( - "context" - "testing" - - "github.com/ledgerwatch/log/v3" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type rangeIndexTest struct { - index *RangeIndex - ctx context.Context - logger log.Logger -} - -func newRangeIndexTest(t *testing.T) rangeIndexTest { - tmpDir := t.TempDir() - ctx := context.Background() - logger := log.New() - index, err := NewRangeIndex(ctx, tmpDir, logger) - require.NoError(t, err) - - t.Cleanup(index.Close) - - return rangeIndexTest{ - index: index, - ctx: ctx, - logger: logger, - } -} - -func TestRangeIndexEmpty(t *testing.T) { - test := newRangeIndexTest(t) - actualId, err := test.index.Lookup(test.ctx, 1000) - require.NoError(t, err) - assert.Equal(t, uint64(0), actualId) -} - -func TestRangeIndex(t *testing.T) { - test := newRangeIndexTest(t) - ctx := test.ctx - - ranges := []ClosedRange{ - {100, 200 - 1}, - {200, 500 - 1}, - {500, 1000 - 1}, - {1000, 1200 - 1}, - {1200, 1500 - 1}, - } - - for i, r := range ranges { - require.NoError(t, test.index.Put(ctx, r, uint64(i+1))) - } - - examples := map[uint64]uint64{ - 100: 1, - 101: 1, - 102: 1, - 150: 1, - 199: 1, - 200: 2, - 201: 2, - 202: 2, - 300: 2, - 498: 2, - 499: 2, - 500: 3, - 501: 3, - 502: 3, - 900: 3, - 998: 3, - 999: 3, - 1000: 4, - 1001: 4, - 1002: 4, - 1100: 4, - 1199: 4, - 1200: 5, - 1201: 5, - 1400: 5, - 1499: 5, - 1500: 0, - 1501: 0, - 2000: 0, - 5000: 0, - } - - for blockNum, expectedId := range examples { - actualId, err := test.index.Lookup(ctx, blockNum) - require.NoError(t, err) - assert.Equal(t, expectedId, actualId) - } -} diff --git a/polygon/heimdall/scraper.go b/polygon/heimdall/scraper.go deleted file mode 100644 index 53838f42d87..00000000000 --- a/polygon/heimdall/scraper.go +++ /dev/null @@ -1,302 +0,0 @@ -package heimdall - -import ( - "context" - "time" - - "github.com/ledgerwatch/log/v3" - "golang.org/x/sync/errgroup" - - libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/polygon/polygoncommon" - "github.com/ledgerwatch/erigon/turbo/services" -) - -type Scraper struct { - txProvider func() kv.RwTx - readerProvider func() reader - - client HeimdallClient - pollDelay time.Duration - - checkpointObservers *polygoncommon.Observers[[]*Checkpoint] - milestoneObservers *polygoncommon.Observers[[]*Milestone] - spanObservers *polygoncommon.Observers[[]*Span] - - checkpointSyncEvent *polygoncommon.EventNotifier - milestoneSyncEvent *polygoncommon.EventNotifier - spanSyncEvent *polygoncommon.EventNotifier - - tmpDir string - logger log.Logger -} - -func NewScraperTODO( - client HeimdallClient, - pollDelay time.Duration, - tmpDir string, - logger log.Logger, -) *Scraper { - return NewScraper( - func() kv.RwTx { /* TODO */ return nil }, - func() reader { 
/* TODO */ return nil }, - client, - pollDelay, - tmpDir, - logger, - ) -} - -func NewScraper( - txProvider func() kv.RwTx, - readerProvider func() reader, - - client HeimdallClient, - pollDelay time.Duration, - tmpDir string, - logger log.Logger, -) *Scraper { - return &Scraper{ - txProvider: txProvider, - readerProvider: readerProvider, - - client: client, - pollDelay: pollDelay, - - checkpointObservers: polygoncommon.NewObservers[[]*Checkpoint](), - milestoneObservers: polygoncommon.NewObservers[[]*Milestone](), - spanObservers: polygoncommon.NewObservers[[]*Span](), - - checkpointSyncEvent: polygoncommon.NewEventNotifier(), - milestoneSyncEvent: polygoncommon.NewEventNotifier(), - spanSyncEvent: polygoncommon.NewEventNotifier(), - - tmpDir: tmpDir, - logger: logger, - } -} - -func (s *Scraper) syncEntity( - ctx context.Context, - store entityStore, - fetcher entityFetcher, - callback func([]Entity), - syncEvent *polygoncommon.EventNotifier, -) error { - defer store.Close() - if err := store.Prepare(ctx); err != nil { - return err - } - - for ctx.Err() == nil { - lastKnownId, hasLastKnownId, err := store.GetLastEntityId(ctx) - if err != nil { - return err - } - - var idRange ClosedRange - if hasLastKnownId { - idRange.Start = lastKnownId + 1 - } else { - idRange.Start = 1 - } - - idRange.End, err = fetcher.FetchLastEntityId(ctx) - if err != nil { - return err - } - - if idRange.Start > idRange.End { - syncEvent.SetAndBroadcast() - libcommon.Sleep(ctx, s.pollDelay) - if ctx.Err() != nil { - syncEvent.Reset() - } - } else { - entities, err := fetcher.FetchEntitiesRange(ctx, idRange) - if err != nil { - return err - } - - for i, entity := range entities { - if err = store.PutEntity(ctx, idRange.Start+uint64(i), entity); err != nil { - return err - } - } - - if callback != nil { - go callback(entities) - } - } - } - return ctx.Err() -} - -func newCheckpointStore(tx kv.RwTx, reader services.BorCheckpointReader, blockNumToIdIndexFactory func() *RangeIndex) entityStore { - makeEntity := func() Entity { return new(Checkpoint) } - return newEntityStore(tx, kv.BorCheckpoints, makeEntity, reader.LastCheckpointId, reader.Checkpoint, blockNumToIdIndexFactory()) -} - -func newMilestoneStore(tx kv.RwTx, reader services.BorMilestoneReader, blockNumToIdIndexFactory func() *RangeIndex) entityStore { - makeEntity := func() Entity { return new(Milestone) } - return newEntityStore(tx, kv.BorMilestones, makeEntity, reader.LastMilestoneId, reader.Milestone, blockNumToIdIndexFactory()) -} - -func newSpanStore(tx kv.RwTx, reader services.BorSpanReader, blockNumToIdIndexFactory func() *RangeIndex) entityStore { - makeEntity := func() Entity { return new(Span) } - return newEntityStore(tx, kv.BorSpans, makeEntity, reader.LastSpanId, reader.Span, blockNumToIdIndexFactory()) -} - -func newCheckpointFetcher(client HeimdallClient, logger log.Logger) entityFetcher { - fetchEntity := func(ctx context.Context, id int64) (Entity, error) { return client.FetchCheckpoint(ctx, id) } - - fetchEntitiesPage := func(ctx context.Context, page uint64, limit uint64) ([]Entity, error) { - entities, err := client.FetchCheckpoints(ctx, page, limit) - return libcommon.SliceMap(entities, func(c *Checkpoint) Entity { return c }), err - } - - return newEntityFetcher( - "CheckpointFetcher", - client.FetchCheckpointCount, - fetchEntity, - fetchEntitiesPage, - logger, - ) -} - -func newMilestoneFetcher(client HeimdallClient, logger log.Logger) entityFetcher { - fetchEntity := func(ctx context.Context, id int64) (Entity, error) { return 
client.FetchMilestone(ctx, id) } - - return newEntityFetcher( - "MilestoneFetcher", - client.FetchMilestoneCount, - fetchEntity, - nil, - logger, - ) -} - -func newSpanFetcher(client HeimdallClient, logger log.Logger) entityFetcher { - fetchLastEntityId := func(ctx context.Context) (int64, error) { - span, err := client.FetchLatestSpan(ctx) - if err != nil { - return 0, err - } - return int64(span.Id), nil - } - - fetchEntity := func(ctx context.Context, id int64) (Entity, error) { - return client.FetchSpan(ctx, uint64(id)) - } - - return newEntityFetcher( - "SpanFetcher", - fetchLastEntityId, - fetchEntity, - nil, - logger, - ) -} - -func downcastCheckpointEntity(e Entity) *Checkpoint { - return e.(*Checkpoint) -} - -func downcastMilestoneEntity(e Entity) *Milestone { - return e.(*Milestone) -} - -func downcastSpanEntity(e Entity) *Span { - return e.(*Span) -} - -func (s *Scraper) RegisterCheckpointObserver(observer func([]*Checkpoint)) polygoncommon.UnregisterFunc { - return s.checkpointObservers.Register(observer) -} - -func (s *Scraper) RegisterMilestoneObserver(observer func([]*Milestone)) polygoncommon.UnregisterFunc { - return s.milestoneObservers.Register(observer) -} - -func (s *Scraper) RegisterSpanObserver(observer func([]*Span)) polygoncommon.UnregisterFunc { - return s.spanObservers.Register(observer) -} - -func (s *Scraper) Synchronize(ctx context.Context) { - s.checkpointSyncEvent.Wait(ctx) - s.milestoneSyncEvent.Wait(ctx) - s.spanSyncEvent.Wait(ctx) -} - -func (s *Scraper) Run(parentCtx context.Context) error { - tx := s.txProvider() - if tx == nil { - // TODO: implement and remove - s.logger.Warn("heimdall.Scraper txProvider is not implemented yet") - return nil - } - reader := s.readerProvider() - if reader == nil { - // TODO: implement and remove - s.logger.Warn("heimdall.Scraper readerProvider is not implemented yet") - return nil - } - - blockNumToIdIndexFactory := func() *RangeIndex { - index, err := NewRangeIndex(parentCtx, s.tmpDir, s.logger) - if err != nil { - panic(err) - } - return index - } - - group, ctx := errgroup.WithContext(parentCtx) - - // sync checkpoints - group.Go(func() error { - return s.syncEntity( - ctx, - newCheckpointStore(tx, reader, blockNumToIdIndexFactory), - newCheckpointFetcher(s.client, s.logger), - func(entities []Entity) { - s.checkpointObservers.Notify(libcommon.SliceMap(entities, downcastCheckpointEntity)) - }, - s.checkpointSyncEvent, - ) - }) - - // sync milestones - group.Go(func() error { - return s.syncEntity( - ctx, - newMilestoneStore(tx, reader, blockNumToIdIndexFactory), - newMilestoneFetcher(s.client, s.logger), - func(entities []Entity) { - s.milestoneObservers.Notify(libcommon.SliceMap(entities, downcastMilestoneEntity)) - }, - s.milestoneSyncEvent, - ) - }) - - // sync spans - group.Go(func() error { - return s.syncEntity( - ctx, - newSpanStore(tx, reader, blockNumToIdIndexFactory), - newSpanFetcher(s.client, s.logger), - func(entities []Entity) { - s.spanObservers.Notify(libcommon.SliceMap(entities, downcastSpanEntity)) - }, - s.spanSyncEvent, - ) - }) - - defer func() { - s.checkpointObservers.Close() - s.milestoneObservers.Close() - s.spanObservers.Close() - }() - - return group.Wait() -} diff --git a/polygon/heimdall/span.go b/polygon/heimdall/span.go index 6083395f4aa..10c36998c1f 100644 --- a/polygon/heimdall/span.go +++ b/polygon/heimdall/span.go @@ -15,17 +15,6 @@ type Span struct { ChainID string `json:"bor_chain_id,omitempty" yaml:"bor_chain_id"` } -func (s *Span) RawId() uint64 { - return uint64(s.Id) -} - 
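The deleted Scraper.Run fans the three sync loops out on an errgroup, so the first failure cancels the shared context and surfaces from Wait. A minimal sketch of that pattern with placeholder loop bodies:

```go
// errgroup fan-out as in the deleted Scraper.Run: the first loop to fail
// cancels the shared context, the siblings drain, Wait reports the error.
package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	group, ctx := errgroup.WithContext(context.Background())

	for _, name := range []string{"checkpoints", "milestones", "spans"} {
		name := name // capture per-iteration (pre Go 1.22)
		group.Go(func() error {
			if name == "spans" {
				return errors.New("spans sync failed") // cancels ctx for siblings
			}
			<-ctx.Done() // the other loops exit once the context is canceled
			return ctx.Err()
		})
	}

	fmt.Println("Wait:", group.Wait()) // Wait: spans sync failed
}
```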
-func (s *Span) BlockNumRange() ClosedRange { - return ClosedRange{ - Start: s.StartBlock, - End: s.EndBlock, - } -} - func (hs *Span) Less(other btree.Item) bool { otherHs := other.(*Span) if hs.EndBlock == 0 || otherHs.EndBlock == 0 { diff --git a/polygon/heimdall/store.go b/polygon/heimdall/store.go index 0177a1ba93c..f354fc7298f 100644 --- a/polygon/heimdall/store.go +++ b/polygon/heimdall/store.go @@ -129,7 +129,7 @@ func (s txReadStore) LastCheckpointId(ctx context.Context) (CheckpointId, bool, } func (s txReadStore) GetCheckpoint(ctx context.Context, checkpointId CheckpointId) (*Checkpoint, error) { - checkpointBytes, err := s.reader.Milestone(ctx, s.tx, uint64(checkpointId)) + checkpointBytes, err := s.reader.Checkpoint(ctx, s.tx, uint64(checkpointId)) if err != nil { return nil, err } diff --git a/polygon/p2p/message_listener.go b/polygon/p2p/message_listener.go index dd87ec9841d..9da0171cb04 100644 --- a/polygon/p2p/message_listener.go +++ b/polygon/p2p/message_listener.go @@ -12,7 +12,6 @@ import ( sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentryproto" "github.com/ledgerwatch/erigon/eth/protocols/eth" sentrymulticlient "github.com/ledgerwatch/erigon/p2p/sentry/sentry_multi_client" - "github.com/ledgerwatch/erigon/polygon/polygoncommon" "github.com/ledgerwatch/erigon/rlp" ) @@ -22,15 +21,17 @@ type DecodedInboundMessage[TPacket any] struct { PeerId *PeerId } -type UnregisterFunc = polygoncommon.UnregisterFunc +type MessageObserver[TMessage any] func(message TMessage) + +type UnregisterFunc func() type MessageListener interface { Run(ctx context.Context) - RegisterNewBlockObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc - RegisterNewBlockHashesObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc - RegisterBlockHeadersObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc - RegisterBlockBodiesObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc - RegisterPeerEventObserver(observer polygoncommon.Observer[*sentry.PeerEvent]) UnregisterFunc + RegisterNewBlockObserver(observer MessageObserver[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc + RegisterNewBlockHashesObserver(observer MessageObserver[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc + RegisterBlockHeadersObserver(observer MessageObserver[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc + RegisterBlockBodiesObserver(observer MessageObserver[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc + RegisterPeerEventObserver(observer MessageObserver[*sentry.PeerEvent]) UnregisterFunc } func NewMessageListener( @@ -53,25 +54,27 @@ func newMessageListener( sentryClient: sentryClient, statusDataFactory: statusDataFactory, peerPenalizer: peerPenalizer, - newBlockObservers: polygoncommon.NewObservers[*DecodedInboundMessage[*eth.NewBlockPacket]](), - newBlockHashesObservers: polygoncommon.NewObservers[*DecodedInboundMessage[*eth.NewBlockHashesPacket]](), - blockHeadersObservers: polygoncommon.NewObservers[*DecodedInboundMessage[*eth.BlockHeadersPacket66]](), - blockBodiesObservers: polygoncommon.NewObservers[*DecodedInboundMessage[*eth.BlockBodiesPacket66]](), - peerEventObservers: polygoncommon.NewObservers[*sentry.PeerEvent](), + newBlockObservers: map[uint64]MessageObserver[*DecodedInboundMessage[*eth.NewBlockPacket]]{}, + newBlockHashesObservers:
map[uint64]MessageObserver[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]{}, + blockHeadersObservers: map[uint64]MessageObserver[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]{}, + blockBodiesObservers: map[uint64]MessageObserver[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]{}, + peerEventObservers: map[uint64]MessageObserver[*sentry.PeerEvent]{}, } } type messageListener struct { once sync.Once + observerIdSequence uint64 logger log.Logger sentryClient direct.SentryClient statusDataFactory sentrymulticlient.StatusDataFactory peerPenalizer PeerPenalizer - newBlockObservers *polygoncommon.Observers[*DecodedInboundMessage[*eth.NewBlockPacket]] - newBlockHashesObservers *polygoncommon.Observers[*DecodedInboundMessage[*eth.NewBlockHashesPacket]] - blockHeadersObservers *polygoncommon.Observers[*DecodedInboundMessage[*eth.BlockHeadersPacket66]] - blockBodiesObservers *polygoncommon.Observers[*DecodedInboundMessage[*eth.BlockBodiesPacket66]] - peerEventObservers *polygoncommon.Observers[*sentry.PeerEvent] + observersMu sync.Mutex + newBlockObservers map[uint64]MessageObserver[*DecodedInboundMessage[*eth.NewBlockPacket]] + newBlockHashesObservers map[uint64]MessageObserver[*DecodedInboundMessage[*eth.NewBlockHashesPacket]] + blockHeadersObservers map[uint64]MessageObserver[*DecodedInboundMessage[*eth.BlockHeadersPacket66]] + blockBodiesObservers map[uint64]MessageObserver[*DecodedInboundMessage[*eth.BlockBodiesPacket66]] + peerEventObservers map[uint64]MessageObserver[*sentry.PeerEvent] stopWg sync.WaitGroup } @@ -93,31 +96,33 @@ func (ml *messageListener) Run(ctx context.Context) { ml.stopWg.Wait() // unregister all observers - ml.newBlockObservers.Close() - ml.newBlockHashesObservers.Close() - ml.blockHeadersObservers.Close() - ml.blockBodiesObservers.Close() - ml.peerEventObservers.Close() + ml.observersMu.Lock() + defer ml.observersMu.Unlock() + ml.newBlockObservers = map[uint64]MessageObserver[*DecodedInboundMessage[*eth.NewBlockPacket]]{} + ml.newBlockHashesObservers = map[uint64]MessageObserver[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]{} + ml.blockHeadersObservers = map[uint64]MessageObserver[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]{} + ml.blockBodiesObservers = map[uint64]MessageObserver[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]{} + ml.peerEventObservers = map[uint64]MessageObserver[*sentry.PeerEvent]{} } -func (ml *messageListener) RegisterNewBlockObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc { - return ml.newBlockObservers.Register(observer) +func (ml *messageListener) RegisterNewBlockObserver(observer MessageObserver[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc { + return registerObserver(ml, ml.newBlockObservers, observer) } -func (ml *messageListener) RegisterNewBlockHashesObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc { - return ml.newBlockHashesObservers.Register(observer) +func (ml *messageListener) RegisterNewBlockHashesObserver(observer MessageObserver[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc { + return registerObserver(ml, ml.newBlockHashesObservers, observer) } -func (ml *messageListener) RegisterBlockHeadersObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc { - return ml.blockHeadersObservers.Register(observer) +func (ml *messageListener) RegisterBlockHeadersObserver(observer 
MessageObserver[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc { + return registerObserver(ml, ml.blockHeadersObservers, observer) } -func (ml *messageListener) RegisterBlockBodiesObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc { - return ml.blockBodiesObservers.Register(observer) +func (ml *messageListener) RegisterBlockBodiesObserver(observer MessageObserver[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc { + return registerObserver(ml, ml.blockBodiesObservers, observer) } -func (ml *messageListener) RegisterPeerEventObserver(observer polygoncommon.Observer[*sentry.PeerEvent]) UnregisterFunc { - return ml.peerEventObservers.Register(observer) +func (ml *messageListener) RegisterPeerEventObserver(observer MessageObserver[*sentry.PeerEvent]) UnregisterFunc { + return registerObserver(ml, ml.peerEventObservers, observer) } func (ml *messageListener) listenInboundMessages(ctx context.Context) { @@ -135,6 +140,9 @@ func (ml *messageListener) listenInboundMessages(ctx context.Context) { } streamMessages(ctx, ml, "InboundMessages", streamFactory, func(message *sentry.InboundMessage) error { + ml.observersMu.Lock() + defer ml.observersMu.Unlock() + switch message.Id { case sentry.MessageId_NEW_BLOCK_66: return notifyInboundMessageObservers(ctx, ml.logger, ml.peerPenalizer, ml.newBlockObservers, message) @@ -159,12 +167,52 @@ func (ml *messageListener) listenPeerEvents(ctx context.Context) { } func (ml *messageListener) notifyPeerEventObservers(peerEvent *sentry.PeerEvent) error { + ml.observersMu.Lock() + defer ml.observersMu.Unlock() + // wait on all observers to finish processing the peer event before notifying them // with subsequent events in order to preserve the ordering of the sentry messages - ml.peerEventObservers.NotifySync(peerEvent) + var wg sync.WaitGroup + for _, observer := range ml.peerEventObservers { + wg.Add(1) + go func(observer MessageObserver[*sentry.PeerEvent]) { + defer wg.Done() + observer(peerEvent) + }(observer) + } + + wg.Wait() return nil } +func (ml *messageListener) nextObserverId() uint64 { + id := ml.observerIdSequence + ml.observerIdSequence++ + return id +} + +func registerObserver[TMessage any]( + ml *messageListener, + observers map[uint64]MessageObserver[*TMessage], + observer MessageObserver[*TMessage], +) UnregisterFunc { + ml.observersMu.Lock() + defer ml.observersMu.Unlock() + + observerId := ml.nextObserverId() + observers[observerId] = observer + return unregisterFunc(&ml.observersMu, observers, observerId) +} + +func unregisterFunc[TMessage any](mu *sync.Mutex, observers map[uint64]MessageObserver[TMessage], observerId uint64) UnregisterFunc { + return func() { + mu.Lock() + defer mu.Unlock() + + delete(observers, observerId) + } +} + func streamMessages[TMessage any]( ctx context.Context, ml *messageListener, @@ -195,7 +243,7 @@ func notifyInboundMessageObservers[TPacket any]( ctx context.Context, logger log.Logger, peerPenalizer PeerPenalizer, - observers *polygoncommon.Observers[*DecodedInboundMessage[TPacket]], + observers map[uint64]MessageObserver[*DecodedInboundMessage[TPacket]], message *sentry.InboundMessage, ) error { peerId := PeerIdFromH512(message.PeerId) @@ -213,16 +261,21 @@ func notifyInboundMessageObservers[TPacket any]( return err } - decodedMessage := DecodedInboundMessage[TPacket]{ + notifyObservers(observers, &DecodedInboundMessage[TPacket]{ InboundMessage: message, Decoded: decodedData, PeerId: peerId, - } - 
observers.Notify(&decodedMessage) + }) return nil } +func notifyObservers[TMessage any](observers map[uint64]MessageObserver[TMessage], message TMessage) { + for _, observer := range observers { + go observer(message) + } +} + func messageListenerLogPrefix(message string) string { return fmt.Sprintf("[p2p.message.listener] %s", message) } diff --git a/polygon/p2p/message_listener_test.go b/polygon/p2p/message_listener_test.go index dc55a209d61..601fd38fe47 100644 --- a/polygon/p2p/message_listener_test.go +++ b/polygon/p2p/message_listener_test.go @@ -416,7 +416,7 @@ func blockHeadersPacket66Bytes(t *testing.T, requestId uint64, headers []*types. func newMockNewBlockPacketBytes(t *testing.T) []byte { newBlockPacket := eth.NewBlockPacket{ - Block: types.NewBlock(newMockBlockHeaders(1)[0], nil, nil, nil, nil, nil), + Block: types.NewBlock(newMockBlockHeaders(1)[0], nil, nil, nil, nil), } newBlockPacketBytes, err := rlp.EncodeToBytes(&newBlockPacket) require.NoError(t, err) diff --git a/polygon/p2p/peer_tracker.go b/polygon/p2p/peer_tracker.go index 536e5383e8b..5fa490d879c 100644 --- a/polygon/p2p/peer_tracker.go +++ b/polygon/p2p/peer_tracker.go @@ -6,7 +6,6 @@ import ( "github.com/ledgerwatch/log/v3" sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentryproto" - "github.com/ledgerwatch/erigon/polygon/polygoncommon" ) type PeerTracker interface { @@ -93,7 +92,7 @@ func (pt *peerTracker) updatePeerSyncProgress(peerId *PeerId, update func(psp *p update(peerSyncProgress) } -func NewPeerEventObserver(logger log.Logger, peerTracker PeerTracker) polygoncommon.Observer[*sentry.PeerEvent] { +func NewPeerEventObserver(logger log.Logger, peerTracker PeerTracker) MessageObserver[*sentry.PeerEvent] { return func(message *sentry.PeerEvent) { peerId := PeerIdFromH512(message.PeerId) diff --git a/polygon/p2p/service_mock.go b/polygon/p2p/service_mock.go index 7cd943b3a33..f06b58635c1 100644 --- a/polygon/p2p/service_mock.go +++ b/polygon/p2p/service_mock.go @@ -13,10 +13,9 @@ import ( context "context" reflect "reflect" - sentryproto "github.com/ledgerwatch/erigon-lib/gointerfaces/sentryproto" + sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentryproto" types "github.com/ledgerwatch/erigon/core/types" eth "github.com/ledgerwatch/erigon/eth/protocols/eth" - polygoncommon "github.com/ledgerwatch/erigon/polygon/polygoncommon" gomock "go.uber.org/mock/gomock" ) @@ -419,7 +418,7 @@ func (c *MockServicePenalizeCall) DoAndReturn(f func(context.Context, *PeerId) e } // RegisterBlockBodiesObserver mocks base method. 
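The listener now keeps observers in plain maps keyed by a sequence id behind one mutex: registering returns a closure that deletes the id, and the peer-event path notifies synchronously via a WaitGroup to preserve event ordering. A compact generic sketch of that registry (registry is an illustrative name, not the erigon type):

```go
// Generic shape of the map-based observer registry introduced above:
// register under a sequence id, unregister by deleting it, and (for the
// peer-event path) notify all observers and wait.
package main

import (
	"fmt"
	"sync"
)

type registry[T any] struct {
	mu     sync.Mutex
	nextId uint64
	obs    map[uint64]func(T)
}

func (r *registry[T]) register(f func(T)) (unregister func()) {
	r.mu.Lock()
	defer r.mu.Unlock()
	id := r.nextId
	r.nextId++
	r.obs[id] = f
	return func() {
		r.mu.Lock()
		defer r.mu.Unlock()
		delete(r.obs, id)
	}
}

// notifySync fans out one goroutine per observer and waits for all of them,
// so successive events are delivered in order.
func (r *registry[T]) notifySync(event T) {
	r.mu.Lock()
	defer r.mu.Unlock()
	var wg sync.WaitGroup
	for _, f := range r.obs {
		wg.Add(1)
		go func(f func(T)) { defer wg.Done(); f(event) }(f)
	}
	wg.Wait()
}

func main() {
	r := &registry[string]{obs: map[uint64]func(string){}}
	stop := r.register(func(s string) { fmt.Println("got", s) })
	r.notifySync("hello")
	stop()
	r.notifySync("ignored") // no observers remain
}
```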
-func (m *MockService) RegisterBlockBodiesObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc { +func (m *MockService) RegisterBlockBodiesObserver(observer MessageObserver[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RegisterBlockBodiesObserver", observer) ret0, _ := ret[0].(UnregisterFunc) @@ -445,19 +444,19 @@ func (c *MockServiceRegisterBlockBodiesObserverCall) Return(arg0 UnregisterFunc) } // Do rewrite *gomock.Call.Do -func (c *MockServiceRegisterBlockBodiesObserverCall) Do(f func(polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc) *MockServiceRegisterBlockBodiesObserverCall { +func (c *MockServiceRegisterBlockBodiesObserverCall) Do(f func(MessageObserver[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc) *MockServiceRegisterBlockBodiesObserverCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockServiceRegisterBlockBodiesObserverCall) DoAndReturn(f func(polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc) *MockServiceRegisterBlockBodiesObserverCall { +func (c *MockServiceRegisterBlockBodiesObserverCall) DoAndReturn(f func(MessageObserver[*DecodedInboundMessage[*eth.BlockBodiesPacket66]]) UnregisterFunc) *MockServiceRegisterBlockBodiesObserverCall { c.Call = c.Call.DoAndReturn(f) return c } // RegisterBlockHeadersObserver mocks base method. -func (m *MockService) RegisterBlockHeadersObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc { +func (m *MockService) RegisterBlockHeadersObserver(observer MessageObserver[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RegisterBlockHeadersObserver", observer) ret0, _ := ret[0].(UnregisterFunc) @@ -483,19 +482,19 @@ func (c *MockServiceRegisterBlockHeadersObserverCall) Return(arg0 UnregisterFunc } // Do rewrite *gomock.Call.Do -func (c *MockServiceRegisterBlockHeadersObserverCall) Do(f func(polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc) *MockServiceRegisterBlockHeadersObserverCall { +func (c *MockServiceRegisterBlockHeadersObserverCall) Do(f func(MessageObserver[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc) *MockServiceRegisterBlockHeadersObserverCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockServiceRegisterBlockHeadersObserverCall) DoAndReturn(f func(polygoncommon.Observer[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc) *MockServiceRegisterBlockHeadersObserverCall { +func (c *MockServiceRegisterBlockHeadersObserverCall) DoAndReturn(f func(MessageObserver[*DecodedInboundMessage[*eth.BlockHeadersPacket66]]) UnregisterFunc) *MockServiceRegisterBlockHeadersObserverCall { c.Call = c.Call.DoAndReturn(f) return c } // RegisterNewBlockHashesObserver mocks base method. 
-func (m *MockService) RegisterNewBlockHashesObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc { +func (m *MockService) RegisterNewBlockHashesObserver(observer MessageObserver[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RegisterNewBlockHashesObserver", observer) ret0, _ := ret[0].(UnregisterFunc) @@ -521,19 +520,19 @@ func (c *MockServiceRegisterNewBlockHashesObserverCall) Return(arg0 UnregisterFu } // Do rewrite *gomock.Call.Do -func (c *MockServiceRegisterNewBlockHashesObserverCall) Do(f func(polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc) *MockServiceRegisterNewBlockHashesObserverCall { +func (c *MockServiceRegisterNewBlockHashesObserverCall) Do(f func(MessageObserver[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc) *MockServiceRegisterNewBlockHashesObserverCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockServiceRegisterNewBlockHashesObserverCall) DoAndReturn(f func(polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc) *MockServiceRegisterNewBlockHashesObserverCall { +func (c *MockServiceRegisterNewBlockHashesObserverCall) DoAndReturn(f func(MessageObserver[*DecodedInboundMessage[*eth.NewBlockHashesPacket]]) UnregisterFunc) *MockServiceRegisterNewBlockHashesObserverCall { c.Call = c.Call.DoAndReturn(f) return c } // RegisterNewBlockObserver mocks base method. -func (m *MockService) RegisterNewBlockObserver(observer polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc { +func (m *MockService) RegisterNewBlockObserver(observer MessageObserver[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RegisterNewBlockObserver", observer) ret0, _ := ret[0].(UnregisterFunc) @@ -559,19 +558,19 @@ func (c *MockServiceRegisterNewBlockObserverCall) Return(arg0 UnregisterFunc) *M } // Do rewrite *gomock.Call.Do -func (c *MockServiceRegisterNewBlockObserverCall) Do(f func(polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc) *MockServiceRegisterNewBlockObserverCall { +func (c *MockServiceRegisterNewBlockObserverCall) Do(f func(MessageObserver[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc) *MockServiceRegisterNewBlockObserverCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockServiceRegisterNewBlockObserverCall) DoAndReturn(f func(polygoncommon.Observer[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc) *MockServiceRegisterNewBlockObserverCall { +func (c *MockServiceRegisterNewBlockObserverCall) DoAndReturn(f func(MessageObserver[*DecodedInboundMessage[*eth.NewBlockPacket]]) UnregisterFunc) *MockServiceRegisterNewBlockObserverCall { c.Call = c.Call.DoAndReturn(f) return c } // RegisterPeerEventObserver mocks base method. 
-func (m *MockService) RegisterPeerEventObserver(observer polygoncommon.Observer[*sentryproto.PeerEvent]) UnregisterFunc { +func (m *MockService) RegisterPeerEventObserver(observer MessageObserver[*sentry.PeerEvent]) UnregisterFunc { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RegisterPeerEventObserver", observer) ret0, _ := ret[0].(UnregisterFunc) @@ -597,13 +596,13 @@ func (c *MockServiceRegisterPeerEventObserverCall) Return(arg0 UnregisterFunc) * } // Do rewrite *gomock.Call.Do -func (c *MockServiceRegisterPeerEventObserverCall) Do(f func(polygoncommon.Observer[*sentryproto.PeerEvent]) UnregisterFunc) *MockServiceRegisterPeerEventObserverCall { +func (c *MockServiceRegisterPeerEventObserverCall) Do(f func(MessageObserver[*sentry.PeerEvent]) UnregisterFunc) *MockServiceRegisterPeerEventObserverCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockServiceRegisterPeerEventObserverCall) DoAndReturn(f func(polygoncommon.Observer[*sentryproto.PeerEvent]) UnregisterFunc) *MockServiceRegisterPeerEventObserverCall { +func (c *MockServiceRegisterPeerEventObserverCall) DoAndReturn(f func(MessageObserver[*sentry.PeerEvent]) UnregisterFunc) *MockServiceRegisterPeerEventObserverCall { c.Call = c.Call.DoAndReturn(f) return c } diff --git a/polygon/polygoncommon/event_notifier.go b/polygon/polygoncommon/event_notifier.go deleted file mode 100644 index d913b8247d3..00000000000 --- a/polygon/polygoncommon/event_notifier.go +++ /dev/null @@ -1,66 +0,0 @@ -package polygoncommon - -import ( - "context" - "sync" - "sync/atomic" -) - -// EventNotifier notifies waiters about an event. -// It supports a single "producer" and multiple waiters. -// A producer can set the event state to "signaled" or "non-signaled". -// Waiters can wait for the "signaled" event state. -type EventNotifier struct { - mutex sync.Mutex - cond *sync.Cond - hasEvent atomic.Bool -} - -func NewEventNotifier() *EventNotifier { - instance := &EventNotifier{} - instance.cond = sync.NewCond(&instance.mutex) - return instance -} - -// Reset to the "non-signaled" state. -func (en *EventNotifier) Reset() { - en.hasEvent.Store(false) -} - -// SetAndBroadcast sets the "signaled" state and notifies all waiters. -func (en *EventNotifier) SetAndBroadcast() { - en.hasEvent.Store(true) - en.cond.Broadcast() -} - -// Wait for the "signaled" state. -// If the event is already "signaled" it returns immediately. 
-func (en *EventNotifier) Wait(ctx context.Context) { - waitCtx, waitCancel := context.WithCancel(ctx) - defer waitCancel() - - var wg sync.WaitGroup - wg.Add(1) - - go func() { - defer wg.Done() - - en.mutex.Lock() - defer en.mutex.Unlock() - - for !en.hasEvent.Load() && (waitCtx.Err() == nil) { - en.cond.Wait() - } - waitCancel() - }() - - // wait for the waiting goroutine or the parent context to finish, whichever happens first - <-waitCtx.Done() - - // if the parent context is done, force the waiting goroutine to exit - // this might lead to spurious wake ups for other waiters, - // but it is ok due to the waiting loop conditions - en.cond.Broadcast() - - wg.Wait() -} diff --git a/polygon/polygoncommon/observers.go b/polygon/polygoncommon/observers.go deleted file mode 100644 index 53276785b40..00000000000 --- a/polygon/polygoncommon/observers.go +++ /dev/null @@ -1,79 +0,0 @@ -package polygoncommon - -import ( - "sync" -) - -type Observer[TEvent any] func(event TEvent) -type UnregisterFunc func() - -type Observers[TEvent any] struct { - observers map[uint64]Observer[TEvent] - observerIdSequence uint64 - observersMu sync.Mutex -} - -func NewObservers[TEvent any]() *Observers[TEvent] { - return &Observers[TEvent]{ - observers: map[uint64]Observer[TEvent]{}, - } -} - -func (o *Observers[TEvent]) nextObserverId() uint64 { - o.observerIdSequence++ - return o.observerIdSequence -} - -// Register an observer. Call the returned function to unregister it. -func (o *Observers[TEvent]) Register(observer Observer[TEvent]) UnregisterFunc { - o.observersMu.Lock() - defer o.observersMu.Unlock() - - observerId := o.nextObserverId() - o.observers[observerId] = observer - return o.unregisterFunc(observerId) -} - -func (o *Observers[TEvent]) unregisterFunc(observerId uint64) UnregisterFunc { - return func() { - o.observersMu.Lock() - defer o.observersMu.Unlock() - - delete(o.observers, observerId) - } -} - -// Close unregisters all observers. -func (o *Observers[TEvent]) Close() { - o.observersMu.Lock() - defer o.observersMu.Unlock() - - o.observers = map[uint64]Observer[TEvent]{} -} - -// Notify all observers in parallel without waiting for them to process the event. -func (o *Observers[TEvent]) Notify(event TEvent) { - o.observersMu.Lock() - defer o.observersMu.Unlock() - - for _, observer := range o.observers { - go observer(event) - } -} - -// NotifySync all observers in parallel and wait until all of them process the event. 
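The deleted EventNotifier lets one producer signal many waiters, with Wait also unblocking on context cancellation; the original achieves this with sync.Cond plus a watcher goroutine. A simpler channel-based sketch with the same observable behavior (an assumption, not the original implementation):

```go
// Channel-based approximation of the deleted EventNotifier: set wakes all
// waiters, wait also unblocks when ctx ends, reset re-arms the notifier.
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type eventNotifier struct {
	mu sync.Mutex
	ch chan struct{} // closed while in the "signaled" state
}

func newEventNotifier() *eventNotifier {
	return &eventNotifier{ch: make(chan struct{})}
}

func (e *eventNotifier) set() {
	e.mu.Lock()
	defer e.mu.Unlock()
	select {
	case <-e.ch: // already signaled
	default:
		close(e.ch) // wake every waiter at once
	}
}

func (e *eventNotifier) reset() {
	e.mu.Lock()
	defer e.mu.Unlock()
	select {
	case <-e.ch:
		e.ch = make(chan struct{}) // back to the non-signaled state
	default:
	}
}

func (e *eventNotifier) wait(ctx context.Context) {
	e.mu.Lock()
	ch := e.ch
	e.mu.Unlock()
	select {
	case <-ch:
	case <-ctx.Done():
	}
}

func main() {
	en := newEventNotifier()
	go func() { time.Sleep(20 * time.Millisecond); en.set() }()
	en.wait(context.Background())
	fmt.Println("synchronized")
}
```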
-func (o *Observers[TEvent]) NotifySync(event TEvent) { - o.observersMu.Lock() - defer o.observersMu.Unlock() - - var wg sync.WaitGroup - for _, observer := range o.observers { - wg.Add(1) - go func(observer Observer[TEvent]) { - defer wg.Done() - observer(event) - }(observer) - } - - wg.Wait() -} diff --git a/polygon/sync/service.go b/polygon/sync/service.go index 4ff910c0fb9..eb7bcce6826 100644 --- a/polygon/sync/service.go +++ b/polygon/sync/service.go @@ -2,7 +2,6 @@ package sync import ( "context" - "time" lru "github.com/hashicorp/golang-lru/arc/v2" "github.com/ledgerwatch/log/v3" @@ -10,7 +9,7 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/direct" - "github.com/ledgerwatch/erigon-lib/gointerfaces/executionproto" + executionproto "github.com/ledgerwatch/erigon-lib/gointerfaces/executionproto" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/p2p/sentry" @@ -29,14 +28,11 @@ type service struct { p2pService p2p.Service store Store events *TipEvents - - heimdallScraper *heimdall.Scraper } func NewService( logger log.Logger, chainConfig *chain.Config, - tmpDir string, sentryClient direct.SentryClient, maxPeers int, statusDataProvider *sentry.StatusDataProvider, @@ -51,12 +47,6 @@ func NewService( p2pService := p2p.NewService(maxPeers, logger, sentryClient, statusDataProvider.GetStatusData) heimdallClient := heimdall.NewHeimdallClient(heimdallUrl, logger) heimdallService := heimdall.NewHeimdall(heimdallClient, logger) - heimdallScraper := heimdall.NewScraperTODO( - heimdallClient, - 1*time.Second, - tmpDir, - logger, - ) blockDownloader := NewBlockDownloader( logger, p2pService, @@ -105,8 +95,6 @@ func NewService( p2pService: p2pService, store: store, events: events, - - heimdallScraper: heimdallScraper, } } @@ -136,14 +124,6 @@ func (s *service) Run(ctx context.Context) error { } }() - go func() { - err := s.heimdallScraper.Run(ctx) - if (err != nil) && (ctx.Err() == nil) { - serviceErr = err - cancel() - } - }() - go func() { err := s.sync.Run(ctx) if (err != nil) && (ctx.Err() == nil) { diff --git a/polygon/tracer/bor_state_sync_txn_tracer.go b/polygon/tracer/bor_state_sync_txn_tracer.go index 1beea7c312e..bdaf72c8632 100644 --- a/polygon/tracer/bor_state_sync_txn_tracer.go +++ b/polygon/tracer/bor_state_sync_txn_tracer.go @@ -6,21 +6,39 @@ import ( "github.com/holiman/uint256" libcommon "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/core/tracing" + "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/tracers" ) func NewBorStateSyncTxnTracer( - tracer vm.EVMLogger, + tracer *tracers.Tracer, stateSyncEventsCount int, stateReceiverContractAddress libcommon.Address, -) tracers.Tracer { - return &borStateSyncTxnTracer{ - EVMLogger: tracer, +) *tracers.Tracer { + l := &borStateSyncTxnTracer{ + Tracer: tracer, stateSyncEventsCount: stateSyncEventsCount, stateReceiverContractAddress: stateReceiverContractAddress, } + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnTxStart: l.OnTxStart, + OnTxEnd: l.OnTxEnd, + OnEnter: l.OnEnter, + OnExit: l.OnExit, + OnOpcode: l.OnOpcode, + OnFault: l.OnFault, + OnGasChange: l.OnGasChange, + OnBalanceChange: l.OnBalanceChange, + OnNonceChange: l.OnNonceChange, + OnCodeChange: l.OnCodeChange, + OnStorageChange: l.OnStorageChange, + OnLog: l.OnLog, + }, + 
GetResult: l.GetResult, + Stop: l.Stop, + } } // borStateSyncTxnTracer is a special tracer which is used only for tracing bor state sync transactions. Bor state sync @@ -33,45 +51,25 @@ func NewBorStateSyncTxnTracer( // state sync events at end of each sprint these are synthetically executed as if they were sub-calls of the // state sync events bor transaction. type borStateSyncTxnTracer struct { - vm.EVMLogger + Tracer *tracers.Tracer captureStartCalledOnce bool stateSyncEventsCount int stateReceiverContractAddress libcommon.Address } -func (bsstt *borStateSyncTxnTracer) CaptureTxStart(_ uint64) { - bsstt.EVMLogger.CaptureTxStart(0) -} - -func (bsstt *borStateSyncTxnTracer) CaptureTxEnd(_ uint64) { - bsstt.EVMLogger.CaptureTxEnd(0) +func (bsstt *borStateSyncTxnTracer) OnTxStart(env *tracing.VMContext, tx types.Transaction, from libcommon.Address) { + if bsstt.Tracer.OnTxStart != nil { + bsstt.Tracer.OnTxStart(env, tx, from) + } } -func (bsstt *borStateSyncTxnTracer) CaptureStart( - env *vm.EVM, - from libcommon.Address, - to libcommon.Address, - precompile bool, - create bool, - input []byte, - gas uint64, - value *uint256.Int, - code []byte, -) { - if !bsstt.captureStartCalledOnce { - // first event execution started - // perform a CaptureStart for the synthetic state sync transaction - from := state.SystemAddress - to := bsstt.stateReceiverContractAddress - bsstt.EVMLogger.CaptureStart(env, from, to, false, false, nil, 0, uint256.NewInt(0), nil) - bsstt.captureStartCalledOnce = true +func (bsstt *borStateSyncTxnTracer) OnTxEnd(receipt *types.Receipt, err error) { + if bsstt.Tracer.OnTxEnd != nil { + bsstt.Tracer.OnTxEnd(receipt, err) } - - // trick the tracer to think it is a CaptureEnter - bsstt.EVMLogger.CaptureEnter(vm.CALL, from, to, precompile, create, input, gas, value, code) } -func (bsstt *borStateSyncTxnTracer) CaptureEnd(output []byte, usedGas uint64, err error) { +func (bsstt *borStateSyncTxnTracer) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { if bsstt.stateSyncEventsCount == 0 { // guard against unexpected use panic("unexpected extra call to borStateSyncTxnTracer.CaptureEnd") @@ -80,55 +78,96 @@ func (bsstt *borStateSyncTxnTracer) CaptureEnd(output []byte, usedGas uint64, er // finished executing 1 event bsstt.stateSyncEventsCount-- - // trick tracer to think it is a CaptureExit - bsstt.EVMLogger.CaptureExit(output, usedGas, err) + if bsstt.Tracer.OnExit != nil { + // trick tracer to think it is a CaptureExit + bsstt.Tracer.OnExit(depth, output, gasUsed, err, reverted) + } +} - if bsstt.stateSyncEventsCount == 0 { - // reached last event - // perform a CaptureEnd for the synthetic state sync transaction - bsstt.EVMLogger.CaptureEnd(nil, 0, nil) - } -} - -func (bsstt *borStateSyncTxnTracer) CaptureState( - pc uint64, - op vm.OpCode, - gas uint64, - cost uint64, - scope *vm.ScopeContext, - rData []byte, - depth int, - err error, -) { - // trick tracer to think it is 1 level deeper - bsstt.EVMLogger.CaptureState(pc, op, gas, cost, scope, rData, depth+1, err) -} - -func (bsstt *borStateSyncTxnTracer) CaptureFault( - pc uint64, - op vm.OpCode, - gas uint64, - cost uint64, - scope *vm.ScopeContext, - depth int, - err error, -) { - // trick tracer to think it is 1 level deeper - bsstt.EVMLogger.CaptureFault(pc, op, gas, cost, scope, depth+1, err) +func (bsstt *borStateSyncTxnTracer) OnEnter(depth int, typ byte, from libcommon.Address, to libcommon.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) { + if 
bsstt.Tracer.OnEnter != nil { + bsstt.Tracer.OnEnter(depth, typ, from, to, precompile, input, gas, value, code) + } +} + +func (bsstt *borStateSyncTxnTracer) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) { + if bsstt.Tracer.OnOpcode != nil { + // trick tracer to think it is 1 level deeper + bsstt.Tracer.OnOpcode(pc, op, gas, cost, scope, rData, depth+1, err) + } +} + +func (bsstt *borStateSyncTxnTracer) OnFault(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, depth int, err error) { + if bsstt.Tracer.OnFault != nil { + // trick tracer to think it is 1 level deeper + bsstt.Tracer.OnFault(pc, op, gas, cost, scope, depth+1, err) + } } func (bsstt *borStateSyncTxnTracer) GetResult() (json.RawMessage, error) { - if tracer, ok := bsstt.EVMLogger.(tracers.Tracer); ok { - return tracer.GetResult() - } else { - panic("unexpected usage - borStateSyncTxnTracer.GetResult called on a wrapped tracer which does not support it") + if bsstt.Tracer.GetResult != nil { + return bsstt.Tracer.GetResult() } + return nil, nil } func (bsstt *borStateSyncTxnTracer) Stop(err error) { - if tracer, ok := bsstt.EVMLogger.(tracers.Tracer); ok { - tracer.Stop(err) - } else { - panic("unexpected usage - borStateSyncTxnTracer.Stop called on a wrapped tracer which does not support it") + if bsstt.Tracer.Stop != nil { + bsstt.Tracer.Stop(err) + } +} + +// OnGasChange is called when gas is either consumed or refunded. +func (bsstt *borStateSyncTxnTracer) OnGasChange(old, new uint64, reason tracing.GasChangeReason) { + if bsstt.Tracer.OnGasChange != nil { + bsstt.Tracer.OnGasChange(old, new, reason) + } +} + +func (bsstt *borStateSyncTxnTracer) OnBlockStart(event tracing.BlockEvent) { + if bsstt.Tracer.OnBlockStart != nil { + bsstt.Tracer.OnBlockStart(event) + } +} + +func (bsstt *borStateSyncTxnTracer) OnBlockEnd(err error) { + if bsstt.Tracer.OnBlockEnd != nil { + bsstt.Tracer.OnBlockEnd(err) + } +} + +func (bsstt *borStateSyncTxnTracer) OnGenesisBlock(b *types.Block, alloc types.GenesisAlloc) { + if bsstt.Tracer.OnGenesisBlock != nil { + bsstt.Tracer.OnGenesisBlock(b, alloc) + } +} + +func (bsstt *borStateSyncTxnTracer) OnBalanceChange(a libcommon.Address, prev, new *uint256.Int, reason tracing.BalanceChangeReason) { + if bsstt.Tracer.OnBalanceChange != nil { + bsstt.Tracer.OnBalanceChange(a, prev, new, reason) + } +} + +func (bsstt *borStateSyncTxnTracer) OnNonceChange(a libcommon.Address, prev, new uint64) { + if bsstt.Tracer.OnNonceChange != nil { + bsstt.Tracer.OnNonceChange(a, prev, new) + } +} + +func (bsstt *borStateSyncTxnTracer) OnCodeChange(a libcommon.Address, prevCodeHash libcommon.Hash, prev []byte, codeHash libcommon.Hash, code []byte) { + if bsstt.Tracer.OnCodeChange != nil { + bsstt.Tracer.OnCodeChange(a, prevCodeHash, prev, codeHash, code) + } +} + +func (bsstt *borStateSyncTxnTracer) OnStorageChange(a libcommon.Address, k *libcommon.Hash, prev, new uint256.Int) { + if bsstt.Tracer.OnStorageChange != nil { + bsstt.Tracer.OnStorageChange(a, k, prev, new) + } +} + +func (bsstt *borStateSyncTxnTracer) OnLog(log *types.Log) { + if bsstt.Tracer.OnLog != nil { + bsstt.Tracer.OnLog(log) } } diff --git a/polygon/tracer/trace_bor_state_sync_txn.go b/polygon/tracer/trace_bor_state_sync_txn.go index ad08ce6a9f9..45ca28e4286 100644 --- a/polygon/tracer/trace_bor_state_sync_txn.go +++ b/polygon/tracer/trace_bor_state_sync_txn.go @@ -17,6 +17,7 @@ import ( "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/core/vm/evmtypes" 
"github.com/ledgerwatch/erigon/eth/tracers" + tracerConfig "github.com/ledgerwatch/erigon/eth/tracers/config" "github.com/ledgerwatch/erigon/polygon/bor/borcfg" bortypes "github.com/ledgerwatch/erigon/polygon/bor/types" "github.com/ledgerwatch/erigon/rlp" @@ -28,7 +29,7 @@ func TraceBorStateSyncTxnDebugAPI( ctx context.Context, dbTx kv.Tx, chainConfig *chain.Config, - traceConfig *tracers.TraceConfig, + traceConfig *tracerConfig.TraceConfig, ibs *state.IntraBlockState, blockReader services.FullBlockReader, blockHash libcommon.Hash, @@ -75,6 +76,7 @@ func TraceBorStateSyncTxnTraceAPI( blockHash libcommon.Hash, blockNum uint64, blockTime uint64, + tracer *tracers.Tracer, ) (*core.ExecutionResult, error) { stateSyncEvents, err := blockReader.EventsByBlock(ctx, dbTx, blockHash, blockNum) if err != nil { @@ -83,7 +85,7 @@ func TraceBorStateSyncTxnTraceAPI( stateReceiverContract := libcommon.HexToAddress(chainConfig.Bor.(*borcfg.BorConfig).StateReceiverContract) if vmConfig.Tracer != nil { - vmConfig.Tracer = NewBorStateSyncTxnTracer(vmConfig.Tracer, len(stateSyncEvents), stateReceiverContract) + vmConfig.Tracer = NewBorStateSyncTxnTracer(tracer, len(stateSyncEvents), stateReceiverContract).Hooks } txCtx := initStateSyncTxContext(blockNum, blockHash) diff --git a/rlp/decode.go b/rlp/decode.go index 1c16d3fd3fe..4824946e558 100644 --- a/rlp/decode.go +++ b/rlp/decode.go @@ -672,11 +672,6 @@ func NewListStream(r io.Reader, len uint64) *Stream { return s } -// Remaining returns number of bytes remaining to be read -func (s *Stream) Remaining() uint64 { - return s.remaining -} - // Bytes reads an RLP string and returns its contents as a byte slice. // If the input does not contain an RLP string, the returned // error will be ErrExpectedString. diff --git a/tests/bor/helper/miner.go b/tests/bor/helper/miner.go index 9a1b4d61665..01708222a26 100644 --- a/tests/bor/helper/miner.go +++ b/tests/bor/helper/miner.go @@ -163,7 +163,7 @@ func InitMiner(ctx context.Context, genesis *types.Genesis, privKey *ecdsa.Priva ethCfg.DeprecatedTxPool.AccountSlots = 1000000 ethCfg.DeprecatedTxPool.GlobalSlots = 1000000 - ethBackend, err := eth.New(ctx, stack, ethCfg, logger) + ethBackend, err := eth.New(ctx, stack, ethCfg, logger, nil) if err != nil { return nil, nil, err } diff --git a/tests/state_test.go b/tests/state_test.go index 0b94123788a..4f0f549a4db 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -58,7 +58,7 @@ func TestState(t *testing.T) { //if ethconfig.EnableHistoryV3InTest { //} - db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) + _, db, _ := temporaltest.NewTestDB(t, datadir.New(t.TempDir())) st.walk(t, stateTestDir, func(t *testing.T, name string, test *StateTest) { for _, subtest := range test.Subtests() { subtest := subtest diff --git a/tests/state_test_util.go b/tests/state_test_util.go index 3e3992d84ac..2c85dd4cde9 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -43,6 +43,7 @@ import ( "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/crypto" @@ -186,7 +187,7 @@ func (t *StateTest) RunNoVerify(tx kv.RwTx, subtest StateSubtest, vmconfig vm.Co return nil, libcommon.Hash{}, UnsupportedForkError{subtest.Fork} } vmconfig.ExtraEips = eips - block, _, err := core.GenesisToBlock(t.genesis(config), "", log.Root()) + block, _, 
err := core.GenesisToBlock(t.genesis(config), "", log.Root(), nil) if err != nil { return nil, libcommon.Hash{}, UnsupportedForkError{subtest.Fork} } @@ -212,8 +213,8 @@ func (t *StateTest) RunNoVerify(tx kv.RwTx, subtest StateSubtest, vmconfig vm.Co defer domains.Close() txc.Doms = domains } - r = rpchelper.NewLatestStateReader(tx) - w = rpchelper.NewLatestStateWriter(txc, writeBlockNr) + r = rpchelper.NewLatestStateReader(tx, config3.EnableHistoryV4InTest) + w = rpchelper.NewLatestStateWriter(txc, writeBlockNr, config3.EnableHistoryV4InTest) statedb := state.New(r) var baseFee *big.Int @@ -327,7 +328,7 @@ func (t *StateTest) RunNoVerify(tx kv.RwTx, subtest StateSubtest, vmconfig vm.Co } func MakePreState(rules *chain.Rules, tx kv.RwTx, accounts types.GenesisAlloc, blockNr uint64, histV3 bool) (*state.IntraBlockState, error) { - r := rpchelper.NewLatestStateReader(tx) + r := rpchelper.NewLatestStateReader(tx, histV3) statedb := state.New(r) for addr, a := range accounts { statedb.SetCode(addr, a.Code) @@ -336,7 +337,7 @@ func MakePreState(rules *chain.Rules, tx kv.RwTx, accounts types.GenesisAlloc, b if a.Balance != nil { balance, _ = uint256.FromBig(a.Balance) } - statedb.SetBalance(addr, balance) + statedb.SetBalance(addr, balance, tracing.BalanceChangeUnspecified) for k, v := range a.Storage { key := k val := uint256.NewInt(0).SetBytes(v.Bytes()) @@ -368,7 +369,7 @@ func MakePreState(rules *chain.Rules, tx kv.RwTx, accounts types.GenesisAlloc, b defer domains.Flush(context2.Background(), tx) txc.Doms = domains } - w = rpchelper.NewLatestStateWriter(txc, blockNr-1) + w = rpchelper.NewLatestStateWriter(txc, blockNr-1, histV3) // Commit and re-open to start with a clean state. if err := statedb.FinalizeTx(rules, w); err != nil { diff --git a/tests/statedb_insert_chain_transaction_test.go b/tests/statedb_insert_chain_transaction_test.go index e4499feace1..63edc94e617 100644 --- a/tests/statedb_insert_chain_transaction_test.go +++ b/tests/statedb_insert_chain_transaction_test.go @@ -52,7 +52,7 @@ func TestInsertIncorrectStateRootDifferentAccounts(t *testing.T) { t.Fatal("roots are the same") } - incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil, nil) + incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil) incorrectChain := &core.ChainPack{Blocks: []*types.Block{incorrectBlock}, Headers: []*types.Header{&incorrectHeader}, TopBlock: incorrectBlock} if err = m.InsertChain(incorrectChain); err == nil { t.Fatal("should fail") @@ -119,7 +119,7 @@ func TestInsertIncorrectStateRootSameAccount(t *testing.T) { t.Fatal("roots are the same") } - incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil, nil) + incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil) incorrectChain := &core.ChainPack{Blocks: []*types.Block{incorrectBlock}, Headers: []*types.Header{&incorrectHeader}, TopBlock: incorrectBlock} if err = m.InsertChain(incorrectChain); err == nil { t.Fatal("should fail") @@ -181,7 +181,7 @@ func TestInsertIncorrectStateRootSameAccountSameAmount(t *testing.T) { incorrectHeader := *chain.Headers[0] // Copy header, not just pointer incorrectHeader.Root = chain.Headers[1].Root - incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), 
chain.Receipts[0], nil, nil) + incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil) incorrectChain := &core.ChainPack{Blocks: []*types.Block{incorrectBlock}, Headers: []*types.Header{&incorrectHeader}, TopBlock: incorrectBlock} if err = m.InsertChain(incorrectChain); err == nil { t.Fatal("should fail") @@ -243,7 +243,7 @@ func TestInsertIncorrectStateRootAllFundsRoot(t *testing.T) { incorrectHeader := *chain.Headers[0] // Copy header, not just pointer incorrectHeader.Root = chain.Headers[1].Root - incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil, nil) + incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil) incorrectChain := &core.ChainPack{Blocks: []*types.Block{incorrectBlock}, Headers: []*types.Header{&incorrectHeader}, TopBlock: incorrectBlock} if err = m.InsertChain(incorrectChain); err == nil { t.Fatal("should fail") @@ -304,7 +304,7 @@ func TestInsertIncorrectStateRootAllFunds(t *testing.T) { // BLOCK 1 incorrectHeader := *chain.Headers[0] // Copy header, not just pointer incorrectHeader.Root = chain.Headers[1].Root - incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil, nil) + incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[0].Transactions(), chain.Blocks[0].Uncles(), chain.Receipts[0], nil) incorrectChain := &core.ChainPack{Blocks: []*types.Block{incorrectBlock}, Headers: []*types.Header{&incorrectHeader}, TopBlock: incorrectBlock} if err = m.InsertChain(incorrectChain); err == nil { @@ -385,7 +385,7 @@ func TestAccountDeployIncorrectRoot(t *testing.T) { incorrectHeader := *chain.Headers[1] // Copy header, not just pointer incorrectHeader.Root = chain.Headers[0].Root - incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[1].Transactions(), chain.Blocks[1].Uncles(), chain.Receipts[1], nil, nil) + incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[1].Transactions(), chain.Blocks[1].Uncles(), chain.Receipts[1], nil) incorrectChain := &core.ChainPack{Blocks: []*types.Block{incorrectBlock}, Headers: []*types.Header{&incorrectHeader}, TopBlock: incorrectBlock} // BLOCK 2 - INCORRECT @@ -492,7 +492,7 @@ func TestAccountCreateIncorrectRoot(t *testing.T) { // BLOCK 3 - INCORRECT incorrectHeader := *chain.Headers[2] // Copy header, not just pointer incorrectHeader.Root = chain.Headers[1].Root - incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[2].Transactions(), chain.Blocks[2].Uncles(), chain.Receipts[2], nil, nil) + incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[2].Transactions(), chain.Blocks[2].Uncles(), chain.Receipts[2], nil) incorrectChain := &core.ChainPack{Blocks: []*types.Block{incorrectBlock}, Headers: []*types.Header{&incorrectHeader}, TopBlock: incorrectBlock} if err = m.InsertChain(incorrectChain); err == nil { @@ -581,7 +581,7 @@ func TestAccountUpdateIncorrectRoot(t *testing.T) { // BLOCK 4 - INCORRECT incorrectHeader := *chain.Headers[3] // Copy header, not just pointer incorrectHeader.Root = chain.Headers[1].Root - incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[3].Transactions(), chain.Blocks[3].Uncles(), chain.Receipts[3], nil, nil) + incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[3].Transactions(), chain.Blocks[3].Uncles(), chain.Receipts[3], nil) 
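// Every test hunk above and below applies the same mechanical change: with block-level
// requests removed elsewhere in this diff, types.NewBlock loses its trailing argument and
// the withdrawals slice becomes the final parameter. A minimal sketch of the new call
// shape; makeTestBlock is a hypothetical helper, not part of this diff, and the parameter
// types are assumptions based on the call sites in these tests:
func makeTestBlock(header *types.Header, txs []types.Transaction, uncles []*types.Header, receipts []*types.Receipt, withdrawals []*types.Withdrawal) *types.Block {
	// the former trailing requests argument is gone, so withdrawals closes the call
	return types.NewBlock(header, txs, uncles, receipts, withdrawals)
}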
incorrectChain := &core.ChainPack{Blocks: []*types.Block{incorrectBlock}, Headers: []*types.Header{&incorrectHeader}, TopBlock: incorrectBlock} if err = m.InsertChain(incorrectChain); err == nil { @@ -669,7 +669,7 @@ func TestAccountDeleteIncorrectRoot(t *testing.T) { // BLOCK 4 - INCORRECT incorrectHeader := *chain.Headers[3] // Copy header, not just pointer incorrectHeader.Root = chain.Headers[1].Root - incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[3].Transactions(), chain.Blocks[3].Uncles(), chain.Receipts[3], nil, nil) + incorrectBlock := types.NewBlock(&incorrectHeader, chain.Blocks[3].Transactions(), chain.Blocks[3].Uncles(), chain.Receipts[3], nil) incorrectChain := &core.ChainPack{Blocks: []*types.Block{incorrectBlock}, Headers: []*types.Header{&incorrectHeader}, TopBlock: incorrectBlock} if err = m.InsertChain(incorrectChain); err == nil { t.Fatal("should fail") diff --git a/turbo/adapter/ethapi/api.go b/turbo/adapter/ethapi/api.go index f5b9bd0245b..3f2f84888c3 100644 --- a/turbo/adapter/ethapi/api.go +++ b/turbo/adapter/ethapi/api.go @@ -159,6 +159,68 @@ func (args *CallArgs) ToMessage(globalGasCap uint64, baseFee *uint256.Int) (type return msg, nil } +// ToTransaction converts CallArgs to the Transaction type used by the core evm +func (args *CallArgs) ToTransaction(globalGasCap uint64, baseFee *uint256.Int) (types.Transaction, error) { + chainID, overflow := uint256.FromBig((*big.Int)(args.ChainID)) + if overflow { + return nil, fmt.Errorf("chainId field caused an overflow (uint256)") + } + + msg, err := args.ToMessage(globalGasCap, baseFee) + if err != nil { + return nil, err + } + + var tx types.Transaction + switch { + case args.MaxFeePerGas != nil: + al := types2.AccessList{} + if args.AccessList != nil { + al = *args.AccessList + } + tx = &types.DynamicFeeTransaction{ + CommonTx: types.CommonTx{ + Nonce: msg.Nonce(), + Gas: msg.Gas(), + To: args.To, + Value: msg.Value(), + Data: msg.Data(), + }, + ChainID: chainID, + FeeCap: msg.FeeCap(), + Tip: msg.Tip(), + AccessList: al, + } + case args.AccessList != nil: + tx = &types.AccessListTx{ + LegacyTx: types.LegacyTx{ + CommonTx: types.CommonTx{ + Nonce: msg.Nonce(), + Gas: msg.Gas(), + To: args.To, + Value: msg.Value(), + Data: msg.Data(), + }, + GasPrice: msg.GasPrice(), + }, + ChainID: chainID, + AccessList: *args.AccessList, + } + default: + tx = &types.LegacyTx{ + CommonTx: types.CommonTx{ + Nonce: msg.Nonce(), + Gas: msg.Gas(), + To: args.To, + Value: msg.Value(), + Data: msg.Data(), + }, + GasPrice: msg.GasPrice(), + } + } + return tx, nil +} + // account indicates the overriding fields of account during the execution of // a message call. // Note, state and stateDiff can't be specified at the same time. 
If state is diff --git a/turbo/adapter/ethapi/state_overrides.go b/turbo/adapter/ethapi/state_overrides.go index 1ce2655acad..19c9467058e 100644 --- a/turbo/adapter/ethapi/state_overrides.go +++ b/turbo/adapter/ethapi/state_overrides.go @@ -8,6 +8,7 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/tracing" ) type StateOverrides map[libcommon.Address]Account @@ -29,7 +30,7 @@ func (overrides *StateOverrides) Override(state *state.IntraBlockState) error { if overflow { return fmt.Errorf("account.Balance higher than 2^256-1") } - state.SetBalance(addr, balance) + state.SetBalance(addr, balance, tracing.BalanceChangeUnspecified) } if account.State != nil && account.StateDiff != nil { return fmt.Errorf("account %s has both 'state' and 'stateDiff'", addr.Hex()) diff --git a/turbo/app/backup_cmd.go b/turbo/app/backup_cmd.go index 5d74ea84ff9..933a4f18da4 100644 --- a/turbo/app/backup_cmd.go +++ b/turbo/app/backup_cmd.go @@ -77,7 +77,7 @@ CloudDrives (and ssd) have bad-latency and good-parallel-throughput - then havin ) func doBackup(cliCtx *cli.Context) error { - logger, _, _, err := debug.Setup(cliCtx, true /* rootLogger */) + logger, _, _, _, err := debug.Setup(cliCtx, true /* rootLogger */) if err != nil { return err } diff --git a/turbo/app/import_cmd.go b/turbo/app/import_cmd.go index 8e65aabaa61..24a32d0ea7e 100644 --- a/turbo/app/import_cmd.go +++ b/turbo/app/import_cmd.go @@ -60,7 +60,7 @@ func importChain(cliCtx *cli.Context) error { if cliCtx.NArg() < 1 { utils.Fatalf("This command requires an argument.") } - logger, _, _, err := debug.Setup(cliCtx, true /* rootLogger */) + logger, tracer, _, _, err := debug.Setup(cliCtx, true /* rootLogger */) if err != nil { return err } @@ -71,7 +71,7 @@ func importChain(cliCtx *cli.Context) error { stack := makeConfigNode(cliCtx.Context, nodeCfg, logger) defer stack.Close() - ethereum, err := eth.New(cliCtx.Context, stack, ethCfg, logger) + ethereum, err := eth.New(cliCtx.Context, stack, ethCfg, logger, tracer) if err != nil { return err } diff --git a/turbo/app/init_cmd.go b/turbo/app/init_cmd.go index 497d0d83080..1db9ceb872e 100644 --- a/turbo/app/init_cmd.go +++ b/turbo/app/init_cmd.go @@ -4,7 +4,9 @@ import ( "encoding/json" "os" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/tracers" "github.com/ledgerwatch/erigon/turbo/debug" "github.com/ledgerwatch/log/v3" "github.com/urfave/cli/v2" @@ -36,8 +38,9 @@ It expects the genesis file as argument.`, // the zero'd block (i.e. genesis) or will fail hard if it can't succeed. 
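// The command changes above pair two signature updates: debug.Setup now returns a
// *tracers.Tracer as its second value, and eth.New accepts that tracer as a trailing
// parameter (nil is accepted, as tests/bor/helper/miner.go shows). A sketch of the new
// calling convention; newBackendWithTracer is a hypothetical helper, and it assumes stack
// and ethCfg are prepared the way importChain above prepares them:
func newBackendWithTracer(cliCtx *cli.Context, stack *node.Node, ethCfg *ethconfig.Config) (*eth.Ethereum, error) {
	logger, tracer, _, _, err := debug.Setup(cliCtx, true /* rootLogger */)
	if err != nil {
		return nil, err
	}
	// tracer is nil when no tracing flags were given; eth.New tolerates that
	return eth.New(cliCtx.Context, stack, ethCfg, logger, tracer)
}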
func initGenesis(cliCtx *cli.Context) error { var logger log.Logger + var tracer *tracers.Tracer var err error - if logger, _, _, err = debug.Setup(cliCtx, true /* rootLogger */); err != nil { + if logger, tracer, _, _, err = debug.Setup(cliCtx, true /* rootLogger */); err != nil { return err } // Make sure we have a valid genesis JSON @@ -65,7 +68,14 @@ func initGenesis(cliCtx *cli.Context) error { if err != nil { utils.Fatalf("Failed to open database: %v", err) } - _, hash, err := core.CommitGenesisBlock(chaindb, genesis, "", logger) + var tracingHooks *tracing.Hooks + if tracer != nil { + tracingHooks = tracer.Hooks + if tracer.Hooks != nil && tracer.Hooks.OnBlockchainInit != nil { + tracer.Hooks.OnBlockchainInit(genesis.Config) + } + } + _, hash, err := core.CommitGenesisBlock(chaindb, genesis, "", logger, tracingHooks) if err != nil { utils.Fatalf("Failed to write genesis block: %v", err) } diff --git a/turbo/app/make_app.go b/turbo/app/make_app.go index a3e7af1a614..5a918bbb0f9 100644 --- a/turbo/app/make_app.go +++ b/turbo/app/make_app.go @@ -12,6 +12,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/datadir" "github.com/ledgerwatch/erigon/turbo/logging" enode "github.com/ledgerwatch/erigon/turbo/node" + "github.com/ledgerwatch/erigon/turbo/tracing" "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/node" @@ -75,6 +76,7 @@ func appFlags(cliFlags []cli.Flag) []cli.Flag { flags := append(cliFlags, debug.Flags...) // debug flags are required flags = append(flags, utils.MetricFlags...) flags = append(flags, logging.Flags...) + flags = append(flags, tracing.Flags...) flags = append(flags, &utils.ConfigFlag) // remove exact duplicate flags, keeping only the first one. this will allow easier composition later down the line diff --git a/turbo/app/snapshots_cmd.go b/turbo/app/snapshots_cmd.go index 9110e6c044e..7c99f26d15a 100644 --- a/turbo/app/snapshots_cmd.go +++ b/turbo/app/snapshots_cmd.go @@ -33,6 +33,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon-lib/metrics" @@ -48,6 +49,7 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig/estimate" "github.com/ledgerwatch/erigon/eth/integrity" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/eth/tracers" "github.com/ledgerwatch/erigon/params" erigoncli "github.com/ledgerwatch/erigon/turbo/cli" "github.com/ledgerwatch/erigon/turbo/debug" @@ -70,7 +72,7 @@ var snapshotCommand = cli.Command{ Before: func(cliCtx *cli.Context) error { go mem.LogMemStats(cliCtx.Context, log.New()) go disk.UpdateDiskStats(cliCtx.Context, log.New()) - _, _, _, err := debug.Setup(cliCtx, true /* rootLogger */) + _, _, _, _, err := debug.Setup(cliCtx, true /* rootLogger */) if err != nil { return err } @@ -288,7 +290,7 @@ var ( ) func doBtSearch(cliCtx *cli.Context) error { - logger, _, _, err := debug.Setup(cliCtx, true /* root logger */) + logger, _, _, _, err := debug.Setup(cliCtx, true /* root logger */) if err != nil { return err } @@ -332,7 +334,7 @@ func doBtSearch(cliCtx *cli.Context) error { } func doDebugKey(cliCtx *cli.Context) error { - logger, _, _, err := debug.Setup(cliCtx, true /* root logger */) + logger, _, _, _, err := debug.Setup(cliCtx, true /* root logger */) if err != nil { return err } @@ -372,7 +374,7 @@ func 
doDebugKey(cliCtx *cli.Context) error { } func doIntegrity(cliCtx *cli.Context) error { - logger, _, _, err := debug.Setup(cliCtx, true /* root logger */) + logger, _, _, _, err := debug.Setup(cliCtx, true /* root logger */) if err != nil { return err } @@ -488,7 +490,7 @@ func doMeta(cliCtx *cli.Context) error { } func doDecompressSpeed(cliCtx *cli.Context) error { - logger, _, _, err := debug.Setup(cliCtx, true /* rootLogger */) + logger, _, _, _, err := debug.Setup(cliCtx, true /* rootLogger */) if err != nil { return err } @@ -528,7 +530,7 @@ func doDecompressSpeed(cliCtx *cli.Context) error { } func doIndicesCommand(cliCtx *cli.Context) error { - logger, _, _, err := debug.Setup(cliCtx, true /* rootLogger */) + logger, _, _, _, err := debug.Setup(cliCtx, true /* rootLogger */) if err != nil { return err } @@ -615,7 +617,7 @@ func openSnaps(ctx context.Context, cfg ethconfig.BlocksFreezing, dirs datadir.D } blockReader := freezeblocks.NewBlockReader(blockSnaps, borSnaps) - blockWriter := blockio.NewBlockWriter() + blockWriter := blockio.NewBlockWriter(fromdb.HistV3(chainDB)) blockSnapBuildSema := semaphore.NewWeighted(int64(dbg.BuildSnapshotAllowance)) agg.SetSnapshotBuildSema(blockSnapBuildSema) @@ -626,7 +628,7 @@ func openSnaps(ctx context.Context, cfg ethconfig.BlocksFreezing, dirs datadir.D func doUncompress(cliCtx *cli.Context) error { var logger log.Logger var err error - if logger, _, _, err = debug.Setup(cliCtx, true /* rootLogger */); err != nil { + if logger, _, _, _, err = debug.Setup(cliCtx, true /* rootLogger */); err != nil { return err } ctx := cliCtx.Context @@ -679,7 +681,7 @@ func doUncompress(cliCtx *cli.Context) error { func doCompress(cliCtx *cli.Context) error { var err error var logger log.Logger - if logger, _, _, err = debug.Setup(cliCtx, true /* rootLogger */); err != nil { + if logger, _, _, _, err = debug.Setup(cliCtx, true /* rootLogger */); err != nil { return err } ctx := cliCtx.Context @@ -727,7 +729,7 @@ func doCompress(cliCtx *cli.Context) error { return nil } func doRetireCommand(cliCtx *cli.Context) error { - logger, _, _, err := debug.Setup(cliCtx, true /* rootLogger */) + logger, _, _, _, err := debug.Setup(cliCtx, true /* rootLogger */) if err != nil { return err } @@ -782,7 +784,7 @@ func doRetireCommand(cliCtx *cli.Context) error { } logger.Info("Params", "from", from, "to", to, "every", every) - if err := br.RetireBlocks(ctx, 0, forwardProgress, log.LvlInfo, nil, nil, nil); err != nil { + if err := br.RetireBlocks(ctx, 0, forwardProgress, log.LvlInfo, nil, nil); err != nil { return err } @@ -809,21 +811,27 @@ func doRetireCommand(cliCtx *cli.Context) error { } } + if !kvcfg.HistoryV3.FromDB(db) { + return nil + } + db, err = temporal.New(db, agg) if err != nil { return err } logger.Info("Prune state history") - ac := agg.BeginFilesRo() - defer ac.Close() for hasMoreToPrune := true; hasMoreToPrune; { - hasMoreToPrune, err = ac.PruneSmallBatchesDb(ctx, 2*time.Minute, db) - if err != nil { + if err := db.UpdateNosync(ctx, func(tx kv.RwTx) error { + ac := agg.BeginFilesRo() + defer ac.Close() + + hasMoreToPrune, err = ac.PruneSmallBatches(ctx, 2*time.Minute, tx) + return err + }); err != nil { return err } } - ac.Close() logger.Info("Work on state history snapshots") indexWorkers := estimate.IndexSnapshot.Workers() @@ -870,16 +878,17 @@ func doRetireCommand(cliCtx *cli.Context) error { }); err != nil { return err } - - ac = agg.BeginFilesRo() - defer ac.Close() for hasMoreToPrune := true; hasMoreToPrune; { - hasMoreToPrune, err = 
ac.PruneSmallBatchesDb(context.Background(), 2*time.Minute, db) - if err != nil { + if err := db.UpdateNosync(ctx, func(tx kv.RwTx) error { + ac := agg.BeginFilesRo() + defer ac.Close() + + hasMoreToPrune, err = ac.PruneSmallBatches(context.Background(), 2*time.Minute, tx) + return err + }); err != nil { return err } } - ac.Close() if err = agg.MergeLoop(ctx); err != nil { return err @@ -911,11 +920,12 @@ func doRetireCommand(cliCtx *cli.Context) error { func doUploaderCommand(cliCtx *cli.Context) error { var logger log.Logger + var tracer *tracers.Tracer var err error var metricsMux *http.ServeMux var pprofMux *http.ServeMux - if logger, metricsMux, pprofMux, err = debug.Setup(cliCtx, true /* root logger */); err != nil { + if logger, tracer, metricsMux, pprofMux, err = debug.Setup(cliCtx, true /* root logger */); err != nil { return err } @@ -932,7 +942,7 @@ func doUploaderCommand(cliCtx *cli.Context) error { ethCfg := node.NewEthConfigUrfave(cliCtx, nodeCfg, logger) - ethNode, err := node.New(cliCtx.Context, nodeCfg, ethCfg, logger) + ethNode, err := node.New(cliCtx.Context, nodeCfg, ethCfg, logger, tracer) if err != nil { log.Error("Erigon startup", "err", err) return err diff --git a/turbo/app/support_cmd.go b/turbo/app/support_cmd.go index c2db45cdc71..7ca13688f0c 100644 --- a/turbo/app/support_cmd.go +++ b/turbo/app/support_cmd.go @@ -67,7 +67,7 @@ var supportCommand = cli.Command{ Usage: "Connect Erigon instance to a diagnostics system for support", ArgsUsage: "--diagnostics.addr --ids --metrics.urls ", Before: func(cliCtx *cli.Context) error { - _, _, _, err := debug.Setup(cliCtx, true /* rootLogger */) + _, _, _, _, err := debug.Setup(cliCtx, true /* rootLogger */) if err != nil { return err } diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index 162bae95cfd..b06965b0d9e 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -28,12 +28,10 @@ var DefaultFlags = []cli.Flag{ &utils.TxPoolTraceSendersFlag, &utils.TxPoolCommitEveryFlag, &PruneFlag, - &PruneBlocksFlag, &PruneHistoryFlag, &PruneReceiptFlag, &PruneTxIndexFlag, &PruneCallTracesFlag, - &PruneBlocksBeforeFlag, &PruneHistoryBeforeFlag, &PruneReceiptBeforeFlag, &PruneTxIndexBeforeFlag, diff --git a/turbo/cli/flags.go b/turbo/cli/flags.go index 7458a6d16ea..1f4f5b6e4fd 100644 --- a/turbo/cli/flags.go +++ b/turbo/cli/flags.go @@ -2,7 +2,6 @@ package cli import ( "fmt" - "math" "time" "github.com/ledgerwatch/erigon-lib/common/hexutil" @@ -77,10 +76,6 @@ var ( Example: --prune=htc`, Value: "disabled", } - PruneBlocksFlag = cli.Uint64Flag{ - Name: "prune.b.older", - Usage: `Prune data older than this number of blocks from the tip of the chain (if --prune flag has 'b', then default is 90K)`, - } PruneHistoryFlag = cli.Uint64Flag{ Name: "prune.h.older", Usage: `Prune data older than this number of blocks from the tip of the chain (if --prune flag has 'h', then default is 90K)`, @@ -114,10 +109,6 @@ var ( Name: "prune.c.before", Usage: `Prune data before this block`, } - PruneBlocksBeforeFlag = cli.Uint64Flag{ - Name: "prune.b.before", - Usage: `Prune data before this block`, - } ExperimentsFlag = cli.StringFlag{ Name: "experiments", @@ -267,15 +258,10 @@ func ApplyFlagsForEthConfig(ctx *cli.Context, cfg *ethconfig.Config, logger log. 
if cfg.Genesis != nil { chainId = cfg.Genesis.Config.ChainID.Uint64() } - minimal := ctx.String(PruneFlag.Name) == "minimal" - pruneFlagString := ctx.String(PruneFlag.Name) - if minimal { - pruneFlagString = "htrcb" - } + mode, err := prune.FromCli( chainId, - pruneFlagString, - ctx.Uint64(PruneBlocksFlag.Name), + ctx.String(PruneFlag.Name), ctx.Uint64(PruneHistoryFlag.Name), ctx.Uint64(PruneReceiptFlag.Name), ctx.Uint64(PruneTxIndexFlag.Name), @@ -284,13 +270,8 @@ func ApplyFlagsForEthConfig(ctx *cli.Context, cfg *ethconfig.Config, logger log. ctx.Uint64(PruneReceiptBeforeFlag.Name), ctx.Uint64(PruneTxIndexBeforeFlag.Name), ctx.Uint64(PruneCallTracesBeforeFlag.Name), - ctx.Uint64(PruneBlocksBeforeFlag.Name), libcommon.CliString2Array(ctx.String(ExperimentsFlag.Name)), ) - if err != nil { - utils.Fatalf(fmt.Sprintf("error while parsing mode: %v", err)) - } - if err != nil { utils.Fatalf(fmt.Sprintf("error while parsing mode: %v", err)) } @@ -312,15 +293,6 @@ func ApplyFlagsForEthConfig(ctx *cli.Context, cfg *ethconfig.Config, logger log. etl.BufferOptimalSize = *size } - if minimal { - // Prune them all. - cfg.Prune.Blocks = prune.Before(math.MaxUint64) - cfg.Prune.History = prune.Before(math.MaxUint64) - cfg.Prune.Receipts = prune.Before(math.MaxUint64) - cfg.Prune.TxIndex = prune.Before(math.MaxUint64) - cfg.Prune.CallTraces = prune.Before(math.MaxUint64) - } - cfg.StateStream = !ctx.Bool(StateStreamDisableFlag.Name) if ctx.String(BodyCacheLimitFlag.Name) != "" { err := cfg.Sync.BodyCacheLimit.UnmarshalText([]byte(ctx.String(BodyCacheLimitFlag.Name))) @@ -393,10 +365,7 @@ func ApplyFlagsForEthConfigCobra(f *pflag.FlagSet, cfg *ethconfig.Config) { if exp := f.StringSlice(ExperimentsFlag.Name, nil, ExperimentsFlag.Usage); exp != nil { experiments = *exp } - var exactB, exactH, exactR, exactT, exactC uint64 - if v := f.Uint64(PruneBlocksFlag.Name, PruneBlocksFlag.Value, PruneBlocksFlag.Usage); v != nil { - exactB = *v - } + var exactH, exactR, exactT, exactC uint64 if v := f.Uint64(PruneHistoryFlag.Name, PruneHistoryFlag.Value, PruneHistoryFlag.Usage); v != nil { exactH = *v } @@ -410,10 +379,7 @@ func ApplyFlagsForEthConfigCobra(f *pflag.FlagSet, cfg *ethconfig.Config) { exactC = *v } - var beforeB, beforeH, beforeR, beforeT, beforeC uint64 - if v := f.Uint64(PruneBlocksBeforeFlag.Name, PruneBlocksBeforeFlag.Value, PruneBlocksBeforeFlag.Usage); v != nil { - beforeB = *v - } + var beforeH, beforeR, beforeT, beforeC uint64 if v := f.Uint64(PruneHistoryBeforeFlag.Name, PruneHistoryBeforeFlag.Value, PruneHistoryBeforeFlag.Usage); v != nil { beforeH = *v } @@ -432,7 +398,7 @@ func ApplyFlagsForEthConfigCobra(f *pflag.FlagSet, cfg *ethconfig.Config) { chainId = cfg.Genesis.Config.ChainID.Uint64() } - mode, err := prune.FromCli(chainId, *v, exactB, exactH, exactR, exactT, exactC, beforeH, beforeR, beforeT, beforeC, beforeB, experiments) + mode, err := prune.FromCli(chainId, *v, exactH, exactR, exactT, exactC, beforeH, beforeR, beforeT, beforeC, experiments) if err != nil { utils.Fatalf(fmt.Sprintf("error while parsing mode: %v", err)) } diff --git a/turbo/debug/flags.go b/turbo/debug/flags.go index 421da286d9d..9a1189c99b8 100644 --- a/turbo/debug/flags.go +++ b/turbo/debug/flags.go @@ -35,7 +35,10 @@ import ( "gopkg.in/yaml.v2" "github.com/ledgerwatch/erigon/common/fdlimit" + "github.com/ledgerwatch/erigon/eth/tracers" + "github.com/ledgerwatch/erigon/turbo/logging" + "github.com/ledgerwatch/erigon/turbo/tracing" ) var ( @@ -184,7 +187,7 @@ func SetupCobra(cmd *cobra.Command, filePrefix string) 
log.Logger { // Setup initializes profiling and logging based on the CLI flags. // It should be called as early as possible in the program. -func Setup(ctx *cli.Context, rootLogger bool) (log.Logger, *http.ServeMux, *http.ServeMux, error) { +func Setup(ctx *cli.Context, rootLogger bool) (log.Logger, *tracers.Tracer, *http.ServeMux, *http.ServeMux, error) { // ensure we've read in config file details before setting up metrics etc. if err := SetFlagsFromConfigFile(ctx); err != nil { log.Warn("failed setting config flags from yaml/toml file", "err", err) @@ -193,16 +196,20 @@ func Setup(ctx *cli.Context, rootLogger bool) (log.Logger, *http.ServeMux, *http RaiseFdLimit() logger := logging.SetupLoggerCtx("erigon", ctx, log.LvlInfo, log.LvlInfo, rootLogger) + tracer, err := tracing.SetupTracerCtx(ctx) + if err != nil { + return logger, tracer, nil, nil, err + } if traceFile := ctx.String(traceFlag.Name); traceFile != "" { if err := Handler.StartGoTrace(traceFile); err != nil { - return logger, nil, nil, err + return logger, tracer, nil, nil, err } } if cpuFile := ctx.String(cpuprofileFlag.Name); cpuFile != "" { if err := Handler.StartCPUProfile(cpuFile); err != nil { - return logger, nil, nil, err + return logger, tracer, nil, nil, err } } pprofEnabled := ctx.Bool(pprofFlag.Name) @@ -226,11 +233,11 @@ func Setup(ctx *cli.Context, rootLogger bool) (log.Logger, *http.ServeMux, *http metricsMux = StartPProf(address, metricsMux) } else { pprofMux := StartPProf(address, nil) - return logger, metricsMux, pprofMux, nil + return logger, tracer, metricsMux, pprofMux, nil } } - return logger, metricsMux, nil, nil + return logger, tracer, metricsMux, nil, nil } func StartPProf(address string, metricsMux *http.ServeMux) *http.ServeMux { diff --git a/turbo/engineapi/engine_block_downloader/block_downloader.go b/turbo/engineapi/engine_block_downloader/block_downloader.go index 510c5cf4fa4..fd6d35a083a 100644 --- a/turbo/engineapi/engine_block_downloader/block_downloader.go +++ b/turbo/engineapi/engine_block_downloader/block_downloader.go @@ -126,25 +126,11 @@ func (e *EngineBlockDownloader) scheduleHeadersDownload( } // waitForEndOfHeadersDownload waits until the download of headers ends and returns the outcome. -func (e *EngineBlockDownloader) waitForEndOfHeadersDownload(ctx context.Context) (headerdownload.SyncStatus, error) { - ticker := time.NewTicker(10 * time.Millisecond) - defer ticker.Stop() - - logEvery := time.NewTicker(30 * time.Second) - defer logEvery.Stop() - - for { - select { - case <-ticker.C: - if e.hd.PosStatus() != headerdownload.Syncing { - return e.hd.PosStatus(), nil - } - case <-ctx.Done(): - return e.hd.PosStatus(), ctx.Err() - case <-logEvery.C: - e.logger.Info("[EngineBlockDownloader] Waiting for headers download to finish") - } +func (e *EngineBlockDownloader) waitForEndOfHeadersDownload() headerdownload.SyncStatus { + for e.hd.PosStatus() == headerdownload.Syncing { + time.Sleep(10 * time.Millisecond) } + return e.hd.PosStatus() } // waitForEndOfHeadersDownload waits until the download of headers ends and returns the outcome. 
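// waitForEndOfHeadersDownload above trades the ticker/select loop for plain polling: it
// sleeps until the header downloader leaves the Syncing state, and it no longer takes a
// context or returns an error, so its caller (see core.go below) drops the error handling.
// A minimal sketch of the same pattern; waitUntilNotSyncing and its status parameter are
// hypothetical stand-ins for e.hd.PosStatus:
func waitUntilNotSyncing(status func() headerdownload.SyncStatus) headerdownload.SyncStatus {
	// poll every 10ms; cancellation, if needed, is now the caller's concern
	for status() == headerdownload.Syncing {
		time.Sleep(10 * time.Millisecond)
	}
	return status()
}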
@@ -264,7 +250,7 @@ func (e *EngineBlockDownloader) insertHeadersAndBodies(ctx context.Context, tx k if body == nil { return fmt.Errorf("missing body at block=%d", number) } - blocksBatch = append(blocksBatch, types.NewBlockFromStorage(hash, header, body.Transactions, body.Uncles, body.Withdrawals, body.Requests)) + blocksBatch = append(blocksBatch, types.NewBlockFromStorage(hash, header, body.Transactions, body.Uncles, body.Withdrawals)) if number%uint64(blockWrittenLogSize) == 0 { e.logger.Info("[insertHeadersAndBodies] Written blocks", "progress", number, "to", toBlock) } diff --git a/turbo/engineapi/engine_block_downloader/core.go b/turbo/engineapi/engine_block_downloader/core.go index ade141f3c6c..6f07321b762 100644 --- a/turbo/engineapi/engine_block_downloader/core.go +++ b/turbo/engineapi/engine_block_downloader/core.go @@ -22,12 +22,7 @@ func (e *EngineBlockDownloader) download(ctx context.Context, hashToDownload lib return } // see the outcome of header download - headersStatus, err := e.waitForEndOfHeadersDownload(ctx) - if err != nil { - e.logger.Warn("[EngineBlockDownloader] Could not finish headers download", "err", err) - e.status.Store(headerdownload.Idle) - return - } + headersStatus := e.waitForEndOfHeadersDownload() if headersStatus != headerdownload.Synced { // Could not sync. Set to idle diff --git a/turbo/engineapi/engine_helpers/fork_validator.go b/turbo/engineapi/engine_helpers/fork_validator.go index 2f152379bfd..18e580f8426 100644 --- a/turbo/engineapi/engine_helpers/fork_validator.go +++ b/turbo/engineapi/engine_helpers/fork_validator.go @@ -23,6 +23,7 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon-lib/wrap" @@ -159,16 +160,21 @@ func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *t log.Debug("Execution ForkValidator.ValidatePayload", "extendCanonical", extendCanonical) if extendCanonical { + histV3, err := kvcfg.HistoryV3.Enabled(tx) + if err != nil { + return "", [32]byte{}, nil, err + } var txc wrap.TxContainer m := membatchwithdb.NewMemoryBatch(tx, fv.tmpDir, logger) defer m.Close() txc.Tx = m - var err error - txc.Doms, err = state.NewSharedDomains(tx, logger) - if err != nil { - return "", [32]byte{}, nil, err + if histV3 { + txc.Doms, err = state.NewSharedDomains(tx, logger) + if err != nil { + return "", [32]byte{}, nil, err + } + defer txc.Doms.Close() } - defer txc.Doms.Close() fv.extendingForkNotifications = &shards.Notifications{ Events: shards.NewEvents(), Accumulator: shards.NewAccumulator(), @@ -259,15 +265,21 @@ func (fv *ForkValidator) ValidatePayload(tx kv.Tx, header *types.Header, body *t unwindPoint = 0 } var txc wrap.TxContainer + histV3, err := kvcfg.HistoryV3.Enabled(tx) + if err != nil { + return "", [32]byte{}, nil, err + } batch := membatchwithdb.NewMemoryBatch(tx, fv.tmpDir, logger) defer batch.Rollback() txc.Tx = batch - sd, err := state.NewSharedDomains(tx, logger) - if err != nil { - return "", [32]byte{}, nil, err + if histV3 { + sd, err := state.NewSharedDomains(tx, logger) + if err != nil { + return "", [32]byte{}, nil, err + } + defer sd.Close() + txc.Doms = sd } - defer sd.Close() - txc.Doms = sd notifications := &shards.Notifications{ Events: shards.NewEvents(), Accumulator: shards.NewAccumulator(), diff --git a/turbo/engineapi/engine_server.go 
b/turbo/engineapi/engine_server.go index 328cff188b0..eb0fdde6b04 100644 --- a/turbo/engineapi/engine_server.go +++ b/turbo/engineapi/engine_server.go @@ -115,9 +115,9 @@ func (e *EngineServer) Start( } } -func (s *EngineServer) checkWithdrawalsPresence(time uint64, withdrawals types.Withdrawals) error { +func (s *EngineServer) checkWithdrawalsPresence(time uint64, withdrawals []*types.Withdrawal) error { if !s.config.IsShanghai(time) && withdrawals != nil { - return &rpc.InvalidParamsError{Message: "withdrawals before Shanghai"} + return &rpc.InvalidParamsError{Message: "withdrawals before shanghai"} } if s.config.IsShanghai(time) && withdrawals == nil { return &rpc.InvalidParamsError{Message: "missing withdrawals list"} @@ -125,16 +125,6 @@ func (s *EngineServer) checkWithdrawalsPresence(time uint64, withdrawals types.W return nil } -func (s *EngineServer) checkRequestsPresence(time uint64, requests types.Requests) error { - if !s.config.IsPrague(time) && requests != nil { - return &rpc.InvalidParamsError{Message: "requests before Prague"} - } - if s.config.IsPrague(time) && requests == nil { - return &rpc.InvalidParamsError{Message: "missing requests list"} - } - return nil -} - // EngineNewPayload validates and possibly executes payload func (s *EngineServer) newPayload(ctx context.Context, req *engine_types.ExecutionPayload, expectedBlobHashes []libcommon.Hash, parentBeaconBlockRoot *libcommon.Hash, version clparams.StateVersion, @@ -169,30 +159,19 @@ func (s *EngineServer) newPayload(ctx context.Context, req *engine_types.Executi ReceiptHash: req.ReceiptsRoot, TxHash: types.DeriveSha(types.BinaryTransactions(txs)), } - - var withdrawals types.Withdrawals + var withdrawals []*types.Withdrawal if version >= clparams.CapellaVersion { withdrawals = req.Withdrawals } - if err := s.checkWithdrawalsPresence(header.Time, withdrawals); err != nil { - return nil, err - } + if withdrawals != nil { - wh := types.DeriveSha(withdrawals) + wh := types.DeriveSha(types.Withdrawals(withdrawals)) header.WithdrawalsHash = &wh } - var requests types.Requests - if version >= clparams.ElectraVersion && req.DepositRequests != nil { - requests = req.DepositRequests.ToRequests() - } - if err := s.checkRequestsPresence(header.Time, requests); err != nil { + if err := s.checkWithdrawalsPresence(header.Time, withdrawals); err != nil { return nil, err } - if requests != nil { - rh := types.DeriveSha(requests) - header.RequestsRoot = &rh - } if version <= clparams.CapellaVersion { if req.BlobGasUsed != nil { @@ -281,7 +260,7 @@ func (s *EngineServer) newPayload(ctx context.Context, req *engine_types.Executi defer s.lock.Unlock() s.logger.Debug("[NewPayload] sending block", "height", header.Number, "hash", blockHash) - block := types.NewBlockFromStorage(blockHash, &header, transactions, nil /* uncles */, withdrawals, requests) + block := types.NewBlockFromStorage(blockHash, &header, transactions, nil /* uncles */, withdrawals) payloadStatus, err := s.HandleNewPayload(ctx, "NewPayload", block, expectedBlobHashes) if err != nil { @@ -638,14 +617,6 @@ func (e *EngineServer) GetPayloadV3(ctx context.Context, payloadID hexutility.By return e.getPayload(ctx, decodedPayloadId, clparams.DenebVersion) } -// Same as [GetPayloadV3], but returning ExecutionPayloadV4 (= ExecutionPayloadV3 + requests) -// See https://github.com/ethereum/execution-apis/blob/main/src/engine/prague.md#engine_getpayloadv4 -func (e *EngineServer) GetPayloadV4(ctx context.Context, payloadID hexutility.Bytes) (*engine_types.GetPayloadResponse, error) { 
- decodedPayloadId := binary.BigEndian.Uint64(payloadID) - e.logger.Info("Received GetPayloadV4", "payloadId", decodedPayloadId) - return e.getPayload(ctx, decodedPayloadId, clparams.ElectraVersion) -} - // Updates the forkchoice state after validating the headBlockHash // Additionally, builds and returns a unique identifier for an initial version of a payload // (asynchronously updated with transactions), if payloadAttributes is not nil and passes validation @@ -685,15 +656,6 @@ func (e *EngineServer) NewPayloadV3(ctx context.Context, payload *engine_types.E return e.newPayload(ctx, payload, expectedBlobHashes, parentBeaconBlockRoot, clparams.DenebVersion) } -// NewPayloadV4 processes new payloads (blocks) from the beacon chain with withdrawals, blob gas and requests. -// See https://github.com/ethereum/execution-apis/blob/main/src/engine/prague.md#engine_newpayloadv4 -func (e *EngineServer) NewPayloadV4(ctx context.Context, payload *engine_types.ExecutionPayload, - expectedBlobHashes []libcommon.Hash, parentBeaconBlockRoot *libcommon.Hash) (*engine_types.PayloadStatus, error) { - // TODO(racytech): add proper version or refactor this part - // add all version ralated checks here so the newpayload doesn't have to deal with checks - return e.newPayload(ctx, payload, expectedBlobHashes, parentBeaconBlockRoot, clparams.ElectraVersion) -} - // Receives consensus layer's transition configuration and checks if the execution layer has the correct configuration. // Can also be used to ping the execution layer (heartbeats). // See https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.1/src/engine/specification.md#engine_exchangetransitionconfigurationv1 @@ -748,11 +710,9 @@ var ourCapabilities = []string{ "engine_newPayloadV1", "engine_newPayloadV2", "engine_newPayloadV3", - "engine_newPayloadV4", "engine_getPayloadV1", "engine_getPayloadV2", "engine_getPayloadV3", - "engine_getPayloadV4", "engine_exchangeTransitionConfigurationV1", "engine_getPayloadBodiesByHashV1", "engine_getPayloadBodiesByRangeV1", diff --git a/turbo/engineapi/engine_types/jsonrpc.go b/turbo/engineapi/engine_types/jsonrpc.go index 2cf3334c8b9..1eb40d177bf 100644 --- a/turbo/engineapi/engine_types/jsonrpc.go +++ b/turbo/engineapi/engine_types/jsonrpc.go @@ -17,24 +17,23 @@ import ( // ExecutionPayload represents an execution payload (aka block) type ExecutionPayload struct { - ParentHash common.Hash `json:"parentHash" gencodec:"required"` - FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` - StateRoot common.Hash `json:"stateRoot" gencodec:"required"` - ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` - LogsBloom hexutility.Bytes `json:"logsBloom" gencodec:"required"` - PrevRandao common.Hash `json:"prevRandao" gencodec:"required"` - BlockNumber hexutil.Uint64 `json:"blockNumber" gencodec:"required"` - GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` - ExtraData hexutility.Bytes `json:"extraData" gencodec:"required"` - BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` - BlockHash common.Hash `json:"blockHash" gencodec:"required"` - Transactions []hexutility.Bytes `json:"transactions" gencodec:"required"` - Withdrawals []*types.Withdrawal `json:"withdrawals"` - BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` - ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` - DepositRequests types.Deposits `json:"depositRequests"` // do 
not forget to add it into erigon-lib/gointerfaces/types if needed + ParentHash common.Hash `json:"parentHash" gencodec:"required"` + FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` + StateRoot common.Hash `json:"stateRoot" gencodec:"required"` + ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` + LogsBloom hexutility.Bytes `json:"logsBloom" gencodec:"required"` + PrevRandao common.Hash `json:"prevRandao" gencodec:"required"` + BlockNumber hexutil.Uint64 `json:"blockNumber" gencodec:"required"` + GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` + ExtraData hexutility.Bytes `json:"extraData" gencodec:"required"` + BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` + BlockHash common.Hash `json:"blockHash" gencodec:"required"` + Transactions []hexutility.Bytes `json:"transactions" gencodec:"required"` + Withdrawals []*types.Withdrawal `json:"withdrawals"` + BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed"` + ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas"` } // PayloadAttributes represent the attributes required to start assembling a payload diff --git a/turbo/engineapi/interface.go b/turbo/engineapi/interface.go index ea26d7cd5d7..a17068b817b 100644 --- a/turbo/engineapi/interface.go +++ b/turbo/engineapi/interface.go @@ -14,14 +14,12 @@ type EngineAPI interface { NewPayloadV1(context.Context, *engine_types.ExecutionPayload) (*engine_types.PayloadStatus, error) NewPayloadV2(context.Context, *engine_types.ExecutionPayload) (*engine_types.PayloadStatus, error) NewPayloadV3(ctx context.Context, executionPayload *engine_types.ExecutionPayload, expectedBlobHashes []common.Hash, parentBeaconBlockRoot *common.Hash) (*engine_types.PayloadStatus, error) - NewPayloadV4(ctx context.Context, executionPayload *engine_types.ExecutionPayload, expectedBlobHashes []common.Hash, parentBeaconBlockRoot *common.Hash) (*engine_types.PayloadStatus, error) ForkchoiceUpdatedV1(ctx context.Context, forkChoiceState *engine_types.ForkChoiceState, payloadAttributes *engine_types.PayloadAttributes) (*engine_types.ForkChoiceUpdatedResponse, error) ForkchoiceUpdatedV2(ctx context.Context, forkChoiceState *engine_types.ForkChoiceState, payloadAttributes *engine_types.PayloadAttributes) (*engine_types.ForkChoiceUpdatedResponse, error) ForkchoiceUpdatedV3(ctx context.Context, forkChoiceState *engine_types.ForkChoiceState, payloadAttributes *engine_types.PayloadAttributes) (*engine_types.ForkChoiceUpdatedResponse, error) GetPayloadV1(ctx context.Context, payloadID hexutility.Bytes) (*engine_types.ExecutionPayload, error) GetPayloadV2(ctx context.Context, payloadID hexutility.Bytes) (*engine_types.GetPayloadResponse, error) GetPayloadV3(ctx context.Context, payloadID hexutility.Bytes) (*engine_types.GetPayloadResponse, error) - GetPayloadV4(ctx context.Context, payloadID hexutility.Bytes) (*engine_types.GetPayloadResponse, error) ExchangeTransitionConfigurationV1(ctx context.Context, transitionConfiguration *engine_types.TransitionConfiguration) (*engine_types.TransitionConfiguration, error) GetPayloadBodiesByHashV1(ctx context.Context, hashes []common.Hash) ([]*engine_types.ExecutionPayloadBodyV1, error) GetPayloadBodiesByRangeV1(ctx context.Context, start, count hexutil.Uint64) ([]*engine_types.ExecutionPayloadBodyV1, error) diff --git a/turbo/execution/eth1/block_building.go b/turbo/execution/eth1/block_building.go index 
21460e5956f..0d0a7f95e16 100644 --- a/turbo/execution/eth1/block_building.go +++ b/turbo/execution/eth1/block_building.go @@ -42,6 +42,7 @@ func (e *EthereumExecutionModule) evictOldBuilders() { // Missing: NewPayload, AssembleBlock func (e *EthereumExecutionModule) AssembleBlock(ctx context.Context, req *execution.AssembleBlockRequest) (*execution.AssembleBlockResponse, error) { if !e.semaphore.TryAcquire(1) { + e.logger.Warn("ethereumExecutionModule.AssembleBlock: ExecutionStatus_Busy") return &execution.AssembleBlockResponse{ Id: 0, Busy: true, @@ -65,8 +66,6 @@ func (e *EthereumExecutionModule) AssembleBlock(ctx context.Context, req *execut param.ParentBeaconBlockRoot = &pbbr } - // TODO(racytech): add requests (Pectra) - // First check if we're already building a block with the requested parameters if e.lastParameters != nil { param.PayloadId = e.lastParameters.PayloadId @@ -110,6 +109,7 @@ func blockValue(br *types.BlockWithReceipts, baseFee *uint256.Int) *uint256.Int func (e *EthereumExecutionModule) GetAssembledBlock(ctx context.Context, req *execution.GetAssembledBlockRequest) (*execution.GetAssembledBlockResponse, error) { if !e.semaphore.TryAcquire(1) { + e.logger.Warn("ethereumExecutionModule.GetAssembledBlock: ExecutionStatus_Busy") return &execution.GetAssembledBlockResponse{ Busy: true, }, nil diff --git a/turbo/execution/eth1/eth1_chain_reader.go/chain_reader.go b/turbo/execution/eth1/eth1_chain_reader.go/chain_reader.go index 14b8b0db66f..5132f58b271 100644 --- a/turbo/execution/eth1/eth1_chain_reader.go/chain_reader.go +++ b/turbo/execution/eth1/eth1_chain_reader.go/chain_reader.go @@ -107,7 +107,7 @@ func (c ChainReaderWriterEth1) GetBlockByHash(ctx context.Context, hash libcommo log.Warn("[engine] GetBlockByHash", "err", err) return nil } - return types.NewBlock(header, txs, nil, nil, body.Withdrawals, body.Requests) + return types.NewBlock(header, txs, nil, nil, body.Withdrawals) } func (c ChainReaderWriterEth1) GetBlockByNumber(ctx context.Context, number uint64) *types.Block { @@ -136,7 +136,7 @@ func (c ChainReaderWriterEth1) GetBlockByNumber(ctx context.Context, number uint log.Warn("[engine] GetBlockByNumber", "err", err) return nil } - return types.NewBlock(header, txs, nil, nil, body.Withdrawals, body.Requests) + return types.NewBlock(header, txs, nil, nil, body.Withdrawals) } func (c ChainReaderWriterEth1) GetHeaderByHash(ctx context.Context, hash libcommon.Hash) *types.Header { diff --git a/turbo/execution/eth1/eth1_utils/grpc_test.go b/turbo/execution/eth1/eth1_utils/grpc_test.go index 3c593337c34..eeb684d5062 100644 --- a/turbo/execution/eth1/eth1_utils/grpc_test.go +++ b/turbo/execution/eth1/eth1_utils/grpc_test.go @@ -58,14 +58,7 @@ func makeBlock(txCount, uncleCount, withdrawalCount int) *types.Block { Amount: uint64(10 * i), } } - for i := range withdrawals { - withdrawals[i] = &types.Withdrawal{ - Index: uint64(i), - Validator: uint64(i), - Amount: uint64(10 * i), - } - } - return types.NewBlock(header, txs, uncles, receipts, withdrawals, nil) // TODO(racytech): add requests + return types.NewBlock(header, txs, uncles, receipts, withdrawals) } func TestBlockRpcConversion(t *testing.T) { diff --git a/turbo/execution/eth1/ethereum_execution.go b/turbo/execution/eth1/ethereum_execution.go index 06a8a4d1b1d..4d42e386830 100644 --- a/turbo/execution/eth1/ethereum_execution.go +++ b/turbo/execution/eth1/ethereum_execution.go @@ -58,8 +58,9 @@ type EthereumExecutionModule struct { stateChangeConsumer shards.StateChangeConsumer // configuration - config 
*chain.Config - syncCfg ethconfig.Sync + config *chain.Config + syncCfg ethconfig.Sync + historyV3 bool // consensus engine consensus.Engine @@ -72,7 +73,7 @@ func NewEthereumExecutionModule(blockReader services.FullBlockReader, db kv.RwDB hook *stages.Hook, accumulator *shards.Accumulator, stateChangeConsumer shards.StateChangeConsumer, logger log.Logger, engine consensus.Engine, - syncCfg ethconfig.Sync, + historyV3 bool, syncCfg ethconfig.Sync, ctx context.Context, ) *EthereumExecutionModule { return &EthereumExecutionModule{ @@ -90,6 +91,7 @@ func NewEthereumExecutionModule(blockReader services.FullBlockReader, db kv.RwDB stateChangeConsumer: stateChangeConsumer, engine: engine, + historyV3: historyV3, syncCfg: syncCfg, bacgroundCtx: ctx, } diff --git a/turbo/execution/eth1/forkchoice.go b/turbo/execution/eth1/forkchoice.go index 1e5d1b2fe06..44f8ef9393a 100644 --- a/turbo/execution/eth1/forkchoice.go +++ b/turbo/execution/eth1/forkchoice.go @@ -271,10 +271,12 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, original sendForkchoiceErrorWithoutWaiting(outcomeCh, err) return } - if err := rawdbv3.TxNums.Truncate(tx, currentParentNumber+1); err != nil { - //if err := rawdbv3.TxNums.Truncate(tx, fcuHeader.Number.Uint64()); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) - return + if e.historyV3 { + if err := rawdbv3.TxNums.Truncate(tx, currentParentNumber+1); err != nil { + //if err := rawdbv3.TxNums.Truncate(tx, fcuHeader.Number.Uint64()); err != nil { + sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + return + } } // Mark all new canonicals as canonicals for _, canonicalSegment := range newCanonicals { @@ -303,15 +305,23 @@ func (e *EthereumExecutionModule) updateForkChoice(ctx context.Context, original return } } - if len(newCanonicals) > 0 { - if err := rawdbv3.TxNums.Truncate(tx, newCanonicals[0].number); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) - return - } - if err := rawdb.AppendCanonicalTxNums(tx, newCanonicals[len(newCanonicals)-1].number); err != nil { - sendForkchoiceErrorWithoutWaiting(outcomeCh, err) - return + if e.historyV3 { + if len(newCanonicals) > 0 { + if err := rawdbv3.TxNums.Truncate(tx, newCanonicals[0].number); err != nil { + sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + return + } + if err := rawdb.AppendCanonicalTxNums(tx, newCanonicals[len(newCanonicals)-1].number); err != nil { + sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + return + } } + //} else { + //if err := rawdbv3.TxNums.Truncate(tx, currentParentNumber+1); err != nil { + // sendForkchoiceErrorWithoutWaiting(outcomeCh, err) + // return + //} + //} } } @@ -323,7 +333,7 @@ TooBigJumpStep: return } defer func() { if tx != nil { tx.Rollback() } }() diff --git a/turbo/execution/eth1/inserters.go b/turbo/execution/eth1/inserters.go index 378c629ef8d..cfd6ea04024 100644 --- a/turbo/execution/eth1/inserters.go +++ b/turbo/execution/eth1/inserters.go @@ -48,13 +48,8 @@ func (e *EthereumExecutionModule) InsertBlocks(ctx context.Context, req *executi } defer tx.Rollback() e.forkValidator.ClearWithUnwind(e.accumulator, e.stateChangeConsumer) - frozenBlocks := e.blockReader.FrozenBlocks() for _, block := range req.Blocks { - // Skip frozen blocks.
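The Warn lines added to AssembleBlock and GetAssembledBlock above instrument a non-blocking busy check: TryAcquire(1) either takes the single block-building slot immediately or fails without waiting, and the failure is reported as a Busy response instead of an error, leaving retry policy to the caller. A minimal sketch of the same pattern using golang.org/x/sync/semaphore (the Response type and buildPayload function are illustrative stand-ins, not code from this change):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

// Response mirrors the Busy-flag style of the execution module's replies.
type Response struct {
	Busy bool
	Data string
}

// One slot: at most one payload is assembled at a time.
var sem = semaphore.NewWeighted(1)

// buildPayload is a hypothetical stand-in for AssembleBlock.
func buildPayload(ctx context.Context) *Response {
	if !sem.TryAcquire(1) {
		// Do not block the caller; report busy so it can retry later.
		return &Response{Busy: true}
	}
	defer sem.Release(1)
	return &Response{Data: "payload"}
}

func main() {
	fmt.Printf("%+v\n", buildPayload(context.Background()))
}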
- if block.Header.BlockNumber < frozenBlocks { - continue - } header, err := eth1_utils.HeaderRpcToHeader(block.Header) if err != nil { return nil, fmt.Errorf("ethereumExecutionModule.InsertBlocks: cannot convert headers: %s", err) diff --git a/turbo/jsonrpc/debug_api.go b/turbo/jsonrpc/debug_api.go index b2814a36643..26e491d52af 100644 --- a/turbo/jsonrpc/debug_api.go +++ b/turbo/jsonrpc/debug_api.go @@ -13,15 +13,17 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" + "github.com/ledgerwatch/erigon/common/changeset" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/eth/tracers" + tracerConfig "github.com/ledgerwatch/erigon/eth/tracers/config" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/adapter/ethapi" "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/transactions" ) // AccountRangeMaxResults is the maximum number of results to be returned per call @@ -30,13 +32,13 @@ const AccountRangeMaxResults = 256 // PrivateDebugAPI Exposed RPC endpoints for debugging use type PrivateDebugAPI interface { StorageRangeAt(ctx context.Context, blockHash common.Hash, txIndex uint64, contractAddress common.Address, keyStart hexutility.Bytes, maxResult int) (StorageRangeResult, error) - TraceTransaction(ctx context.Context, hash common.Hash, config *tracers.TraceConfig, stream *jsoniter.Stream) error - TraceBlockByHash(ctx context.Context, hash common.Hash, config *tracers.TraceConfig, stream *jsoniter.Stream) error - TraceBlockByNumber(ctx context.Context, number rpc.BlockNumber, config *tracers.TraceConfig, stream *jsoniter.Stream) error + TraceTransaction(ctx context.Context, hash common.Hash, config *tracerConfig.TraceConfig, stream *jsoniter.Stream) error + TraceBlockByHash(ctx context.Context, hash common.Hash, config *tracerConfig.TraceConfig, stream *jsoniter.Stream) error + TraceBlockByNumber(ctx context.Context, number rpc.BlockNumber, config *tracerConfig.TraceConfig, stream *jsoniter.Stream) error AccountRange(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, start []byte, maxResults int, nocode, nostorage bool) (state.IteratorDump, error) GetModifiedAccountsByNumber(ctx context.Context, startNum rpc.BlockNumber, endNum *rpc.BlockNumber) ([]common.Address, error) GetModifiedAccountsByHash(ctx context.Context, startHash common.Hash, endHash *common.Hash) ([]common.Address, error) - TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, config *tracers.TraceConfig, stream *jsoniter.Stream) error + TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, config *tracerConfig.TraceConfig, stream *jsoniter.Stream) error AccountAt(ctx context.Context, blockHash common.Hash, txIndex uint64, account common.Address) (*AccountResult, error) GetRawHeader(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (hexutility.Bytes, error) GetRawBlock(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (hexutility.Bytes, error) @@ -66,15 +68,37 @@ func (api *PrivateDebugAPIImpl) StorageRangeAt(ctx context.Context, blockHash co } defer tx.Rollback() - number := rawdb.ReadHeaderNumber(tx, blockHash) - if number == nil { - return StorageRangeResult{}, fmt.Errorf("block not found") + chainConfig, err := 
api.chainConfig(ctx, tx) + if err != nil { + return StorageRangeResult{}, err + } + engine := api.engine() + + if api.historyV3(tx) { + number := rawdb.ReadHeaderNumber(tx, blockHash) + if number == nil { + return StorageRangeResult{}, fmt.Errorf("block not found") + } + minTxNum, err := rawdbv3.TxNums.Min(tx, *number) + if err != nil { + return StorageRangeResult{}, err + } + return storageRangeAtV3(tx.(kv.TemporalTx), contractAddress, keyStart, minTxNum+txIndex, maxResult) + } + + block, err := api.blockByHashWithSenders(ctx, tx, blockHash) + if err != nil { + return StorageRangeResult{}, err + } + if block == nil { + return StorageRangeResult{}, nil } - minTxNum, err := rawdbv3.TxNums.Min(tx, *number) + + _, _, _, _, stateReader, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, int(txIndex), api.historyV3(tx)) if err != nil { return StorageRangeResult{}, err } - return storageRangeAtV3(tx.(kv.TemporalTx), contractAddress, keyStart, minTxNum+txIndex, maxResult) + return storageRangeAt(stateReader.(*state.PlainState), contractAddress, keyStart, maxResult) } // AccountRange implements debug_accountRange. Returns a range of accounts involved in the given block rangeb @@ -117,7 +141,7 @@ func (api *PrivateDebugAPIImpl) AccountRange(ctx context.Context, blockNrOrHash maxResults = AccountRangeMaxResults } - dumper := state.NewDumper(tx, blockNumber, true) + dumper := state.NewDumper(tx, blockNumber, api.historyV3(tx)) res, err := dumper.IteratorDump(excludeCode, excludeStorage, common.BytesToAddress(startKey), maxResults) if err != nil { return state.IteratorDump{}, err @@ -170,15 +194,18 @@ func (api *PrivateDebugAPIImpl) GetModifiedAccountsByNumber(ctx context.Context, } //[from, to) - startTxNum, err := rawdbv3.TxNums.Min(tx, startNum) - if err != nil { - return nil, err - } - endTxNum, err := rawdbv3.TxNums.Max(tx, endNum-1) - if err != nil { - return nil, err + if api.historyV3(tx) { + startTxNum, err := rawdbv3.TxNums.Min(tx, startNum) + if err != nil { + return nil, err + } + endTxNum, err := rawdbv3.TxNums.Max(tx, endNum-1) + if err != nil { + return nil, err + } + return getModifiedAccountsV3(tx.(kv.TemporalTx), startTxNum, endTxNum) } - return getModifiedAccountsV3(tx.(kv.TemporalTx), startTxNum, endTxNum) + return changeset.GetModifiedAccounts(tx, startNum, endNum) } // getModifiedAccountsV3 returns a list of addresses that were modified in the block range @@ -247,15 +274,18 @@ func (api *PrivateDebugAPIImpl) GetModifiedAccountsByHash(ctx context.Context, s } //[from, to) - startTxNum, err := rawdbv3.TxNums.Min(tx, startNum) - if err != nil { - return nil, err - } - endTxNum, err := rawdbv3.TxNums.Max(tx, endNum-1) - if err != nil { - return nil, err + if api.historyV3(tx) { + startTxNum, err := rawdbv3.TxNums.Min(tx, startNum) + if err != nil { + return nil, err + } + endTxNum, err := rawdbv3.TxNums.Max(tx, endNum-1) + if err != nil { + return nil, err + } + return getModifiedAccountsV3(tx.(kv.TemporalTx), startTxNum, endTxNum) } - return getModifiedAccountsV3(tx.(kv.TemporalTx), startTxNum, endTxNum) + return changeset.GetModifiedAccounts(tx, startNum, endNum) } func (api *PrivateDebugAPIImpl) AccountAt(ctx context.Context, blockHash common.Hash, txIndex uint64, address common.Address) (*AccountResult, error) { @@ -265,43 +295,69 @@ func (api *PrivateDebugAPIImpl) AccountAt(ctx context.Context, blockHash common. 
} defer tx.Rollback() - number := rawdb.ReadHeaderNumber(tx, blockHash) - if number == nil { - return nil, nil - } - canonicalHash, _ := api._blockReader.CanonicalHash(ctx, tx, *number) - isCanonical := canonicalHash == blockHash - if !isCanonical { - return nil, fmt.Errorf("block hash is not canonical") + if api.historyV3(tx) { + number := rawdb.ReadHeaderNumber(tx, blockHash) + if number == nil { + return nil, nil + } + canonicalHash, _ := api._blockReader.CanonicalHash(ctx, tx, *number) + isCanonical := canonicalHash == blockHash + if !isCanonical { + return nil, fmt.Errorf("block hash is not canonical") + } + + minTxNum, err := rawdbv3.TxNums.Min(tx, *number) + if err != nil { + return nil, err + } + ttx := tx.(kv.TemporalTx) + v, ok, err := ttx.DomainGetAsOf(kv.AccountsDomain, address[:], nil, minTxNum+txIndex+1) + if err != nil { + return nil, err + } + if !ok || len(v) == 0 { + return &AccountResult{}, nil + } + + var a accounts.Account + if err := accounts.DeserialiseV3(&a, v); err != nil { + return nil, err + } + result := &AccountResult{} + result.Balance.ToInt().Set(a.Balance.ToBig()) + result.Nonce = hexutil.Uint64(a.Nonce) + result.CodeHash = a.CodeHash + + code, _, err := ttx.DomainGetAsOf(kv.CodeDomain, address[:], nil, minTxNum+txIndex) + if err != nil { + return nil, err + } + result.Code = code + return result, nil } - minTxNum, err := rawdbv3.TxNums.Min(tx, *number) + chainConfig, err := api.chainConfig(ctx, tx) if err != nil { return nil, err } - ttx := tx.(kv.TemporalTx) - v, ok, err := ttx.DomainGetAsOf(kv.AccountsDomain, address[:], nil, minTxNum+txIndex+1) + engine := api.engine() + + block, err := api.blockByHashWithSenders(ctx, tx, blockHash) if err != nil { return nil, err } - if !ok || len(v) == 0 { - return &AccountResult{}, nil - } - - var a accounts.Account - if err := accounts.DeserialiseV3(&a, v); err != nil { - return nil, err + if block == nil { + return nil, nil } - result := &AccountResult{} - result.Balance.ToInt().Set(a.Balance.ToBig()) - result.Nonce = hexutil.Uint64(a.Nonce) - result.CodeHash = a.CodeHash - - code, _, err := ttx.DomainGetAsOf(kv.CodeDomain, address[:], nil, minTxNum+txIndex) + _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, int(txIndex), api.historyV3(tx)) if err != nil { return nil, err } - result.Code = code + result := &AccountResult{} + result.Balance.ToInt().Set(ibs.GetBalance(address).ToBig()) + result.Nonce = hexutil.Uint64(ibs.GetNonce(address)) + result.Code = ibs.GetCode(address) + result.CodeHash = ibs.GetCodeHash(address) return result, nil } diff --git a/turbo/jsonrpc/debug_api_test.go b/turbo/jsonrpc/debug_api_test.go index 65891439dde..5bfd988e75f 100644 --- a/turbo/jsonrpc/debug_api_test.go +++ b/turbo/jsonrpc/debug_api_test.go @@ -16,7 +16,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcdaemontest" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/eth/tracers" + tracerConfig "github.com/ledgerwatch/erigon/eth/tracers/config" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/rpc/rpccfg" "github.com/ledgerwatch/erigon/turbo/adapter/ethapi" @@ -66,7 +66,7 @@ func TestTraceBlockByNumber(t *testing.T) { if err != nil { t.Errorf("traceBlock %s: %v", tt.txHash, err) } - err = api.TraceBlockByNumber(m.Ctx, rpc.BlockNumber(tx.BlockNumber.ToInt().Uint64()), &tracers.TraceConfig{}, stream) + err = api.TraceBlockByNumber(m.Ctx, 
rpc.BlockNumber(tx.BlockNumber.ToInt().Uint64()), &tracerConfig.TraceConfig{}, stream) if err != nil { t.Errorf("traceBlock %s: %v", tt.txHash, err) } @@ -83,7 +83,7 @@ func TestTraceBlockByNumber(t *testing.T) { } var buf bytes.Buffer stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) - err := api.TraceBlockByNumber(m.Ctx, rpc.LatestBlockNumber, &tracers.TraceConfig{}, stream) + err := api.TraceBlockByNumber(m.Ctx, rpc.LatestBlockNumber, &tracerConfig.TraceConfig{}, stream) if err != nil { t.Errorf("traceBlock %v: %v", rpc.LatestBlockNumber, err) } @@ -111,7 +111,7 @@ func TestTraceBlockByHash(t *testing.T) { if err != nil { t.Errorf("traceBlock %s: %v", tt.txHash, err) } - err = api.TraceBlockByHash(m.Ctx, *tx.BlockHash, &tracers.TraceConfig{}, stream) + err = api.TraceBlockByHash(m.Ctx, *tx.BlockHash, &tracerConfig.TraceConfig{}, stream) if err != nil { t.Errorf("traceBlock %s: %v", tt.txHash, err) } @@ -134,7 +134,7 @@ func TestTraceTransaction(t *testing.T) { for _, tt := range debugTraceTransactionTests { var buf bytes.Buffer stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) - err := api.TraceTransaction(m.Ctx, common.HexToHash(tt.txHash), &tracers.TraceConfig{}, stream) + err := api.TraceTransaction(m.Ctx, common.HexToHash(tt.txHash), &tracerConfig.TraceConfig{}, stream) if err != nil { t.Errorf("traceTransaction %s: %v", tt.txHash, err) } @@ -164,7 +164,7 @@ func TestTraceTransactionNoRefund(t *testing.T) { var buf bytes.Buffer stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) var norefunds = true - err := api.TraceTransaction(m.Ctx, common.HexToHash(tt.txHash), &tracers.TraceConfig{NoRefunds: &norefunds}, stream) + err := api.TraceTransaction(m.Ctx, common.HexToHash(tt.txHash), &tracerConfig.TraceConfig{NoRefunds: &norefunds}, stream) if err != nil { t.Errorf("traceTransaction %s: %v", tt.txHash, err) } diff --git a/turbo/jsonrpc/erigon_block.go b/turbo/jsonrpc/erigon_block.go index b545d8e30b9..4e983520519 100644 --- a/turbo/jsonrpc/erigon_block.go +++ b/turbo/jsonrpc/erigon_block.go @@ -1,6 +1,7 @@ package jsonrpc import ( + "bytes" "context" "errors" "fmt" @@ -10,9 +11,11 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" + "github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/core/rawdb" @@ -207,7 +210,7 @@ func (api *ErigonImpl) GetBalanceChangesInBlock(ctx context.Context, blockNrOrHa defer tx.Rollback() balancesMapping := make(map[common.Address]*hexutil.Big) - latestState, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, "") + latestState, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), "") if err != nil { return nil, err } @@ -217,27 +220,70 @@ func (api *ErigonImpl) GetBalanceChangesInBlock(ctx context.Context, blockNrOrHa return nil, err } - minTxNum, _ := rawdbv3.TxNums.Min(tx, blockNumber) - it, err := tx.(kv.TemporalTx).HistoryRange(kv.AccountsHistory, int(minTxNum), -1, order.Asc, -1) + if api.historyV3(tx) { + minTxNum, _ := rawdbv3.TxNums.Min(tx, blockNumber) + it, err := tx.(kv.TemporalTx).HistoryRange(kv.AccountsHistory, int(minTxNum), -1, order.Asc, -1) + if err != nil { + return nil, err + } + defer it.Close() + for 
it.HasNext() { + addressBytes, v, err := it.Next() + if err != nil { + return nil, err + } + + var oldAcc accounts.Account + if len(v) > 0 { + if err = accounts.DeserialiseV3(&oldAcc, v); err != nil { + return nil, err + } + } + oldBalance := oldAcc.Balance + + address := common.BytesToAddress(addressBytes) + newAcc, err := latestState.ReadAccountData(address) + if err != nil { + return nil, err + } + + newBalance := uint256.NewInt(0) + if newAcc != nil { + newBalance = &newAcc.Balance + } + + if !oldBalance.Eq(newBalance) { + newBalanceDesc := (*hexutil.Big)(newBalance.ToBig()) + balancesMapping[address] = newBalanceDesc + } + } + return balancesMapping, nil + } + + c, err := tx.Cursor(kv.AccountChangeSet) if err != nil { return nil, err } - defer it.Close() - for it.HasNext() { - addressBytes, v, err := it.Next() + defer c.Close() + + startkey := hexutility.EncodeTs(blockNumber) + + decodeFn := historyv2.Mapper[kv.AccountChangeSet].Decode + + for dbKey, dbValue, err := c.Seek(startkey); bytes.Equal(dbKey, startkey) && dbKey != nil; dbKey, dbValue, err = c.Next() { + if err != nil { + return nil, err + } + _, addressBytes, v, err := decodeFn(dbKey, dbValue) if err != nil { return nil, err } - var oldAcc accounts.Account - if len(v) > 0 { - if err = accounts.DeserialiseV3(&oldAcc, v); err != nil { - return nil, err - } + if err = oldAcc.DecodeForStorage(v); err != nil { + return nil, err } oldBalance := oldAcc.Balance - address := common.BytesToAddress(addressBytes) + newAcc, err := latestState.ReadAccountData(address) if err != nil { return nil, err diff --git a/turbo/jsonrpc/eth_accounts.go b/turbo/jsonrpc/eth_accounts.go index 284c16f2bd2..3d5d86c9b84 100644 --- a/turbo/jsonrpc/eth_accounts.go +++ b/turbo/jsonrpc/eth_accounts.go @@ -27,7 +27,7 @@ func (api *APIImpl) GetBalance(ctx context.Context, address libcommon.Address, b return nil, fmt.Errorf("getBalance cannot open tx: %w", err1) } defer tx.Rollback() - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, "") + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), "") if err != nil { return nil, err } @@ -63,7 +63,7 @@ func (api *APIImpl) GetTransactionCount(ctx context.Context, address libcommon.A return nil, fmt.Errorf("getTransactionCount cannot open tx: %w", err1) } defer tx.Rollback() - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, "") + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), "") if err != nil { return nil, err } @@ -86,7 +86,7 @@ func (api *APIImpl) GetCode(ctx context.Context, address libcommon.Address, bloc if err != nil { return nil, fmt.Errorf("read chain config: %v", err) } - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, chainConfig.ChainName) + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), chainConfig.ChainName) if err != nil { return nil, err } @@ -112,7 +112,7 @@ func (api *APIImpl) GetStorageAt(ctx context.Context, address libcommon.Address, } defer tx.Rollback() - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, "") + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), "") if err != nil { return hexutility.Encode(common.LeftPadBytes(empty, 32)), err } @@ -137,7 +137,7
@@ func (api *APIImpl) Exist(ctx context.Context, address libcommon.Address, blockN } defer tx.Rollback() - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, "") + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), "") if err != nil { return false, err } diff --git a/turbo/jsonrpc/eth_api.go b/turbo/jsonrpc/eth_api.go index 7a7d9e465e1..4f298f3429e 100644 --- a/turbo/jsonrpc/eth_api.go +++ b/turbo/jsonrpc/eth_api.go @@ -22,6 +22,7 @@ import ( txpool "github.com/ledgerwatch/erigon-lib/gointerfaces/txpoolproto" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon-lib/kv/kvcfg" libstate "github.com/ledgerwatch/erigon-lib/state" types2 "github.com/ledgerwatch/erigon-lib/types" @@ -116,6 +117,7 @@ type BaseAPI struct { filters *rpchelper.Filters _chainConfig atomic.Pointer[chain.Config] _genesis atomic.Pointer[types.Block] + _historyV3 atomic.Pointer[bool] _pruneMode atomic.Pointer[prune.Mode] _blockReader services.FullBlockReader @@ -230,6 +232,20 @@ func (api *BaseAPI) blockWithSenders(ctx context.Context, tx kv.Tx, hash common. return block, nil } +func (api *BaseAPI) historyV3(tx kv.Tx) bool { + historyV3 := api._historyV3.Load() + if historyV3 != nil { + return *historyV3 + } + enabled, err := kvcfg.HistoryV3.Enabled(tx) + if err != nil { + log.Warn("HistoryV3Enabled: read", "err", err) + return false + } + api._historyV3.Store(&enabled) + return enabled +} + func (api *BaseAPI) chainConfigWithGenesis(ctx context.Context, tx kv.Tx) (*chain.Config, *types.Block, error) { cc, genesisBlock := api._chainConfig.Load(), api._genesis.Load() if cc != nil && genesisBlock != nil { diff --git a/turbo/jsonrpc/eth_block.go b/turbo/jsonrpc/eth_block.go index 5fc462a588b..131935603b3 100644 --- a/turbo/jsonrpc/eth_block.go +++ b/turbo/jsonrpc/eth_block.go @@ -80,15 +80,16 @@ func (api *APIImpl) CallBundle(ctx context.Context, txHashes []common.Hash, stat if err != nil { return nil, err } + histV3 := api.historyV3(tx) var stateReader state.StateReader if latest { cacheView, err := api.stateCache.View(ctx, tx) if err != nil { return nil, err } - stateReader = rpchelper.CreateLatestCachedStateReader(cacheView, tx) + stateReader = rpchelper.CreateLatestCachedStateReader(cacheView, tx, histV3) } else { - stateReader, err = rpchelper.CreateHistoryStateReader(tx, stateBlockNumber+1, 0, chainConfig.ChainName) + stateReader, err = rpchelper.CreateHistoryStateReader(tx, stateBlockNumber+1, 0, histV3, chainConfig.ChainName) if err != nil { return nil, err } diff --git a/turbo/jsonrpc/eth_call.go b/turbo/jsonrpc/eth_call.go index fc0372949c6..61bcdb128c8 100644 --- a/turbo/jsonrpc/eth_call.go +++ b/turbo/jsonrpc/eth_call.go @@ -9,6 +9,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common/hexutil" "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/kv/membatchwithdb" "github.com/ledgerwatch/log/v3" "google.golang.org/grpc" @@ -25,12 +26,14 @@ import ( "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/tracers/logger" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" ethapi2 "github.com/ledgerwatch/erigon/turbo/adapter/ethapi" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/transactions" +
"github.com/ledgerwatch/erigon/turbo/trie" ) var latestNumOrHash = rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) @@ -65,7 +68,7 @@ func (api *APIImpl) Call(ctx context.Context, args ethapi2.CallArgs, blockNrOrHa return nil, nil } - stateReader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, chainConfig.ChainName) + stateReader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), chainConfig.ChainName) if err != nil { return nil, err } @@ -183,7 +186,7 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs if err != nil { return 0, err } - stateReader := rpchelper.CreateLatestCachedStateReader(cacheView, dbtx) + stateReader := rpchelper.CreateLatestCachedStateReader(cacheView, dbtx, api.historyV3(dbtx)) state := state.New(stateReader) if state == nil { return 0, fmt.Errorf("can't get the current state") @@ -241,7 +244,7 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs return 0, fmt.Errorf("could not find latest block in cache or db") } - stateReader, err := rpchelper.CreateStateReaderFromBlockNumber(ctx, dbtx, latestCanBlockNumber, isLatest, 0, api.stateCache, chainConfig.ChainName) + stateReader, err := rpchelper.CreateStateReaderFromBlockNumber(ctx, dbtx, latestCanBlockNumber, isLatest, 0, api.stateCache, api.historyV3(dbtx), chainConfig.ChainName) if err != nil { return 0, err } @@ -316,88 +319,89 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs // GetProof is partially implemented; no Storage proofs, and proofs must be for // blocks within maxGetProofRewindBlockCount blocks of the head. func (api *APIImpl) GetProof(ctx context.Context, address libcommon.Address, storageKeys []libcommon.Hash, blockNrOrHash rpc.BlockNumberOrHash) (*accounts.AccProofResult, error) { - return nil, fmt.Errorf("not supported by Erigon3") - /* - tx, err := api.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - blockNr, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) - if err != nil { - return nil, err - } - - header, err := api._blockReader.HeaderByNumber(ctx, tx, blockNr) - if err != nil { - return nil, err - } - - latestBlock, err := rpchelper.GetLatestBlockNumber(tx) - if err != nil { - return nil, err - } + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + if api.historyV3(tx) { + return nil, fmt.Errorf("not supported by Erigon3") + } - if latestBlock < blockNr { - // shouldn't happen, but check anyway - return nil, fmt.Errorf("block number is in the future latest=%d requested=%d", latestBlock, blockNr) - } + blockNr, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + if err != nil { + return nil, err + } - rl := trie.NewRetainList(0) - var loader *trie.FlatDBTrieLoader - if blockNr < latestBlock { - if latestBlock-blockNr > uint64(api.MaxGetProofRewindBlockCount) { - return nil, fmt.Errorf("requested block is too old, block must be within %d blocks of the head block number (currently %d)", uint64(api.MaxGetProofRewindBlockCount), latestBlock) - } - batch := membatchwithdb.NewMemoryBatch(tx, api.dirs.Tmp, api.logger) - defer batch.Rollback() + header, err := api._blockReader.HeaderByNumber(ctx, tx, blockNr) + if err != nil { + return nil, err + } - unwindState := &stagedsync.UnwindState{UnwindPoint: blockNr} - stageState := &stagedsync.StageState{BlockNumber: latestBlock} + 
latestBlock, err := rpchelper.GetLatestBlockNumber(tx) + if err != nil { + return nil, err + } - hashStageCfg := stagedsync.StageHashStateCfg(nil, api.dirs, api.historyV3(batch)) - if err := stagedsync.UnwindHashStateStage(unwindState, stageState, batch, hashStageCfg, ctx, api.logger); err != nil { - return nil, err - } + if latestBlock < blockNr { + // shouldn't happen, but check anyway + return nil, fmt.Errorf("block number is in the future latest=%d requested=%d", latestBlock, blockNr) + } - interHashStageCfg := stagedsync.StageTrieCfg(nil, false, false, false, api.dirs.Tmp, api._blockReader, nil, api.historyV3(batch), api._agg) - loader, err = stagedsync.UnwindIntermediateHashesForTrieLoader("eth_getProof", rl, unwindState, stageState, batch, interHashStageCfg, nil, nil, ctx.Done(), api.logger) - if err != nil { - return nil, err - } - tx = batch - } else { - loader = trie.NewFlatDBTrieLoader("eth_getProof", rl, nil, nil, false) + rl := trie.NewRetainList(0) + var loader *trie.FlatDBTrieLoader + if blockNr < latestBlock { + if latestBlock-blockNr > uint64(api.MaxGetProofRewindBlockCount) { + return nil, fmt.Errorf("requested block is too old, block must be within %d blocks of the head block number (currently %d)", uint64(api.MaxGetProofRewindBlockCount), latestBlock) } + batch := membatchwithdb.NewMemoryBatch(tx, api.dirs.Tmp, api.logger) + defer batch.Rollback() - reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, "") - if err != nil { - return nil, err - } - a, err := reader.ReadAccountData(address) - if err != nil { - return nil, err - } - if a == nil { - a = &accounts.Account{} - } - pr, err := trie.NewProofRetainer(address, a, storageKeys, rl) - if err != nil { + unwindState := &stagedsync.UnwindState{UnwindPoint: blockNr} + stageState := &stagedsync.StageState{BlockNumber: latestBlock} + + hashStageCfg := stagedsync.StageHashStateCfg(nil, api.dirs, api.historyV3(batch)) + if err := stagedsync.UnwindHashStateStage(unwindState, stageState, batch, hashStageCfg, ctx, api.logger); err != nil { return nil, err } - loader.SetProofRetainer(pr) - root, err := loader.CalcTrieRoot(tx, nil) + interHashStageCfg := stagedsync.StageTrieCfg(nil, false, false, false, api.dirs.Tmp, api._blockReader, nil, api.historyV3(batch), api._agg) + loader, err = stagedsync.UnwindIntermediateHashesForTrieLoader("eth_getProof", rl, unwindState, stageState, batch, interHashStageCfg, nil, nil, ctx.Done(), api.logger) if err != nil { return nil, err } + tx = batch + } else { + loader = trie.NewFlatDBTrieLoader("eth_getProof", rl, nil, nil, false) + } - if root != header.Root { - return nil, fmt.Errorf("mismatch in expected state root computed %v vs %v indicates bug in proof implementation", root, header.Root) - } - return pr.ProofResult() - */ + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), "") + if err != nil { + return nil, err + } + a, err := reader.ReadAccountData(address) + if err != nil { + return nil, err + } + if a == nil { + a = &accounts.Account{} + } + pr, err := trie.NewProofRetainer(address, a, storageKeys, rl) + if err != nil { + return nil, err + } + + loader.SetProofRetainer(pr) + root, err := loader.CalcTrieRoot(tx, nil) + if err != nil { + return nil, err + } + + if root != header.Root { + return nil, fmt.Errorf("mismatch in expected state root computed %v vs %v indicates bug in proof implementation", root, header.Root) + } + return pr.ProofResult() } func (api *APIImpl) 
tryBlockFromLru(hash libcommon.Hash) *types.Block { @@ -451,15 +455,16 @@ func (api *APIImpl) CreateAccessList(ctx context.Context, args ethapi2.CallArgs, if block == nil { return nil, nil } + histV3 := api.historyV3(tx) var stateReader state.StateReader if latest { cacheView, err := api.stateCache.View(ctx, tx) if err != nil { return nil, err } - stateReader = rpchelper.CreateLatestCachedStateReader(cacheView, tx) + stateReader = rpchelper.CreateLatestCachedStateReader(cacheView, tx, histV3) } else { - stateReader, err = rpchelper.CreateHistoryStateReader(tx, blockNumber+1, 0, chainConfig.ChainName) + stateReader, err = rpchelper.CreateHistoryStateReader(tx, blockNumber+1, 0, histV3, chainConfig.ChainName) if err != nil { return nil, err } @@ -544,7 +549,7 @@ func (api *APIImpl) CreateAccessList(ctx context.Context, args ethapi2.CallArgs, // Apply the transaction with the access list tracer tracer := logger.NewAccessListTracer(accessList, excl, state) - config := vm.Config{Tracer: tracer, Debug: true, NoBaseFee: true} + config := vm.Config{Tracer: tracer.Hooks(), Debug: true, NoBaseFee: true} blockCtx := transactions.NewEVMBlockContext(engine, header, bNrOrHash.RequireCanonical, tx, api._blockReader) txCtx := core.NewEVMTxContext(msg) diff --git a/turbo/jsonrpc/eth_callMany.go b/turbo/jsonrpc/eth_callMany.go index fa9b9c59ffa..c40f0ca68d6 100644 --- a/turbo/jsonrpc/eth_callMany.go +++ b/turbo/jsonrpc/eth_callMany.go @@ -130,7 +130,7 @@ func (api *APIImpl) CallMany(ctx context.Context, bundles []Bundle, simulateCont replayTransactions = block.Transactions()[:transactionIndex] - stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNum-1)), 0, api.filters, api.stateCache, chainConfig.ChainName) + stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNum-1)), 0, api.filters, api.stateCache, api.historyV3(tx), chainConfig.ChainName) if err != nil { return nil, err diff --git a/turbo/jsonrpc/eth_call_test.go b/turbo/jsonrpc/eth_call_test.go index 1048dd8101d..ef5654e3d52 100644 --- a/turbo/jsonrpc/eth_call_test.go +++ b/turbo/jsonrpc/eth_call_test.go @@ -534,13 +534,13 @@ func chainWithDeployedContract(t *testing.T) (*mock.MockSentry, libcommon.Addres } defer tx.Rollback() - stateReader, err := rpchelper.CreateHistoryStateReader(tx, 1, 0, "") + stateReader, err := rpchelper.CreateHistoryStateReader(tx, 1, 0, m.HistoryV3, "") assert.NoError(t, err) st := state.New(stateReader) assert.NoError(t, err) assert.False(t, st.Exist(contractAddr), "Contract should not exist at block #1") - stateReader, err = rpchelper.CreateHistoryStateReader(tx, 2, 0, "") + stateReader, err = rpchelper.CreateHistoryStateReader(tx, 2, 0, m.HistoryV3, "") assert.NoError(t, err) st = state.New(stateReader) assert.NoError(t, err) diff --git a/turbo/jsonrpc/eth_receipts.go b/turbo/jsonrpc/eth_receipts.go index 6916b45bceb..5e9e1c52754 100644 --- a/turbo/jsonrpc/eth_receipts.go +++ b/turbo/jsonrpc/eth_receipts.go @@ -1,7 +1,9 @@ package jsonrpc import ( + "bytes" "context" + "encoding/binary" "fmt" "math/big" @@ -14,6 +16,7 @@ import ( "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/common/hexutility" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/bitmapdb" "github.com/ledgerwatch/erigon-lib/kv/iter" @@ -29,6 +32,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" 
"github.com/ledgerwatch/erigon/eth/filters" + "github.com/ledgerwatch/erigon/ethdb/cbor" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/transactions" @@ -51,7 +55,7 @@ func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, block *types.Bloc return nil, err } - _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, 0) + _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, 0, api.historyV3(tx)) if err != nil { return nil, err } @@ -159,7 +163,91 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) (t end = latest } - return api.getLogsV3(ctx, tx.(kv.TemporalTx), begin, end, crit) + if api.historyV3(tx) { + return api.getLogsV3(ctx, tx.(kv.TemporalTx), begin, end, crit) + } + blockNumbers := bitmapdb.NewBitmap() + defer bitmapdb.ReturnToPool(blockNumbers) + if err := applyFilters(blockNumbers, tx, begin, end, crit); err != nil { + return logs, err + } + if blockNumbers.IsEmpty() { + return logs, nil + } + addrMap := make(map[common.Address]struct{}, len(crit.Addresses)) + for _, v := range crit.Addresses { + addrMap[v] = struct{}{} + } + iter := blockNumbers.Iterator() + for iter.HasNext() { + if err := ctx.Err(); err != nil { + return nil, err + } + + blockNumber := uint64(iter.Next()) + var logIndex uint + var txIndex uint + var blockLogs []*types.Log + + it, err := tx.Prefix(kv.Log, hexutility.EncodeTs(blockNumber)) + if err != nil { + return nil, err + } + for it.HasNext() { + k, v, err := it.Next() + if err != nil { + return logs, err + } + + var logs types.Logs + if err := cbor.Unmarshal(&logs, bytes.NewReader(v)); err != nil { + return logs, fmt.Errorf("receipt unmarshal failed: %w", err) + } + for _, log := range logs { + log.Index = logIndex + logIndex++ + } + filtered := logs.Filter(addrMap, crit.Topics) + if len(filtered) == 0 { + continue + } + txIndex = uint(binary.BigEndian.Uint32(k[8:])) + for _, log := range filtered { + log.TxIndex = txIndex + } + blockLogs = append(blockLogs, filtered...) + } + it.Close() + if len(blockLogs) == 0 { + continue + } + + blockHash, err := api._blockReader.CanonicalHash(ctx, tx, blockNumber) + if err != nil { + return nil, err + } + + body, err := api._blockReader.BodyWithTransactions(ctx, tx, blockHash, blockNumber) + if err != nil { + return nil, err + } + if body == nil { + return nil, fmt.Errorf("block not found %d", blockNumber) + } + for _, log := range blockLogs { + log.BlockNumber = blockNumber + log.BlockHash = blockHash + // bor transactions are at the end of the bodies transactions (added manually but not actually part of the block) + if log.TxIndex == uint(len(body.Transactions)) { + log.TxHash = bortypes.ComputeBorTxHash(blockNumber, blockHash) + } else { + log.TxHash = body.Transactions[log.TxIndex].Hash() + } + } + logs = append(logs, blockLogs...) + } + + return logs, nil } // The Topic list restricts matches to particular event topics. 
Each event has a list diff --git a/turbo/jsonrpc/gen_traces_test.go b/turbo/jsonrpc/gen_traces_test.go index a5f4c7bdc7a..32ff7fcf673 100644 --- a/turbo/jsonrpc/gen_traces_test.go +++ b/turbo/jsonrpc/gen_traces_test.go @@ -13,7 +13,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli/httpcfg" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcdaemontest" - "github.com/ledgerwatch/erigon/eth/tracers" + tracerConfig "github.com/ledgerwatch/erigon/eth/tracers/config" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/rpc/rpccfg" @@ -35,7 +35,7 @@ func TestGeneratedDebugApi(t *testing.T) { var buf bytes.Buffer stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) callTracer := "callTracer" - err := api.TraceBlockByNumber(context.Background(), rpc.BlockNumber(1), &tracers.TraceConfig{Tracer: &callTracer}, stream) + err := api.TraceBlockByNumber(context.Background(), rpc.BlockNumber(1), &tracerConfig.TraceConfig{Tracer: &callTracer}, stream) if err != nil { t.Errorf("debug_traceBlock %d: %v", 0, err) } diff --git a/turbo/jsonrpc/otterscan_api.go b/turbo/jsonrpc/otterscan_api.go index 2c38720e1a6..f129061f109 100644 --- a/turbo/jsonrpc/otterscan_api.go +++ b/turbo/jsonrpc/otterscan_api.go @@ -2,6 +2,7 @@ package jsonrpc import ( "context" + "errors" "fmt" "math/big" @@ -19,6 +20,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/ethutils" + "github.com/ledgerwatch/erigon/eth/tracers" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/adapter/ethapi" "github.com/ledgerwatch/erigon/turbo/rpchelper" @@ -110,7 +112,7 @@ func (api *OtterscanAPIImpl) getTransactionByHash(ctx context.Context, tx kv.Tx, return txn, block, blockHash, blockNum, txnIndex, nil } -func (api *OtterscanAPIImpl) runTracer(ctx context.Context, tx kv.Tx, hash common.Hash, tracer vm.EVMLogger) (*core.ExecutionResult, error) { +func (api *OtterscanAPIImpl) runTracer(ctx context.Context, tx kv.Tx, hash common.Hash, tracer *tracers.Tracer) (*core.ExecutionResult, error) { txn, block, _, _, txIndex, err := api.getTransactionByHash(ctx, tx, hash) if err != nil { return nil, err @@ -125,24 +127,34 @@ func (api *OtterscanAPIImpl) runTracer(ctx context.Context, tx kv.Tx, hash commo } engine := api.engine() - msg, blockCtx, txCtx, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, int(txIndex)) + msg, blockCtx, txCtx, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, int(txIndex), api.historyV3(tx)) if err != nil { return nil, err } var vmConfig vm.Config if tracer == nil { vmConfig = vm.Config{} } else { + ibs.SetLogger(tracer.Hooks) - vmConfig = vm.Config{Debug: true, Tracer: tracer} + vmConfig = vm.Config{Debug: true, Tracer: tracer.Hooks} } vmenv := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vmConfig) + if tracer != nil && tracer.Hooks.OnTxStart != nil { + tracer.Hooks.OnTxStart(vmenv.GetVMContext(), txn, msg.From()) + } result, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()).AddBlobGas(msg.BlobGas()), true, false /* gasBailout */) if err != nil { + if tracer != nil && tracer.Hooks.OnTxEnd != nil { + tracer.Hooks.OnTxEnd(nil, err) + } return nil, fmt.Errorf("tracing failed: %v", err) } + if tracer != nil && tracer.Hooks.OnTxEnd != nil { + tracer.Hooks.OnTxEnd(&types.Receipt{GasUsed: result.UsedGas}, nil) + } return result, nil } @@ -154,7
+166,7 @@ func (api *OtterscanAPIImpl) GetInternalOperations(ctx context.Context, hash com defer tx.Rollback() tracer := NewOperationsTracer(ctx) - if _, err := api.runTracer(ctx, tx, hash, tracer); err != nil { + if _, err := api.runTracer(ctx, tx, hash, tracer.Tracer()); err != nil { return nil, err } @@ -180,7 +192,76 @@ func (api *OtterscanAPIImpl) SearchTransactionsBefore(ctx context.Context, addr } defer dbtx.Rollback() - return api.searchTransactionsBeforeV3(dbtx.(kv.TemporalTx), ctx, addr, blockNum, pageSize) + if api.historyV3(dbtx) { + return api.searchTransactionsBeforeV3(dbtx.(kv.TemporalTx), ctx, addr, blockNum, pageSize) + } + + callFromCursor, err := dbtx.Cursor(kv.CallFromIndex) + if err != nil { + return nil, err + } + defer callFromCursor.Close() + + callToCursor, err := dbtx.Cursor(kv.CallToIndex) + if err != nil { + return nil, err + } + defer callToCursor.Close() + + chainConfig, err := api.chainConfig(ctx, dbtx) + if err != nil { + return nil, err + } + + isFirstPage := false + if blockNum == 0 { + isFirstPage = true + } else { + // Internal search code considers blockNum [including], so adjust the value + blockNum-- + } + + // Initialize search cursors at the first shard >= desired block number + callFromProvider := NewCallCursorBackwardBlockProvider(callFromCursor, addr, blockNum) + callToProvider := NewCallCursorBackwardBlockProvider(callToCursor, addr, blockNum) + callFromToProvider := newCallFromToBlockProvider(false, callFromProvider, callToProvider) + + txs := make([]*RPCTransaction, 0, pageSize) + receipts := make([]map[string]interface{}, 0, pageSize) + + resultCount := uint16(0) + hasMore := true + for { + if resultCount >= pageSize || !hasMore { + break + } + + var results []*TransactionsWithReceipts + results, hasMore, err = api.traceBlocks(ctx, addr, chainConfig, pageSize, resultCount, callFromToProvider) + if err != nil { + return nil, err + } + + for _, r := range results { + if r == nil { + return nil, errors.New("internal error during search tracing") + } + + for i := len(r.Txs) - 1; i >= 0; i-- { + txs = append(txs, r.Txs[i]) + } + for i := len(r.Receipts) - 1; i >= 0; i-- { + receipts = append(receipts, r.Receipts[i]) + } + + resultCount += uint16(len(r.Txs)) + if resultCount >= pageSize { + break + } + } + } + + return &TransactionsWithReceipts{txs, receipts, isFirstPage, !hasMore}, nil } // Search transactions that touch a certain address. 
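The restored E2 branch of SearchTransactionsBefore above is an accumulate-until-full pager: it keeps pulling batches from the block provider until pageSize results are collected or the provider reports no more data, then returns the hasMore flag so the caller can decide whether another page exists. A self-contained sketch of that loop shape, with a hypothetical fetchBatch callback standing in for traceBlocks:

package main

import "fmt"

// fetchBatch is a hypothetical stand-in for traceBlocks: it returns the next
// batch of results and whether more data remains.
type fetchBatch func() (results []int, hasMore bool)

// collectPage accumulates batches until pageSize items are gathered or the
// source reports no more data, mirroring the loop in SearchTransactionsBefore.
func collectPage(next fetchBatch, pageSize int) ([]int, bool) {
	out := make([]int, 0, pageSize)
	hasMore := true
	for len(out) < pageSize && hasMore {
		var batch []int
		batch, hasMore = next()
		for _, r := range batch {
			out = append(out, r)
			if len(out) >= pageSize {
				break
			}
		}
	}
	return out, hasMore
}

func main() {
	n := 0
	src := func() ([]int, bool) {
		n++
		return []int{n, n + 10}, n < 5
	}
	page, more := collectPage(src, 3)
	fmt.Println(page, more) // [1 11 2] true
}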
@@ -202,7 +283,78 @@ func (api *OtterscanAPIImpl) SearchTransactionsAfter(ctx context.Context, addr c } defer dbtx.Rollback() - return api.searchTransactionsAfterV3(dbtx.(kv.TemporalTx), ctx, addr, blockNum, pageSize) + if api.historyV3(dbtx) { + return api.searchTransactionsAfterV3(dbtx.(kv.TemporalTx), ctx, addr, blockNum, pageSize) + } + + callFromCursor, err := dbtx.Cursor(kv.CallFromIndex) + if err != nil { + return nil, err + } + defer callFromCursor.Close() + + callToCursor, err := dbtx.Cursor(kv.CallToIndex) + if err != nil { + return nil, err + } + defer callToCursor.Close() + + chainConfig, err := api.chainConfig(ctx, dbtx) + if err != nil { + return nil, err + } + + isLastPage := false + if blockNum == 0 { + isLastPage = true + } else { + // Internal search code considers blockNum [including], so adjust the value + blockNum++ + } + + // Initialize search cursors at the first shard >= desired block number + callFromProvider := NewCallCursorForwardBlockProvider(callFromCursor, addr, blockNum) + callToProvider := NewCallCursorForwardBlockProvider(callToCursor, addr, blockNum) + callFromToProvider := newCallFromToBlockProvider(true, callFromProvider, callToProvider) + + txs := make([]*RPCTransaction, 0, pageSize) + receipts := make([]map[string]interface{}, 0, pageSize) + + resultCount := uint16(0) + hasMore := true + for { + if resultCount >= pageSize || !hasMore { + break + } + + var results []*TransactionsWithReceipts + results, hasMore, err = api.traceBlocks(ctx, addr, chainConfig, pageSize, resultCount, callFromToProvider) + if err != nil { + return nil, err + } + + for _, r := range results { + if r == nil { + return nil, errors.New("internal error during search tracing") + } + + txs = append(txs, r.Txs...) + receipts = append(receipts, r.Receipts...) 
+ + resultCount += uint16(len(r.Txs)) + if resultCount >= pageSize { + break + } + } + } + + // Reverse results + lentxs := len(txs) + for i := 0; i < lentxs/2; i++ { + txs[i], txs[lentxs-1-i] = txs[lentxs-1-i], txs[i] + receipts[i], receipts[lentxs-1-i] = receipts[lentxs-1-i], receipts[i] + } + return &TransactionsWithReceipts{txs, receipts, !hasMore, isLastPage} } func (api *OtterscanAPIImpl) traceBlocks(ctx context.Context, addr common.Address, chainConfig *chain.Config, pageSize, resultCount uint16, callFromToProvider BlockProvider) ([]*TransactionsWithReceipts, bool, error) { diff --git a/turbo/jsonrpc/otterscan_contract_creator.go b/turbo/jsonrpc/otterscan_contract_creator.go index f64abf4828e..2029cfc1135 100644 --- a/turbo/jsonrpc/otterscan_contract_creator.go +++ b/turbo/jsonrpc/otterscan_contract_creator.go @@ -1,6 +1,7 @@ package jsonrpc import ( + "bytes" "context" "fmt" "sort" @@ -8,8 +9,10 @@ import ( "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/bitmapdb" "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" + "github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon/core/types/accounts" @@ -28,7 +31,7 @@ func (api *OtterscanAPIImpl) GetContractCreator(ctx context.Context, addr common } defer tx.Rollback() - latestState := rpchelper.NewLatestStateReader(tx) + latestState := rpchelper.NewLatestStateReader(tx, api.historyV3(tx)) plainStateAcc, err := latestState.ReadAccountData(addr) if err != nil { return nil, err @@ -50,125 +53,252 @@ } var acc accounts.Account - ttx := tx.(kv.TemporalTx) + if api.historyV3(tx) { + ttx := tx.(kv.TemporalTx) + + // Contract; search for creation tx; navigate forward on AccountsHistory/ChangeSets + // + // We traverse the history index because it's cheaper than traversing the history itself, + // and probe the history periodically; the result is a small range of blocks for binary search or full-scan. + // + // Popular contracts may have dozens of state changes due to ETH deposits/withdrawals after contract creation, + // so it is optimal to search from the beginning even if the contract has multiple + // incarnations.
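The comment above summarizes the two-phase lookup the historyV3 branch below performs: walk the cheap index of change points, probing the expensive history only every 4096th entry to bracket the incarnation change, then binary-search inside the bracket with sort.Search. A self-contained sketch of the same idea over a sorted slice of txn IDs, where expensiveCheck is a hypothetical stand-in for the history probe:

package main

import (
	"fmt"
	"sort"
)

// expensiveCheck is a hypothetical stand-in for probing history storage:
// it reports whether the state at txnID already has the target incarnation.
func expensiveCheck(txnID uint64, target uint64) bool {
	return txnID >= target
}

// findFirst brackets the answer with coarse probes (every step entries),
// then binary-searches the bracketed range, mirroring the index walk below.
func findFirst(txnIDs []uint64, target uint64, step int) uint64 {
	lo, hi := uint64(0), uint64(0)
	for i, id := range txnIDs {
		if i%step != 0 { // skip: probing every entry would defeat the purpose
			hi = id
			continue
		}
		if expensiveCheck(id, target) {
			hi = id
			break
		}
		lo = id
	}
	// Binary search in [lo, hi]: first txnID where the check passes.
	n := int(hi - lo)
	idx := sort.Search(n, func(i int) bool {
		return expensiveCheck(lo+uint64(i), target)
	})
	return lo + uint64(idx)
}

func main() {
	ids := make([]uint64, 100000)
	for i := range ids {
		ids[i] = uint64(i)
	}
	fmt.Println(findFirst(ids, 54321, 4096)) // 54321
}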
+ var prevTxnID, nextTxnID uint64 + it, err := ttx.IndexRange(kv.AccountsHistoryIdx, addr[:], 0, -1, order.Asc, kv.Unlim) + if err != nil { + return nil, err + } + defer it.Close() + for i := 0; it.HasNext(); i++ { + txnID, err := it.Next() + if err != nil { + return nil, err + } + + if i%4096 != 0 { // probe history periodically, not on every change + nextTxnID = txnID + continue + } + + v, ok, err := ttx.HistoryGet(kv.AccountsHistory, addr[:], txnID) + if err != nil { + log.Error("Unexpected error, couldn't find changeset", "txNum", txnID, "addr", addr) + return nil, err + } + + if !ok { + err = fmt.Errorf("couldn't find history txnID=%v addr=%v", txnID, addr) + log.Error("[rpc] Unexpected error", "err", err) + return nil, err + } + if len(v) == 0 { // creation, but maybe not our Incarnation + prevTxnID = txnID + continue + } + + if err := accounts.DeserialiseV3(&acc, v); err != nil { + return nil, err + } + // Found the shard where the incarnation change happens; ignore all next index values + if acc.Incarnation >= plainStateAcc.Incarnation { + nextTxnID = txnID + break + } + prevTxnID = txnID + } + + // The sort.Search function finds the first block where the incarnation has + // changed to the desired one, so we get the previous block from the bitmap; + // however if the creationTxnID block is already the first one from the bitmap, it means + // the block we want is the max block from the previous shard. + var creationTxnID uint64 + var searchErr error + + if nextTxnID == 0 { + nextTxnID = prevTxnID + 1 + } + // Binary search in [prevTxnID, nextTxnID] range; get first block where desired incarnation appears + // can be replaced by full-scan over ttx.HistoryRange([prevTxnID, nextTxnID])? + idx := sort.Search(int(nextTxnID-prevTxnID), func(i int) bool { + txnID := uint64(i) + prevTxnID + v, ok, err := ttx.HistoryGet(kv.AccountsHistory, addr[:], txnID) + if err != nil { + log.Error("[rpc] Unexpected error, couldn't find changeset", "txNum", i, "addr", addr) + panic(err) + } + if !ok { + return false + } + if len(v) == 0 { + creationTxnID = cmp.Max(creationTxnID, txnID) + return false + } + + if err := accounts.DeserialiseV3(&acc, v); err != nil { + searchErr = err + return false + } + if acc.Incarnation < plainStateAcc.Incarnation { + creationTxnID = cmp.Max(creationTxnID, txnID) + return false + } + return true + }) + if searchErr != nil { + return nil, searchErr + } + if creationTxnID == 0 { + return nil, fmt.Errorf("binary search between %d-%d doesn't find anything", nextTxnID, prevTxnID) + } + + ok, bn, err := rawdbv3.TxNums.FindBlockNum(tx, creationTxnID) + if err != nil { + return nil, err + } + if !ok { + return nil, fmt.Errorf("block not found by txnID=%d", creationTxnID) + } + minTxNum, err := rawdbv3.TxNums.Min(tx, bn) + if err != nil { + return nil, err + } + txIndex := int(creationTxnID) - int(minTxNum) - 1 /* system-contract */ + if txIndex == -1 { + txIndex = (idx + int(prevTxnID)) - int(minTxNum) - 1 + } + + // Trace block, find tx and contract creator + tracer := NewCreateTracer(ctx, addr) + if err := api.genericTracer(tx, ctx, bn, creationTxnID, txIndex, chainConfig, tracer); err != nil { + return nil, err + } + return &ContractCreatorData{ + Tx: tracer.Tx.Hash(), + Creator: tracer.Creator, + }, nil + } // Contract; search for creation tx; navigate forward on AccountsHistory/ChangeSets // - // We traversing history Index - because it's cheaper than traversing History - // and probe History periodically. In result will have small range of blocks. 
For binary search or full-scan. - // - // popular contracts may have dozens of states changes due to ETH deposits/withdraw after contract creation, + // We search shards in forward order on purpose because popular contracts may have + // dozens of state changes due to ETH deposits/withdrawals after contract creation, // so it is optimal to search from the beginning even if the contract has multiple // incarnations. - var prevTxnID, nextTxnID uint64 - it, err := ttx.IndexRange(kv.AccountsHistoryIdx, addr[:], 0, -1, order.Asc, kv.Unlim) + accHistory, err := tx.Cursor(kv.E2AccountsHistory) if err != nil { return nil, err } - defer it.Close() - for i := 0; it.HasNext(); i++ { - txnID, err := it.Next() - if err != nil { - return nil, err - } + defer accHistory.Close() + + accCS, err := tx.CursorDupSort(kv.AccountChangeSet) + if err != nil { + return nil, err + } + defer accCS.Close() - if i%4096 != 0 { // probe history periodically, not on every change - nextTxnID = txnID - continue + // Locate shard that contains the block where incarnation changed + acs := historyv2.Mapper[kv.AccountChangeSet] + k, v, err := accHistory.Seek(acs.IndexChunkKey(addr.Bytes(), 0)) + if err != nil { + return nil, err + } + if !bytes.HasPrefix(k, addr.Bytes()) { + log.Error("Couldn't find any shard for account history", "addr", addr) + return nil, fmt.Errorf("couldn't find any shard for account history addr=%v", addr) + } + + bm := bitmapdb.NewBitmap64() + defer bitmapdb.ReturnToPool64(bm) + prevShardMaxBl := uint64(0) + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: } - v, ok, err := ttx.HistorySeek(kv.AccountsHistory, addr[:], txnID) + _, err := bm.ReadFrom(bytes.NewReader(v)) if err != nil { - log.Error("Unexpected error, couldn't find changeset", "txNum", txnID, "addr", addr) return nil, err } - if !ok { - err = fmt.Errorf("couldn't find history txnID=%v addr=%v", txnID, addr) - log.Error("[rpc] Unexpected error", "err", err) + // Shortcut precheck + st, err := acs.Find(accCS, bm.Maximum(), addr.Bytes()) + if err != nil { return nil, err } - if len(v) == 0 { // creation, but maybe not our Incarnation - prevTxnID = txnID - continue + if st == nil { + log.Error("Unexpected error, couldn't find changeset", "block", bm.Maximum(), "addr", addr) + return nil, fmt.Errorf("unexpected error, couldn't find changeset block=%v addr=%v", bm.Maximum(), addr) } - if err := accounts.DeserialiseV3(&acc, v); err != nil { + // Found the shard where the incarnation change happens; ignore all + // next shards + if err := acc.DecodeForStorage(st); err != nil { return nil, err } - // Found the shard where the incarnation change happens; ignore all next index values if acc.Incarnation >= plainStateAcc.Incarnation { - nextTxnID = txnID break } - prevTxnID = txnID - } + prevShardMaxBl = bm.Maximum() - // The sort.Search function finds the first block where the incarnation has - // changed to the desired one, so we get the previous block from the bitmap; - // however if the creationTxnID block is already the first one from the bitmap, it means - // the block we want is the max block from the previous shard.
- var creationTxnID uint64 - var searchErr error + k, v, err = accHistory.Next() + if err != nil { + return nil, err + } - if nextTxnID == 0 { - nextTxnID = prevTxnID + 1 + // No more shards; it means the max bl from previous shard + // contains the incarnation change + if !bytes.HasPrefix(k, addr.Bytes()) { + break + } } - // Binary search in [prevTxnID, nextTxnID] range; get first block where desired incarnation appears - // can be replaced by full-scan over ttx.HistoryRange([prevTxnID, nextTxnID])? - idx := sort.Search(int(nextTxnID-prevTxnID), func(i int) bool { - txnID := uint64(i) + prevTxnID - v, ok, err := ttx.HistorySeek(kv.AccountsHistory, addr[:], txnID) + + // Binary search block number inside shard; get first block where desired + // incarnation appears + blocks := bm.ToArray() + var searchErr error + r := sort.Search(len(blocks), func(i int) bool { + bl := blocks[i] + st, err := acs.Find(accCS, bl, addr.Bytes()) if err != nil { - log.Error("[rpc] Unexpected error, couldn't find changeset", "txNum", i, "addr", addr) - panic(err) - } - if !ok { + searchErr = err return false } - if len(v) == 0 { - creationTxnID = cmp.Max(creationTxnID, txnID) + if st == nil { + log.Error("Unexpected error, couldn't find changeset", "block", bl, "addr", addr) return false } - if err := accounts.DeserialiseV3(&acc, v); err != nil { + if err := acc.DecodeForStorage(st); err != nil { searchErr = err return false } if acc.Incarnation < plainStateAcc.Incarnation { - creationTxnID = cmp.Max(creationTxnID, txnID) return false } return true }) + if searchErr != nil { return nil, searchErr } - if creationTxnID == 0 { - return nil, fmt.Errorf("binary search between %d-%d doesn't find anything", nextTxnID, prevTxnID) - } - ok, bn, err := rawdbv3.TxNums.FindBlockNum(tx, creationTxnID) - if err != nil { - return nil, err - } - if !ok { - return nil, fmt.Errorf("block not found by txnID=%d", creationTxnID) - } - minTxNum, err := rawdbv3.TxNums.Min(tx, bn) - if err != nil { - return nil, err - } - txIndex := int(creationTxnID) - int(minTxNum) - 1 /* system-contract */ - if txIndex == -1 { - txIndex = (idx + int(prevTxnID)) - int(minTxNum) - 1 + // The sort.Search function finds the first block where the incarnation has + // changed to the desired one, so we get the previous block from the bitmap; + // however if the found block is already the first one from the bitmap, it means + // the block we want is the max block from the previous shard. 
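Both the shard precheck and the binary search in the restored E2 path above operate on roaring bitmaps of block numbers (bitmapdb.NewBitmap64 is a pooled wrapper around these). A minimal sketch of the bitmap operations the code relies on, using the github.com/RoaringBitmap/roaring/roaring64 package directly (an assumption about the wrapper's backing library, not code from this diff):

package main

import (
	"bytes"
	"fmt"

	"github.com/RoaringBitmap/roaring/roaring64"
)

func main() {
	// A shard: the set of block numbers where the account changed.
	shard := roaring64.New()
	shard.AddMany([]uint64{100, 2500, 77000})

	// Shards are stored serialized; ReadFrom restores one, as in the
	// accHistory cursor loop above.
	var buf bytes.Buffer
	if _, err := shard.WriteTo(&buf); err != nil {
		panic(err)
	}
	restored := roaring64.New()
	if _, err := restored.ReadFrom(bytes.NewReader(buf.Bytes())); err != nil {
		panic(err)
	}

	// Maximum() gives the last changed block in the shard (the precheck);
	// ToArray() yields the sorted block list fed to sort.Search.
	fmt.Println(restored.Maximum()) // 77000
	fmt.Println(restored.ToArray()) // [100 2500 77000]
}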
+ blockFound := prevShardMaxBl + if r > 0 { + blockFound = blocks[r-1] } - // Trace block, find tx and contract creator tracer := NewCreateTracer(ctx, addr) - if err := api.genericTracer(tx, ctx, bn, creationTxnID, txIndex, chainConfig, tracer); err != nil { + if err := api.genericTracer(tx, ctx, blockFound, 0, 0, chainConfig, tracer); err != nil { return nil, err } + return &ContractCreatorData{ Tx: tracer.Tx.Hash(), Creator: tracer.Creator, diff --git a/turbo/jsonrpc/otterscan_default_tracer.go b/turbo/jsonrpc/otterscan_default_tracer.go index 4c8807eb3f5..a03f7f73bd6 100644 --- a/turbo/jsonrpc/otterscan_default_tracer.go +++ b/turbo/jsonrpc/otterscan_default_tracer.go @@ -1,36 +1,7 @@ package jsonrpc -import ( - "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon-lib/common" - - "github.com/ledgerwatch/erigon/core/vm" -) - // Helper implementation of vm.Tracer; since the interface is big and most // custom tracers implement just a few of the methods, this is a base struct // to avoid lots of empty boilerplate code type DefaultTracer struct { } - -func (t *DefaultTracer) CaptureTxStart(gasLimit uint64) {} - -func (t *DefaultTracer) CaptureTxEnd(restGas uint64) {} - -func (t *DefaultTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { -} - -func (t *DefaultTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { -} - -func (t *DefaultTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { -} - -func (t *DefaultTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { -} - -func (t *DefaultTracer) CaptureEnd(output []byte, usedGas uint64, err error) { -} - -func (t *DefaultTracer) CaptureExit(output []byte, usedGas uint64, err error) { -} diff --git a/turbo/jsonrpc/otterscan_generic_tracer.go b/turbo/jsonrpc/otterscan_generic_tracer.go index 64dd19e5dfc..07fc5525be2 100644 --- a/turbo/jsonrpc/otterscan_generic_tracer.go +++ b/turbo/jsonrpc/otterscan_generic_tracer.go @@ -4,46 +4,125 @@ import ( "context" "github.com/ledgerwatch/erigon-lib/chain" + "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/cmd/state/exec3" + "github.com/ledgerwatch/erigon/eth/tracers" + "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/turbo/shards" ) type GenericTracer interface { - vm.EVMLogger + Tracer() *tracers.Tracer SetTransaction(tx types.Transaction) Found() bool } func (api *OtterscanAPIImpl) genericTracer(dbtx kv.Tx, ctx context.Context, blockNum, txnID uint64, txIndex int, chainConfig *chain.Config, tracer GenericTracer) error { - ttx := dbtx.(kv.TemporalTx) - executor := exec3.NewTraceWorker(ttx, chainConfig, api.engine(), api._blockReader, tracer) + if api.historyV3(dbtx) { + ttx := dbtx.(kv.TemporalTx) + executor := exec3.NewTraceWorker(ttx, chainConfig, api.engine(), api._blockReader, tracer) - // if block number changed, calculate all related field - header, err := api._blockReader.HeaderByNumber(ctx, ttx, blockNum) - if err != nil { - return err - } - if header 
== nil { - log.Warn("[rpc] header is nil", "blockNum", blockNum) + // if block number changed, calculate all related field + header, err := api._blockReader.HeaderByNumber(ctx, ttx, blockNum) + if err != nil { + return err + } + if header == nil { + log.Warn("[rpc] header is nil", "blockNum", blockNum) + return nil + } + executor.ChangeBlock(header) + + txn, err := api._txnReader.TxnByIdxInBlock(ctx, ttx, blockNum, txIndex) + if err != nil { + return err + } + if txn == nil { + log.Warn("[rpc genericTracer] tx is nil", "blockNum", blockNum, "txIndex", txIndex) + return nil + } + _, err = executor.ExecTxn(txnID, txIndex, txn) + if err != nil { + return err + } return nil } - executor.ChangeBlock(header) - txn, err := api._txnReader.TxnByIdxInBlock(ctx, ttx, blockNum, txIndex) + reader, err := rpchelper.CreateHistoryStateReader(dbtx, blockNum, txIndex, api.historyV3(dbtx), chainConfig.ChainName) if err != nil { return err } - if txn == nil { - log.Warn("[rpc genericTracer] tx is nil", "blockNum", blockNum, "txIndex", txIndex) - return nil + stateCache := shards.NewStateCache(32, 0 /* no limit */) + cachedReader := state.NewCachedReader(reader, stateCache) + noop := state.NewNoopWriter() + cachedWriter := state.NewCachedWriter(noop, stateCache) + + ibs := state.New(cachedReader) + ibs.SetLogger(tracer.Tracer().Hooks) + + getHeader := func(hash common.Hash, number uint64) *types.Header { + h, e := api._blockReader.Header(ctx, dbtx, hash, number) + if e != nil { + log.Error("getHeader error", "number", number, "hash", hash, "err", e) + } + return h } - _, err = executor.ExecTxn(txnID, txIndex, txn) + engine := api.engine() + block, err := api.blockByNumberWithSenders(ctx, dbtx, blockNum) if err != nil { return err } + if block == nil { + return nil + } + + header := block.Header() + rules := chainConfig.Rules(block.NumberU64(), header.Time) + signer := types.MakeSigner(chainConfig, blockNum, header.Time) + for idx, tx := range block.Transactions() { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + ibs.SetTxContext(tx.Hash(), block.Hash(), idx) + + msg, _ := tx.AsMessage(*signer, header.BaseFee, rules) + + BlockContext := core.NewEVMBlockContext(header, core.GetHashFn(header, getHeader), engine, nil) + TxContext := core.NewEVMTxContext(msg) + + vmenv := vm.NewEVM(BlockContext, TxContext, ibs, chainConfig, vm.Config{Debug: true, Tracer: tracer.Tracer().Hooks}) + if tracer != nil && tracer.Tracer().Hooks.OnTxStart != nil { + tracer.Tracer().Hooks.OnTxStart(vmenv.GetVMContext(), tx, msg.From()) + } + res, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.GetGas()).AddBlobGas(tx.GetBlobGas()), true /* refunds */, false /* gasBailout */) + if err != nil { + if tracer != nil && tracer.Tracer().Hooks.OnTxEnd != nil { + tracer.Tracer().Hooks.OnTxEnd(nil, err) + } + return err + } + + if tracer != nil && tracer.Tracer().Hooks.OnTxEnd != nil { + tracer.Tracer().Hooks.OnTxEnd(&types.Receipt{GasUsed: res.UsedGas}, nil) + } + + _ = ibs.FinalizeTx(rules, cachedWriter) + + if tracer.Found() { + tracer.SetTransaction(tx) + return nil + } + } + return nil } diff --git a/turbo/jsonrpc/otterscan_has_code.go b/turbo/jsonrpc/otterscan_has_code.go index e7e18ecdcea..af442e8d000 100644 --- a/turbo/jsonrpc/otterscan_has_code.go +++ b/turbo/jsonrpc/otterscan_has_code.go @@ -26,7 +26,7 @@ func (api *OtterscanAPIImpl) HasCode(ctx context.Context, address common.Address return false, err } - reader, err := rpchelper.CreateHistoryStateReader(tx, blockNumber, 0, chainConfig.ChainName) + 
reader, err := rpchelper.CreateHistoryStateReader(tx, blockNumber, 0, api.historyV3(tx), chainConfig.ChainName) if err != nil { return false, err } diff --git a/turbo/jsonrpc/otterscan_search_trace.go b/turbo/jsonrpc/otterscan_search_trace.go index 57f5682df5f..1d597ec5c7f 100644 --- a/turbo/jsonrpc/otterscan_search_trace.go +++ b/turbo/jsonrpc/otterscan_search_trace.go @@ -52,7 +52,7 @@ func (api *OtterscanAPIImpl) traceBlock(dbtx kv.Tx, ctx context.Context, blockNu return false, nil, err } - reader, err := rpchelper.CreateHistoryStateReader(dbtx, blockNum, 0, chainConfig.ChainName) + reader, err := rpchelper.CreateHistoryStateReader(dbtx, blockNum, 0, api.historyV3(dbtx), chainConfig.ChainName) if err != nil { return false, nil, err } @@ -91,13 +91,26 @@ func (api *OtterscanAPIImpl) traceBlock(dbtx kv.Tx, ctx context.Context, blockNu msg, _ := tx.AsMessage(*signer, header.BaseFee, rules) tracer := NewTouchTracer(searchAddr) + ibs.SetLogger(tracer.Tracer().Hooks) BlockContext := core.NewEVMBlockContext(header, core.GetHashFn(header, getHeader), engine, nil) TxContext := core.NewEVMTxContext(msg) - vmenv := vm.NewEVM(BlockContext, TxContext, ibs, chainConfig, vm.Config{Debug: true, Tracer: tracer}) - if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.GetGas()).AddBlobGas(tx.GetBlobGas()), true /* refunds */, false /* gasBailout */); err != nil { + vmenv := vm.NewEVM(BlockContext, TxContext, ibs, chainConfig, vm.Config{Debug: true, Tracer: tracer.Tracer().Hooks}) + + if tracer != nil && tracer.Tracer().Hooks.OnTxStart != nil { + tracer.Tracer().Hooks.OnTxStart(vmenv.GetVMContext(), tx, msg.From()) + } + res, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.GetGas()).AddBlobGas(tx.GetBlobGas()), true /* refunds */, false /* gasBailout */) + if err != nil { + if tracer != nil && tracer.Tracer().Hooks.OnTxEnd != nil { + tracer.Tracer().Hooks.OnTxEnd(nil, err) + } return false, nil, err } + + if tracer != nil && tracer.Tracer().Hooks.OnTxEnd != nil { + tracer.Tracer().Hooks.OnTxEnd(&types.Receipt{GasUsed: res.UsedGas}, nil) + } _ = ibs.FinalizeTx(rules, cachedWriter) if tracer.Found { diff --git a/turbo/jsonrpc/otterscan_trace_contract_creator.go b/turbo/jsonrpc/otterscan_trace_contract_creator.go index 3f0bb4b6a36..f57c0d23bcb 100644 --- a/turbo/jsonrpc/otterscan_trace_contract_creator.go +++ b/turbo/jsonrpc/otterscan_trace_contract_creator.go @@ -6,8 +6,10 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/tracers" ) type CreateTracer struct { @@ -27,6 +29,14 @@ func NewCreateTracer(ctx context.Context, target common.Address) *CreateTracer { } } +func (t *CreateTracer) Tracer() *tracers.Tracer { + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnEnter: t.OnEnter, + }, + } +} + func (t *CreateTracer) SetTransaction(tx types.Transaction) { t.Tx = tx } @@ -50,10 +60,6 @@ func (t *CreateTracer) captureStartOrEnter(from, to common.Address, create bool) { t.Creator = from } -func (t *CreateTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { - t.captureStartOrEnter(from, to, create) -} - -func (t *CreateTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code 
[]byte) { - t.captureStartOrEnter(from, to, create) +func (t *CreateTracer) OnEnter(depth int, typ byte, from common.Address, to common.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) { + t.captureStartOrEnter(from, to, vm.OpCode(typ) == vm.CREATE || vm.OpCode(typ) == vm.CREATE2) // CREATE2 also creates a contract +} diff --git a/turbo/jsonrpc/otterscan_trace_operations.go b/turbo/jsonrpc/otterscan_trace_operations.go index fbdf1fb600e..3dd95cf3129 100644 --- a/turbo/jsonrpc/otterscan_trace_operations.go +++ b/turbo/jsonrpc/otterscan_trace_operations.go @@ -2,13 +2,16 @@ package jsonrpc import ( "context" + "encoding/json" "github.com/ledgerwatch/erigon-lib/common/hexutil" "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/tracers" ) type OperationType int @@ -28,7 +31,6 @@ type InternalOperation struct { } type OperationsTracer struct { - DefaultTracer ctx context.Context Results []*InternalOperation } @@ -40,18 +42,34 @@ func NewOperationsTracer(ctx context.Context) *OperationsTracer { } } -func (t *OperationsTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { - if typ == vm.CALL && value.Uint64() != 0 { +func (t *OperationsTracer) Tracer() *tracers.Tracer { + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnEnter: t.OnEnter, + }, + GetResult: t.GetResult, + Stop: t.Stop, + } +} + +func (t *OperationsTracer) OnEnter(depth int, typ byte, from common.Address, to common.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) { + if vm.OpCode(typ) == vm.CALL && value.Uint64() != 0 { t.Results = append(t.Results, &InternalOperation{OP_TRANSFER, from, to, (*hexutil.Big)(value.ToBig())}) return } - if typ == vm.CREATE { + if vm.OpCode(typ) == vm.CREATE { t.Results = append(t.Results, &InternalOperation{OP_CREATE, from, to, (*hexutil.Big)(value.ToBig())}) } - if typ == vm.CREATE2 { + if vm.OpCode(typ) == vm.CREATE2 { t.Results = append(t.Results, &InternalOperation{OP_CREATE2, from, to, (*hexutil.Big)(value.ToBig())}) } - if typ == vm.SELFDESTRUCT { + if vm.OpCode(typ) == vm.SELFDESTRUCT { t.Results = append(t.Results, &InternalOperation{OP_SELF_DESTRUCT, from, to, (*hexutil.Big)(value.ToBig())}) } } + +func (t *OperationsTracer) GetResult() (json.RawMessage, error) { + return json.RawMessage{}, nil +} + +func (t *OperationsTracer) Stop(err error) {} diff --git a/turbo/jsonrpc/otterscan_trace_touch.go b/turbo/jsonrpc/otterscan_trace_touch.go index 17fddfdd9ef..32a09415156 100644 --- a/turbo/jsonrpc/otterscan_trace_touch.go +++ b/turbo/jsonrpc/otterscan_trace_touch.go @@ -6,7 +6,8 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/common" - "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/core/tracing" + "github.com/ledgerwatch/erigon/eth/tracers" ) type TouchTracer struct { @@ -21,16 +22,20 @@ func NewTouchTracer(searchAddr common.Address) *TouchTracer { } } +func (t *TouchTracer) Tracer() *tracers.Tracer { + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnEnter: t.OnEnter, + }, + } +} + func (t *TouchTracer) captureStartOrEnter(from, to common.Address) { if !t.Found && (bytes.Equal(t.searchAddr.Bytes(), from.Bytes()) || bytes.Equal(t.searchAddr.Bytes(), to.Bytes())) { t.Found = true } } -func (t *TouchTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, precompile 
bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { - t.captureStartOrEnter(from, to) -} - -func (t *TouchTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { +func (t *TouchTracer) OnEnter(depth int, typ byte, from common.Address, to common.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) { t.captureStartOrEnter(from, to) } diff --git a/turbo/jsonrpc/otterscan_trace_transaction.go b/turbo/jsonrpc/otterscan_trace_transaction.go index c6df5d79f67..0f93627696f 100644 --- a/turbo/jsonrpc/otterscan_trace_transaction.go +++ b/turbo/jsonrpc/otterscan_trace_transaction.go @@ -11,7 +11,9 @@ import ( "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/hexutility" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/tracers" ) func (api *OtterscanAPIImpl) TraceTransaction(ctx context.Context, hash common.Hash) ([]*TraceEntry, error) { @@ -22,7 +24,7 @@ func (api *OtterscanAPIImpl) TraceTransaction(ctx context.Context, hash common.H defer tx.Rollback() tracer := NewTransactionTracer(ctx) - if _, err := api.runTracer(ctx, tx, hash, tracer); err != nil { + if _, err := api.runTracer(ctx, tx, hash, tracer.Tracer()); err != nil { return nil, err } @@ -55,6 +57,15 @@ func NewTransactionTracer(ctx context.Context) *TransactionTracer { } } +func (t *TransactionTracer) Tracer() *tracers.Tracer { + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnEnter: t.OnEnter, + OnExit: t.OnExit, + }, + } +} + func (t *TransactionTracer) captureStartOrEnter(typ vm.OpCode, from, to common.Address, precompile bool, input []byte, value *uint256.Int) { inputCopy := make([]byte, len(input)) copy(inputCopy, input) @@ -94,18 +105,18 @@ func (t *TransactionTracer) captureStartOrEnter(typ vm.OpCode, from, to common.A t.stack = append(t.stack, entry) } -func (t *TransactionTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { +func (t *TransactionTracer) CaptureStart(from common.Address, to common.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { t.depth = 0 t.captureStartOrEnter(vm.CALL, from, to, precompile, input, value) } -func (t *TransactionTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { - t.depth++ - t.captureStartOrEnter(typ, from, to, precompile, input, value) +func (t *TransactionTracer) OnEnter(depth int, typ byte, from common.Address, to common.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) { + t.depth = depth + t.captureStartOrEnter(vm.OpCode(typ), from, to, precompile, input, value) } -func (t *TransactionTracer) captureEndOrExit(output []byte, usedGas uint64, err error) { - t.depth-- +func (t *TransactionTracer) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { + t.depth = depth lastIdx := len(t.stack) - 1 pop := t.stack[lastIdx] @@ -115,11 +126,3 @@ func (t *TransactionTracer) captureEndOrExit(output []byte, usedGas uint64, err copy(outputCopy, output) pop.Output = outputCopy } - -func (t *TransactionTracer) CaptureExit(output []byte, usedGas uint64, err error) { - t.captureEndOrExit(output, 
usedGas, err) -} - -func (t *TransactionTracer) CaptureEnd(output []byte, usedGas uint64, err error) { - t.captureEndOrExit(output, usedGas, err) -} diff --git a/turbo/jsonrpc/otterscan_transaction_by_sender_and_nonce.go b/turbo/jsonrpc/otterscan_transaction_by_sender_and_nonce.go index b0fee2d20a8..321aec8464b 100644 --- a/turbo/jsonrpc/otterscan_transaction_by_sender_and_nonce.go +++ b/turbo/jsonrpc/otterscan_transaction_by_sender_and_nonce.go @@ -1,15 +1,18 @@ package jsonrpc import ( + "bytes" "context" "fmt" "sort" + "github.com/RoaringBitmap/roaring/roaring64" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" + "github.com/ledgerwatch/erigon-lib/kv/temporal/historyv2" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon/core/types/accounts" @@ -23,79 +26,220 @@ func (api *OtterscanAPIImpl) GetTransactionBySenderAndNonce(ctx context.Context, defer tx.Rollback() var acc accounts.Account - ttx := tx.(kv.TemporalTx) - it, err := ttx.IndexRange(kv.AccountsHistoryIdx, addr[:], -1, -1, order.Asc, kv.Unlim) - if err != nil { - return nil, err - } - - var prevTxnID, nextTxnID uint64 - for i := 0; it.HasNext(); i++ { - txnID, err := it.Next() + if api.historyV3(tx) { + ttx := tx.(kv.TemporalTx) + it, err := ttx.IndexRange(kv.AccountsHistoryIdx, addr[:], -1, -1, order.Asc, kv.Unlim) if err != nil { return nil, err } - if i%4096 != 0 { // probe history periodically, not on every change - nextTxnID = txnID - continue + var prevTxnID, nextTxnID uint64 + for i := 0; it.HasNext(); i++ { + txnID, err := it.Next() + if err != nil { + return nil, err + } + + if i%4096 != 0 { // probe history periodically, not on every change + nextTxnID = txnID + continue + } + + v, ok, err := ttx.HistoryGet(kv.AccountsHistory, addr[:], txnID) + if err != nil { + log.Error("Unexpected error, couldn't find changeset", "txNum", i, "addr", addr) + return nil, err + } + if !ok { + err = fmt.Errorf("couldn't find history txnID=%v addr=%v", txnID, addr) + log.Error("[rpc] Unexpected error", "err", err) + return nil, err + } + + if len(v) == 0 { // creation, but maybe not our Incarnation + prevTxnID = txnID + continue + } + + if err := accounts.DeserialiseV3(&acc, v); err != nil { + return nil, err + } + // Desired nonce was found in this chunk + if acc.Nonce > nonce { + break + } + prevTxnID = txnID } - v, ok, err := ttx.HistorySeek(kv.AccountsHistory, addr[:], txnID) + // The sort.Search function finds the first block where the incarnation has + // changed to the desired one, so we get the previous block from the bitmap; + // however if the creationTxnID block is already the first one from the bitmap, it means + // the block we want is the max block from the previous shard. + var creationTxnID uint64 + var searchErr error + + if nextTxnID == 0 { + nextTxnID = prevTxnID + 1 + } + // Binary search in [prevTxnID, nextTxnID] range; get first block where desired incarnation appears + // can be replaced by full-scan over ttx.HistoryRange([prevTxnID, nextTxnID])? 
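// --- Editor's sketch (not part of the diff): the probe-every-4096 loop plus
// sort.Search above is a probe-then-bisect. Scan a sorted stream of txn IDs
// cheaply, touch historical state only every K entries to find a coarse
// window, then binary-search inside it. stateNonceAt is a hypothetical
// stand-in for the HistoryGet-plus-decode step; this is not Erigon code.
package main

import (
	"fmt"
	"sort"
)

const probeEvery = 4096

// findFirstAbove returns the first txn ID at which the recorded nonce
// exceeds target, assuming IDs are contiguous (as txn numbers are).
func findFirstAbove(ids []uint64, target uint64, stateNonceAt func(uint64) uint64) uint64 {
	var prev, next uint64
	for i, id := range ids {
		if i%probeEvery != 0 { // cheap path: remember position only
			next = id
			continue
		}
		if stateNonceAt(id) > target { // expensive probe overshot the target
			next = id
			break
		}
		prev = id
	}
	if next == 0 {
		next = prev + 1
	}
	// Bisect the final [prev, next) window with the expensive check.
	off := sort.Search(int(next-prev), func(i int) bool {
		return stateNonceAt(prev+uint64(i)) > target
	})
	return prev + uint64(off)
}

func main() {
	ids := make([]uint64, 10000)
	for i := range ids {
		ids[i] = uint64(i)
	}
	// Hypothetical account whose nonce at txn ID n is n/100.
	fmt.Println(findFirstAbove(ids, 42, func(id uint64) uint64 { return id / 100 })) // 4300
}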
+ idx := sort.Search(int(nextTxnID-prevTxnID), func(i int) bool { + txnID := uint64(i) + prevTxnID + v, ok, err := ttx.HistoryGet(kv.AccountsHistory, addr[:], txnID) + if err != nil { + log.Error("[rpc] Unexpected error, couldn't find changeset", "txNum", i, "addr", addr) + panic(err) + } + if !ok { + return false + } + if len(v) == 0 { + creationTxnID = cmp.Max(creationTxnID, txnID) + return false + } + + if err := accounts.DeserialiseV3(&acc, v); err != nil { + searchErr = err + return false + } + + // Since the state contains the nonce BEFORE the block changes, we look for + // the block when the nonce changed to be > the desired one, which means the + // previous history block contains the actual change; it may contain multiple + // nonce changes. + if acc.Nonce <= nonce { + creationTxnID = cmp.Max(creationTxnID, txnID) + return false + } + return true + }) + if searchErr != nil { + return nil, searchErr + } + if creationTxnID == 0 { + return nil, fmt.Errorf("binary search between %d-%d doesn't find anything", nextTxnID, prevTxnID) + } + ok, bn, err := rawdbv3.TxNums.FindBlockNum(tx, creationTxnID) if err != nil { - log.Error("Unexpected error, couldn't find changeset", "txNum", i, "addr", addr) return nil, err } if !ok { - err = fmt.Errorf("couldn't find history txnID=%v addr=%v", txnID, addr) - log.Error("[rpc] Unexpected error", "err", err) + return nil, fmt.Errorf("block not found by txnID=%d", creationTxnID) + } + minTxNum, err := rawdbv3.TxNums.Min(tx, bn) + if err != nil { return nil, err } + txIndex := int(creationTxnID) - int(minTxNum) - 1 /* system-tx */ + if txIndex == -1 { + txIndex = (idx + int(prevTxnID)) - int(minTxNum) - 1 + } + txn, err := api._txnReader.TxnByIdxInBlock(ctx, ttx, bn, txIndex) + if err != nil { + return nil, err + } + if txn == nil { + log.Warn("[rpc] tx is nil", "blockNum", bn, "txIndex", txIndex) + return nil, nil + } + found := txn.GetNonce() == nonce + if !found { + return nil, nil + } + txHash := txn.Hash() + return &txHash, nil + } - if len(v) == 0 { // creation, but maybe not our Incarnation - prevTxnID = txnID - continue + accHistoryC, err := tx.Cursor(kv.E2AccountsHistory) + if err != nil { + return nil, err + } + defer accHistoryC.Close() + + accChangesC, err := tx.CursorDupSort(kv.AccountChangeSet) + if err != nil { + return nil, err + } + defer accChangesC.Close() + + // Locate the chunk where the nonce change happens + acs := historyv2.Mapper[kv.AccountChangeSet] + k, v, err := accHistoryC.Seek(acs.IndexChunkKey(addr.Bytes(), 0)) + if err != nil { + return nil, err + } + + bitmap := roaring64.New() + maxBlPrevChunk := uint64(0) + + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: } - if err := accounts.DeserialiseV3(&acc, v); err != nil { + if k == nil || !bytes.HasPrefix(k, addr.Bytes()) { + // Check plain state + data, err := tx.GetOne(kv.PlainState, addr.Bytes()) + if err != nil { + return nil, err + } + if err := acc.DecodeForStorage(data); err != nil { + return nil, err + } + + // Nonce changed in plain state, so it means the last block of the last chunk + // contains the actual nonce change + if acc.Nonce > nonce { + break + } + // Not found; asked for nonce still not used + return nil, nil + } + + // Inspect block changeset + if _, err := bitmap.ReadFrom(bytes.NewReader(v)); err != nil { + return nil, err + } + maxBl := bitmap.Maximum() + data, err := acs.Find(accChangesC, maxBl, addr.Bytes()) + if err != nil { + return nil, err + } + if err := acc.DecodeForStorage(data); err != nil { return nil, err } + // Desired 
nonce was found in this chunk if acc.Nonce > nonce { break } - prevTxnID = txnID - } - // The sort.Search function finds the first block where the incarnation has - // changed to the desired one, so we get the previous block from the bitmap; - // however if the creationTxnID block is already the first one from the bitmap, it means - // the block we want is the max block from the previous shard. - var creationTxnID uint64 - var searchErr error - - if nextTxnID == 0 { - nextTxnID = prevTxnID + 1 - } - // Binary search in [prevTxnID, nextTxnID] range; get first block where desired incarnation appears - // can be replaced by full-scan over ttx.HistoryRange([prevTxnID, nextTxnID])? - idx := sort.Search(int(nextTxnID-prevTxnID), func(i int) bool { - txnID := uint64(i) + prevTxnID - v, ok, err := ttx.HistorySeek(kv.AccountsHistory, addr[:], txnID) + maxBlPrevChunk = maxBl + k, v, err = accHistoryC.Next() if err != nil { - log.Error("[rpc] Unexpected error, couldn't find changeset", "txNum", i, "addr", addr) - panic(err) + return nil, err } - if !ok { + } + + // Locate the exact block inside chunk when the nonce changed + blocks := bitmap.ToArray() + var errSearch error = nil + idx := sort.Search(len(blocks), func(i int) bool { + if errSearch != nil { return false } - if len(v) == 0 { - creationTxnID = cmp.Max(creationTxnID, txnID) + + // Locate the block changeset + data, err := acs.Find(accChangesC, blocks[i], addr.Bytes()) + if err != nil { + errSearch = err return false } - if err := accounts.DeserialiseV3(&acc, v); err != nil { - searchErr = err + if err := acc.DecodeForStorage(data); err != nil { + errSearch = err return false } @@ -103,46 +247,27 @@ func (api *OtterscanAPIImpl) GetTransactionBySenderAndNonce(ctx context.Context, // the block when the nonce changed to be > the desired once, which means the // previous history block contains the actual change; it may contain multiple // nonce changes. 
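// --- Editor's sketch (not part of the diff): a tiny worked example of the
// off-by-one rule stated in the comment above, namely that a changeset entry
// at block B records the account state from just BEFORE B executed. Numbers
// are purely illustrative.
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Blocks in which the account changed, and the nonce each block's
	// changeset recorded (the nonce before that block executed).
	blocks := []uint64{100, 150, 200}
	nonceBefore := map[uint64]uint64{100: 0, 150: 3, 200: 7}

	target := uint64(5) // find the block whose txn consumed nonce 5
	idx := sort.Search(len(blocks), func(i int) bool {
		return nonceBefore[blocks[i]] > target
	})
	// nonceBefore[200] == 7 > 5, so idx == 2: nonce 5 was consumed while
	// executing the PREVIOUS changed block. (When idx == 0 the real code
	// falls back to the previous chunk's max block instead.)
	fmt.Println(blocks[idx-1]) // 150
}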
- if acc.Nonce <= nonce { - creationTxnID = cmp.Max(creationTxnID, txnID) - return false - } - return true + return acc.Nonce > nonce }) - if searchErr != nil { - return nil, searchErr - } - if creationTxnID == 0 { - return nil, fmt.Errorf("binary search between %d-%d doesn't find anything", nextTxnID, prevTxnID) - } - ok, bn, err := rawdbv3.TxNums.FindBlockNum(tx, creationTxnID) - if err != nil { - return nil, err - } - if !ok { - return nil, fmt.Errorf("block not found by txnID=%d", creationTxnID) - } - minTxNum, err := rawdbv3.TxNums.Min(tx, bn) - if err != nil { - return nil, err + if errSearch != nil { + return nil, errSearch } - txIndex := int(creationTxnID) - int(minTxNum) - 1 /* system-tx */ - if txIndex == -1 { - txIndex = (idx + int(prevTxnID)) - int(minTxNum) - 1 + + // Since the changeset contains the state BEFORE the change, we inspect + // the block before the one we found; if it is the first block inside the chunk, + // we use the last block from prev chunk + nonceBlock := maxBlPrevChunk + if idx > 0 { + nonceBlock = blocks[idx-1] } - txn, err := api._txnReader.TxnByIdxInBlock(ctx, ttx, bn, txIndex) + found, txHash, err := api.findNonce(ctx, tx, addr, nonce, nonceBlock) if err != nil { return nil, err } - if txn == nil { - log.Warn("[rpc] tx is nil", "blockNum", bn, "txIndex", txIndex) - return nil, nil - } - found := txn.GetNonce() == nonce if !found { return nil, nil } - txHash := txn.Hash() + return &txHash, nil } diff --git a/turbo/jsonrpc/overlay_api.go b/turbo/jsonrpc/overlay_api.go index 86290ca7717..8bef2d812a7 100644 --- a/turbo/jsonrpc/overlay_api.go +++ b/turbo/jsonrpc/overlay_api.go @@ -135,7 +135,7 @@ func (api *OverlayAPIImpl) CallConstructor(ctx context.Context, address common.A replayTransactions = block.Transactions()[:transactionIndex] - stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNum-1)), 0, api.filters, api.stateCache, chainConfig.ChainName) + stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNum-1)), 0, api.filters, api.stateCache, api.historyV3(tx), chainConfig.ChainName) if err != nil { return nil, err } @@ -218,8 +218,9 @@ func (api *OverlayAPIImpl) CallConstructor(ctx context.Context, address common.A } txCtx = core.NewEVMTxContext(msg) ct := OverlayCreateTracer{contractAddress: address, code: *code, gasCap: api.GasCap} - evm = vm.NewEVM(blockCtx, txCtx, evm.IntraBlockState(), chainConfig, vm.Config{Debug: true, Tracer: &ct}) + evm = vm.NewEVM(blockCtx, txCtx, evm.IntraBlockState(), chainConfig, vm.Config{Debug: true, Tracer: ct.Tracer().Hooks}) + ct.evm = evm // Execute the transaction message _, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, true /* gasBailout */) if ct.err != nil { @@ -309,7 +310,7 @@ func (api *OverlayAPIImpl) GetLogs(ctx context.Context, crit filters.FilterCrite } // try to recompute the state - stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNumber-1)), 0, api.filters, api.stateCache, chainConfig.ChainName) + stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNumber-1)), 0, api.filters, api.stateCache, api.historyV3(tx), chainConfig.ChainName) if err != nil { results[task.idx] = &blockReplayResult{BlockNumber: task.BlockNumber, Error: err.Error()} continue diff --git a/turbo/jsonrpc/overlay_create_tracer.go b/turbo/jsonrpc/overlay_create_tracer.go index 
820b9e9fb5b..a00847fdb52 100644 --- a/turbo/jsonrpc/overlay_create_tracer.go +++ b/turbo/jsonrpc/overlay_create_tracer.go @@ -3,7 +3,9 @@ package jsonrpc import ( "github.com/holiman/uint256" libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/tracers" ) type OverlayCreateTracer struct { @@ -16,25 +18,23 @@ type OverlayCreateTracer struct { evm *vm.EVM } -// Transaction level -func (ct *OverlayCreateTracer) CaptureTxStart(gasLimit uint64) {} -func (ct *OverlayCreateTracer) CaptureTxEnd(restGas uint64) {} - -// Top call frame -func (ct *OverlayCreateTracer) CaptureStart(env *vm.EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { - ct.evm = env +func (ct *OverlayCreateTracer) Tracer() *tracers.Tracer { + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnTxStart: nil, + OnEnter: ct.OnEnter, + }, + } } -func (ct *OverlayCreateTracer) CaptureEnd(output []byte, usedGas uint64, err error) {} -// Rest of the frames -func (ct *OverlayCreateTracer) CaptureEnter(typ vm.OpCode, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { +func (ct *OverlayCreateTracer) OnEnter(depth int, typ byte, from libcommon.Address, to libcommon.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) { if ct.isCapturing { return } - if create && to == ct.contractAddress { + if (vm.OpCode(typ) == vm.CREATE || vm.OpCode(typ) == vm.CREATE2) && to == ct.contractAddress { // CREATE2 deployments count too + ct.isCapturing = true - _, _, _, err := ct.evm.OverlayCreate(vm.AccountRef(from), vm.NewCodeAndHash(ct.code), ct.gasCap, value, to, typ, true /* incrementNonce */) + _, _, _, err := ct.evm.OverlayCreate(vm.AccountRef(from), vm.NewCodeAndHash(ct.code), ct.gasCap, value, to, vm.OpCode(typ), true /* incrementNonce */) if err != nil { ct.err = err } else { @@ -42,10 +42,3 @@ func (ct *OverlayCreateTracer) CaptureEnter(typ vm.OpCode, from libcommon.Addres } } } -func (ct *OverlayCreateTracer) CaptureExit(output []byte, usedGas uint64, err error) {} - -// Opcode level -func (ct *OverlayCreateTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { -} -func (ct *OverlayCreateTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { -} diff --git a/turbo/jsonrpc/parity_api.go b/turbo/jsonrpc/parity_api.go index f58b0e98422..b1ef45f50db 100644 --- a/turbo/jsonrpc/parity_api.go +++ b/turbo/jsonrpc/parity_api.go @@ -2,10 +2,12 @@ package jsonrpc import ( "context" + "encoding/binary" "fmt" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/hexutility" + "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" @@ -50,36 +52,64 @@ func (api *ParityAPIImpl) ListStorageKeys(ctx context.Context, account libcommon return nil, fmt.Errorf("listStorageKeys cannot open tx: %w", err) } defer tx.Rollback() - a, err := rpchelper.NewLatestStateReader(tx).ReadAccountData(account) + a, err := rpchelper.NewLatestStateReader(tx, api.historyV3(tx)).ReadAccountData(account) if err != nil { return nil, err } else if a == nil { return nil, fmt.Errorf("acc not found") } - bn := rawdb.ReadCurrentBlockNumber(tx) - 
minTxNum, err := rawdbv3.TxNums.Min(tx, *bn) + if api.historyV3(tx) { + bn := rawdb.ReadCurrentBlockNumber(tx) + minTxNum, err := rawdbv3.TxNums.Min(tx, *bn) + if err != nil { + return nil, err + } + + from := account[:] + if offset != nil { + from = append(from, *offset...) + } + to, _ := kv.NextSubtree(account[:]) + r, err := tx.(kv.TemporalTx).DomainRange(kv.StorageDomain, from, to, minTxNum, order.Asc, quantity) + if err != nil { + return nil, err + } + defer r.Close() + for r.HasNext() { + k, _, err := r.Next() + if err != nil { + return nil, err + } + keys = append(keys, libcommon.CopyBytes(k[20:])) + } + return keys, nil + } + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, a.GetIncarnation()) + seekBytes := append(account.Bytes(), b...) + + c, err := tx.CursorDupSort(kv.PlainState) if err != nil { return nil, err } - - from := account[:] + defer c.Close() + var v []byte + var seekVal []byte if offset != nil { - from = append(from, *offset...) + seekVal = *offset + } + + for v, err = c.SeekBothRange(seekBytes, seekVal); v != nil && len(keys) != quantity && err == nil; _, v, err = c.NextDup() { + if len(v) > length.Hash { + keys = append(keys, v[:length.Hash]) + } else { + keys = append(keys, v) + } } - to, _ := kv.NextSubtree(account[:]) - r, err := tx.(kv.TemporalTx).DomainRange(kv.StorageDomain, from, to, minTxNum, order.Asc, quantity) if err != nil { return nil, err } - defer r.Close() - for r.HasNext() { - k, _, err := r.Next() - if err != nil { - return nil, err - } - keys = append(keys, libcommon.CopyBytes(k[20:])) - } return keys, nil } diff --git a/turbo/jsonrpc/trace_adhoc.go b/turbo/jsonrpc/trace_adhoc.go index 6350d87449e..eb21fda8177 100644 --- a/turbo/jsonrpc/trace_adhoc.go +++ b/turbo/jsonrpc/trace_adhoc.go @@ -19,10 +19,12 @@ import ( math2 "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/tracing" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/polygon/tracer" + "github.com/ledgerwatch/erigon/eth/tracers" + ptracer "github.com/ledgerwatch/erigon/polygon/tracer" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/shards" @@ -224,6 +226,61 @@ func (args *TraceCallParam) ToMessage(globalGasCap uint64, baseFee *uint256.Int) return msg, nil } +// ToTransaction converts CallArgs to the Transaction type used by the core evm +func (args *TraceCallParam) ToTransaction(globalGasCap uint64, baseFee *uint256.Int) (types.Transaction, error) { + msg, err := args.ToMessage(globalGasCap, baseFee) + if err != nil { + return nil, err + } + + var tx types.Transaction + switch { + case args.MaxFeePerGas != nil: + al := types2.AccessList{} + if args.AccessList != nil { + al = *args.AccessList + } + tx = &types.DynamicFeeTransaction{ + CommonTx: types.CommonTx{ + Nonce: msg.Nonce(), + Gas: msg.Gas(), + To: args.To, + Value: msg.Value(), + Data: msg.Data(), + }, + FeeCap: msg.FeeCap(), + Tip: msg.Tip(), + AccessList: al, + } + case args.AccessList != nil: + tx = &types.AccessListTx{ + LegacyTx: types.LegacyTx{ + CommonTx: types.CommonTx{ + Nonce: msg.Nonce(), + Gas: msg.Gas(), + To: args.To, + Value: msg.Value(), + Data: msg.Data(), + }, + GasPrice: msg.GasPrice(), + }, + AccessList: *args.AccessList, + } + default: + tx = &types.LegacyTx{ + CommonTx: types.CommonTx{ + Nonce: msg.Nonce(), 
+ Gas: msg.Gas(), + To: args.To, + Value: msg.Value(), + Data: msg.Data(), + }, + GasPrice: msg.GasPrice(), + } + } + return tx, nil +} + // OpenEthereum-style tracer type OeTracer struct { r *TraceCallResult @@ -242,9 +299,17 @@ type OeTracer struct { idx []string // Prefix for the "idx" inside operations, for easier navigation } -func (ot *OeTracer) CaptureTxStart(gasLimit uint64) {} - -func (ot *OeTracer) CaptureTxEnd(restGas uint64) {} +func (ot *OeTracer) Tracer() *tracers.Tracer { + return &tracers.Tracer{ + Hooks: &tracing.Hooks{ + OnEnter: ot.OnEnter, + OnExit: ot.OnExit, + OnOpcode: ot.OnOpcode, + }, + GetResult: ot.GetResult, + Stop: ot.Stop, + } +} func (ot *OeTracer) captureStartOrEnter(deep bool, typ vm.OpCode, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { //fmt.Printf("captureStartOrEnter deep %t, typ %s, from %x, to %x, create %t, input %x, gas %d, value %d, precompile %t\n", deep, typ.String(), from, to, create, input, gas, value, precompile) @@ -352,12 +417,8 @@ func (ot *OeTracer) captureStartOrEnter(deep bool, typ vm.OpCode, from libcommon ot.traceStack = append(ot.traceStack, trace) } -func (ot *OeTracer) CaptureStart(env *vm.EVM, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { - ot.captureStartOrEnter(false /* deep */, vm.CALL, from, to, precompile, create, input, gas, value, code) -} - -func (ot *OeTracer) CaptureEnter(typ vm.OpCode, from libcommon.Address, to libcommon.Address, precompile bool, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) { - ot.captureStartOrEnter(true /* deep */, typ, from, to, precompile, create, input, gas, value, code) +func (ot *OeTracer) OnEnter(depth int, typ byte, from libcommon.Address, to libcommon.Address, precompile bool, input []byte, gas uint64, value *uint256.Int, code []byte) { + ot.captureStartOrEnter(depth != 0 /* deep */, vm.OpCode(typ), from, to, precompile, vm.OpCode(typ) == vm.CREATE || vm.OpCode(typ) == vm.CREATE2 /* create */, input, gas, value, code) } func (ot *OeTracer) captureEndOrExit(deep bool, output []byte, usedGas uint64, err error) { @@ -429,17 +490,13 @@ func (ot *OeTracer) captureEndOrExit(deep bool, output []byte, usedGas uint64, e } } -func (ot *OeTracer) CaptureEnd(output []byte, usedGas uint64, err error) { - ot.captureEndOrExit(false /* deep */, output, usedGas, err) -} - -func (ot *OeTracer) CaptureExit(output []byte, usedGas uint64, err error) { - ot.captureEndOrExit(true /* deep */, output, usedGas, err) +func (ot *OeTracer) OnExit(depth int, output []byte, gasUsed uint64, err error, reverted bool) { + ot.captureEndOrExit(depth != 0 /* deep */, output, gasUsed, err) } -func (ot *OeTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, opDepth int, err error) { - memory := scope.Memory - st := scope.Stack +func (ot *OeTracer) OnOpcode(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, rData []byte, depth int, err error) { + memory := scope.MemoryData() + st := scope.StackData() if ot.r.VmTrace != nil { var vmTrace *VmTrace @@ -468,8 +525,8 @@ func (ot *OeTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scop showStack = 1 } for i := showStack - 1; i >= 0; i-- { - if st.Len() > i { - ot.lastVmOp.Ex.Push = append(ot.lastVmOp.Ex.Push, st.Back(i).String()) + if len(st) > i { + ot.lastVmOp.Ex.Push = append(ot.lastVmOp.Ex.Push, tracers.StackBack(st, i).String()) } } // Set the "mem" 
of the last operation @@ -479,7 +536,8 @@ func (ot *OeTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scop setMem = true } if setMem && ot.lastMemLen > 0 { - cpy := memory.GetCopy(int64(ot.lastMemOff), int64(ot.lastMemLen)) + // TODO: error handling + cpy, _ := tracers.GetMemoryCopyPadded(memory, int64(ot.lastMemOff), int64(ot.lastMemLen)) if len(cpy) == 0 { cpy = make([]byte, ot.lastMemLen) } @@ -488,13 +546,13 @@ func (ot *OeTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scop } if ot.lastOffStack != nil { ot.lastOffStack.Ex.Used = int(gas) - if st.Len() > 0 { - ot.lastOffStack.Ex.Push = []string{st.Back(0).String()} + if len(st) > 0 { + ot.lastOffStack.Ex.Push = []string{tracers.StackBack(st, 0).String()} } else { ot.lastOffStack.Ex.Push = []string{} } if ot.lastMemLen > 0 && memory != nil { - cpy := memory.GetCopy(int64(ot.lastMemOff), int64(ot.lastMemLen)) + cpy, _ := tracers.GetMemoryCopyPadded(memory, int64(ot.lastMemOff), int64(ot.lastMemLen)) if len(cpy) == 0 { cpy = make([]byte, ot.lastMemLen) } @@ -502,7 +560,7 @@ func (ot *OeTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scop } ot.lastOffStack = nil } - if ot.lastOp == vm.STOP && op == vm.STOP && len(ot.vmOpStack) == 0 { + if ot.lastOp == vm.STOP && vm.OpCode(op) == vm.STOP && len(ot.vmOpStack) == 0 { // Looks like OE is "optimising away" the second STOP return } @@ -516,52 +574,52 @@ func (ot *OeTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scop } ot.lastVmOp.Idx = fmt.Sprintf("%s%d", sb.String(), len(vmTrace.Ops)-1) } - ot.lastOp = op + ot.lastOp = vm.OpCode(op) ot.lastVmOp.Cost = int(cost) ot.lastVmOp.Pc = int(pc) ot.lastVmOp.Ex.Push = []string{} ot.lastVmOp.Ex.Used = int(gas) - int(cost) if !ot.compat { - ot.lastVmOp.Op = op.String() + ot.lastVmOp.Op = vm.OpCode(op).String() } - switch op { + switch vm.OpCode(op) { case vm.MSTORE, vm.MLOAD: - if st.Len() > 0 { - ot.lastMemOff = st.Back(0).Uint64() + if len(st) > 0 { + ot.lastMemOff = tracers.StackBack(st, 0).Uint64() ot.lastMemLen = 32 } case vm.MSTORE8: - if st.Len() > 0 { - ot.lastMemOff = st.Back(0).Uint64() + if len(st) > 0 { + ot.lastMemOff = tracers.StackBack(st, 0).Uint64() ot.lastMemLen = 1 } case vm.RETURNDATACOPY, vm.CALLDATACOPY, vm.CODECOPY: - if st.Len() > 2 { - ot.lastMemOff = st.Back(0).Uint64() - ot.lastMemLen = st.Back(2).Uint64() + if len(st) > 2 { + ot.lastMemOff = tracers.StackBack(st, 0).Uint64() + ot.lastMemLen = tracers.StackBack(st, 2).Uint64() } case vm.EXTCODECOPY: - if st.Len() > 3 { - ot.lastMemOff = st.Back(1).Uint64() - ot.lastMemLen = st.Back(3).Uint64() + if len(st) > 3 { + ot.lastMemOff = tracers.StackBack(st, 1).Uint64() + ot.lastMemLen = tracers.StackBack(st, 3).Uint64() } case vm.STATICCALL, vm.DELEGATECALL: - if st.Len() > 5 { - ot.memOffStack = append(ot.memOffStack, st.Back(4).Uint64()) - ot.memLenStack = append(ot.memLenStack, st.Back(5).Uint64()) + if len(st) > 5 { + ot.memOffStack = append(ot.memOffStack, tracers.StackBack(st, 4).Uint64()) + ot.memLenStack = append(ot.memLenStack, tracers.StackBack(st, 5).Uint64()) } case vm.CALL, vm.CALLCODE: - if st.Len() > 6 { - ot.memOffStack = append(ot.memOffStack, st.Back(5).Uint64()) - ot.memLenStack = append(ot.memLenStack, st.Back(6).Uint64()) + if len(st) > 6 { + ot.memOffStack = append(ot.memOffStack, tracers.StackBack(st, 5).Uint64()) + ot.memLenStack = append(ot.memLenStack, tracers.StackBack(st, 6).Uint64()) } case vm.CREATE, vm.CREATE2, vm.SELFDESTRUCT: // Effectively disable memory output ot.memOffStack = 
append(ot.memOffStack, 0) ot.memLenStack = append(ot.memLenStack, 0) case vm.SSTORE: - if st.Len() > 1 { - ot.lastVmOp.Ex.Store = &VmTraceStore{Key: st.Back(0).String(), Val: st.Back(1).String()} + if len(st) > 1 { + ot.lastVmOp.Ex.Store = &VmTraceStore{Key: tracers.StackBack(st, 0).String(), Val: tracers.StackBack(st, 1).String()} } } if ot.lastVmOp.Ex.Used < 0 { @@ -570,9 +628,12 @@ func (ot *OeTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scop } } -func (ot *OeTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, opDepth int, err error) { +func (ot *OeTracer) GetResult() (json.RawMessage, error) { + return json.RawMessage{}, nil } +func (ot *OeTracer) Stop(err error) {} + // Implements core/state/StateWriter to provide state diffs type StateDiff struct { sdMap map[libcommon.Address]*StateDiffAccount @@ -905,7 +966,7 @@ func (api *TraceAPIImpl) Call(ctx context.Context, args TraceCallParam, traceTyp return nil, err } - stateReader, err := rpchelper.CreateStateReader(ctx, tx, *blockNrOrHash, 0, api.filters, api.stateCache, chainConfig.ChainName) + stateReader, err := rpchelper.CreateStateReader(ctx, tx, *blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), chainConfig.ChainName) if err != nil { return nil, err } @@ -972,13 +1033,18 @@ func (api *TraceAPIImpl) Call(ctx context.Context, args TraceCallParam, traceTyp return nil, err } + txn, err := args.ToTransaction(api.gasCap, baseFee) + if err != nil { + return nil, err + } + blockCtx := transactions.NewEVMBlockContext(engine, header, blockNrOrHash.RequireCanonical, tx, api._blockReader) txCtx := core.NewEVMTxContext(msg) blockCtx.GasLimit = math.MaxUint64 blockCtx.MaxGasLimit = true - evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vm.Config{Debug: traceTypeTrace, Tracer: &ot}) + evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vm.Config{Debug: traceTypeTrace, Tracer: ot.Tracer().Hooks}) // Wait for the context to be done and cancel the evm. 
Even if the // EVM has finished, cancelling may be done (repeatedly) @@ -990,10 +1056,22 @@ func (api *TraceAPIImpl) Call(ctx context.Context, args TraceCallParam, traceTyp gp := new(core.GasPool).AddGas(msg.Gas()).AddBlobGas(msg.BlobGas()) var execResult *core.ExecutionResult ibs.SetTxContext(libcommon.Hash{}, libcommon.Hash{}, 0) + ibs.SetLogger(ot.Tracer().Hooks) + + if ot.Tracer() != nil && ot.Tracer().Hooks.OnTxStart != nil { + ot.Tracer().Hooks.OnTxStart(evm.GetVMContext(), txn, msg.From()) + } execResult, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, true /* gasBailout */) if err != nil { + if ot.Tracer() != nil && ot.Tracer().Hooks.OnTxEnd != nil { + ot.Tracer().Hooks.OnTxEnd(nil, err) + } return nil, err } + + if ot.Tracer() != nil && ot.Tracer().Hooks.OnTxEnd != nil { + ot.Tracer().Hooks.OnTxEnd(&types.Receipt{GasUsed: execResult.UsedGas}, nil) + } traceResult.Output = libcommon.CopyBytes(execResult.ReturnData) if traceTypeStateDiff { sdMap := make(map[libcommon.Address]*StateDiffAccount) @@ -1090,17 +1168,23 @@ func (api *TraceAPIImpl) CallMany(ctx context.Context, calls json.RawMessage, pa } } msgs := make([]types.Message, len(callParams)) + txns := make([]types.Transaction, len(callParams)) for i, args := range callParams { msgs[i], err = args.ToMessage(api.gasCap, baseFee) if err != nil { return nil, fmt.Errorf("convert callParam to msg: %w", err) } + + txns[i], err = args.ToTransaction(api.gasCap, baseFee) + if err != nil { + return nil, fmt.Errorf("convert callParam to txn: %w", err) + } } - results, _, err := api.doCallMany(ctx, dbtx, msgs, callParams, parentNrOrHash, nil, true /* gasBailout */, -1 /* all tx indices */) + results, _, err := api.doCallMany(ctx, dbtx, txns, msgs, callParams, parentNrOrHash, nil, true /* gasBailout */, -1 /* all tx indices */) return results, err } -func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []types.Message, callParams []TraceCallParam, +func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, txns []types.Transaction, msgs []types.Message, callParams []TraceCallParam, parentNrOrHash *rpc.BlockNumberOrHash, header *types.Header, gasBailout bool, txIndexNeeded int, ) ([]*TraceCallResult, *state.IntraBlockState, error) { chainConfig, err := api.chainConfig(ctx, dbtx) @@ -1117,7 +1201,7 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type if err != nil { return nil, nil, err } - stateReader, err := rpchelper.CreateStateReader(ctx, dbtx, *parentNrOrHash, 0, api.filters, api.stateCache, chainConfig.ChainName) + stateReader, err := rpchelper.CreateStateReader(ctx, dbtx, *parentNrOrHash, 0, api.filters, api.stateCache, api.historyV3(dbtx), chainConfig.ChainName) if err != nil { return nil, nil, err } @@ -1182,6 +1266,7 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type traceResult := &TraceCallResult{Trace: []*ParityTrace{}, TransactionHash: args.txHash} vmConfig := vm.Config{} + var tracer *tracers.Tracer if (traceTypeTrace && (txIndexNeeded == -1 || txIndex == txIndexNeeded)) || traceTypeVmTrace { var ot OeTracer ot.compat = api.compatibility @@ -1194,7 +1279,8 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type traceResult.VmTrace = &VmTrace{Ops: []*VmTraceOp{}} } vmConfig.Debug = true - vmConfig.Tracer = &ot + vmConfig.Tracer = ot.Tracer().Hooks + tracer = ot.Tracer() } blockCtx := transactions.NewEVMBlockContext(engine, header, parentNrOrHash.RequireCanonical, dbtx, api._blockReader) @@ -1228,7 +1314,7 
@@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type var execResult *core.ExecutionResult if args.isBorStateSyncTxn { txFinalized = true - execResult, err = tracer.TraceBorStateSyncTxnTraceAPI( + execResult, err = ptracer.TraceBorStateSyncTxnTraceAPI( ctx, dbtx, &vmConfig, @@ -1240,6 +1326,7 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type header.Hash(), header.Number.Uint64(), header.Time, + tracer, ) } else { if args.txHash != nil { @@ -1248,15 +1335,27 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type ibs.SetTxContext(libcommon.Hash{}, header.Hash(), txIndex) } + if tracer != nil { + ibs.SetLogger(tracer.Hooks) + } txCtx := core.NewEVMTxContext(msg) evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vmConfig) gp := new(core.GasPool).AddGas(msg.Gas()).AddBlobGas(msg.BlobGas()) + if tracer != nil && tracer.Hooks.OnTxStart != nil { + tracer.Hooks.OnTxStart(evm.GetVMContext(), txns[txIndex], msg.From()) + } execResult, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, gasBailout /* gasBailout */) } if err != nil { + if tracer != nil && tracer.Hooks.OnTxEnd != nil { + tracer.Hooks.OnTxEnd(nil, err) + } return nil, nil, fmt.Errorf("first run for txIndex %d error: %w", txIndex, err) } + if tracer != nil && tracer.Hooks.OnTxEnd != nil { + tracer.Hooks.OnTxEnd(&types.Receipt{GasUsed: execResult.UsedGas}, nil) + } chainRules := chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Time) traceResult.Output = libcommon.CopyBytes(execResult.ReturnData) diff --git a/turbo/jsonrpc/trace_filtering.go b/turbo/jsonrpc/trace_filtering.go index 31f871e8465..1705d1f429c 100644 --- a/turbo/jsonrpc/trace_filtering.go +++ b/turbo/jsonrpc/trace_filtering.go @@ -8,12 +8,14 @@ import ( "github.com/ledgerwatch/erigon-lib/common/hexutil" "github.com/ledgerwatch/erigon/eth/consensuschain" + "github.com/RoaringBitmap/roaring/roaring64" jsoniter "github.com/json-iterator/go" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/erigon-lib/chain" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/bitmapdb" "github.com/ledgerwatch/erigon-lib/kv/iter" "github.com/ledgerwatch/erigon-lib/kv/order" "github.com/ledgerwatch/erigon-lib/kv/rawdbv3" @@ -233,6 +235,59 @@ func (api *TraceAPIImpl) Block(ctx context.Context, blockNr rpc.BlockNumber, gas return out, err } +func traceFilterBitmaps(tx kv.Tx, req TraceFilterRequest, from, to uint64) (fromAddresses, toAddresses map[common.Address]struct{}, allBlocks *roaring64.Bitmap, err error) { + fromAddresses = make(map[common.Address]struct{}, len(req.FromAddress)) + toAddresses = make(map[common.Address]struct{}, len(req.ToAddress)) + allBlocks = roaring64.New() + var blocksTo roaring64.Bitmap + for _, addr := range req.FromAddress { + if addr != nil { + b, err := bitmapdb.Get64(tx, kv.CallFromIndex, addr.Bytes(), from, to) + if err != nil { + if errors.Is(err, ethdb.ErrKeyNotFound) { + continue + } + return nil, nil, nil, err + } + allBlocks.Or(b) + fromAddresses[*addr] = struct{}{} + } + } + + for _, addr := range req.ToAddress { + if addr != nil { + b, err := bitmapdb.Get64(tx, kv.CallToIndex, addr.Bytes(), from, to) + if err != nil { + if errors.Is(err, ethdb.ErrKeyNotFound) { + continue + } + return nil, nil, nil, err + } + blocksTo.Or(b) + toAddresses[*addr] = struct{}{} + } + } + + switch req.Mode { + case TraceFilterModeIntersection: + allBlocks.And(&blocksTo) + case TraceFilterModeUnion: + 
fallthrough + default: + allBlocks.Or(&blocksTo) + } + + // Special case - if no addresses specified, take all traces + if len(req.FromAddress) == 0 && len(req.ToAddress) == 0 { + allBlocks.AddRange(from, to) + } else { + allBlocks.RemoveRange(0, from) + allBlocks.RemoveRange(to, uint64(0x100000000)) + } + + return fromAddresses, toAddresses, allBlocks, nil +} + func traceFilterBitmapsV3(tx kv.TemporalTx, req TraceFilterRequest, from, to uint64) (fromAddresses, toAddresses map[common.Address]struct{}, allBlocks iter.U64, err error) { fromAddresses = make(map[common.Address]struct{}, len(req.FromAddress)) toAddresses = make(map[common.Address]struct{}, len(req.ToAddress)) @@ -285,7 +340,6 @@ func traceFilterBitmapsV3(tx kv.TemporalTx, req TraceFilterRequest, from, to uin // Pull blocks which have txs with matching address func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, gasBailOut *bool, stream *jsoniter.Stream) error { if gasBailOut == nil { - //nolint gasBailOut = new(bool) // false by default } dbtx, err1 := api.kv.BeginRo(ctx) @@ -312,10 +366,169 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, gas return fmt.Errorf("invalid parameters: fromBlock cannot be greater than toBlock") } - return api.filterV3(ctx, dbtx.(kv.TemporalTx), fromBlock, toBlock, req, stream, *gasBailOut) + if api.historyV3(dbtx) { + return api.filterV3(ctx, dbtx.(kv.TemporalTx), fromBlock, toBlock, req, stream) + } + toBlock++ //+1 because internally Erigon using semantic [from, to), but some RPC have different semantic + fromAddresses, toAddresses, allBlocks, err := traceFilterBitmaps(dbtx, req, fromBlock, toBlock) + if err != nil { + return err + } + + chainConfig, err := api.chainConfig(ctx, dbtx) + if err != nil { + return err + } + + var json = jsoniter.ConfigCompatibleWithStandardLibrary + stream.WriteArrayStart() + first := true + // Execute all transactions in picked blocks + + count := uint64(^uint(0)) // this just makes it easier to use below + if req.Count != nil { + count = *req.Count + } + after := uint64(0) // this just makes it easier to use below + if req.After != nil { + after = *req.After + } + nSeen := uint64(0) + nExported := uint64(0) + + it := allBlocks.Iterator() + for it.HasNext() { + b := it.Next() + // Extract transactions from block + block, bErr := api.blockByNumberWithSenders(ctx, dbtx, b) + if bErr != nil { + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(bErr, stream) + stream.WriteObjectEnd() + continue + } + if block == nil { + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(fmt.Errorf("could not find block %d", b), stream) + stream.WriteObjectEnd() + continue + } + + blockHash := block.Hash() + blockNumber := block.NumberU64() + signer := types.MakeSigner(chainConfig, b, block.Time()) + t, syscall, tErr := api.callManyTransactions(ctx, dbtx, block, []string{TraceTypeTrace}, -1 /* all tx indices */, *gasBailOut, signer, chainConfig) + if tErr != nil { + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(tErr, stream) + stream.WriteObjectEnd() + continue + } + isIntersectionMode := req.Mode == TraceFilterModeIntersection + includeAll := len(fromAddresses) == 0 && len(toAddresses) == 0 + for i, trace := range t { + txPosition := uint64(i) + // Check if transaction concerns any of the addresses we wanted + for _, pt := range trace.Trace { + if 
includeAll || filterTrace(pt, fromAddresses, toAddresses, isIntersectionMode) { + nSeen++ + pt.BlockHash = &blockHash + pt.BlockNumber = &blockNumber + pt.TransactionHash = trace.TransactionHash + pt.TransactionPosition = &txPosition + b, err := json.Marshal(pt) + if err != nil { + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(err, stream) + stream.WriteObjectEnd() + continue + } + if nSeen > after && nExported < count { + if first { + first = false + } else { + stream.WriteMore() + } + if _, err := stream.Write(b); err != nil { + return err + } + nExported++ + } + } + } + } + + rewards, err := api.engine().CalculateRewards(chainConfig, block.Header(), block.Uncles(), syscall) + if err != nil { + return err + } + + for _, r := range rewards { + if _, ok := toAddresses[r.Beneficiary]; ok || includeAll { + nSeen++ + var tr ParityTrace + rewardAction := &RewardTraceAction{} + rewardAction.Author = r.Beneficiary + rewardAction.RewardType = rewardKindToString(r.Kind) + rewardAction.Value.ToInt().Set(r.Amount.ToBig()) + tr.Action = rewardAction + tr.BlockHash = &common.Hash{} + copy(tr.BlockHash[:], block.Hash().Bytes()) + tr.BlockNumber = new(uint64) + *tr.BlockNumber = block.NumberU64() + tr.Type = "reward" // nolint: goconst + tr.TraceAddress = []int{} + b, err := json.Marshal(tr) + if err != nil { + if first { + first = false + } else { + stream.WriteMore() + } + stream.WriteObjectStart() + rpc.HandleError(err, stream) + stream.WriteObjectEnd() + continue + } + if nSeen > after && nExported < count { + if first { + first = false + } else { + stream.WriteMore() + } + if _, err := stream.Write(b); err != nil { + return err + } + nExported++ + } + } + } + } + stream.WriteArrayEnd() + return stream.Flush() } -func (api *TraceAPIImpl) filterV3(ctx context.Context, dbtx kv.TemporalTx, fromBlock, toBlock uint64, req TraceFilterRequest, stream *jsoniter.Stream, gasBailOut bool) error { +func (api *TraceAPIImpl) filterV3(ctx context.Context, dbtx kv.TemporalTx, fromBlock, toBlock uint64, req TraceFilterRequest, stream *jsoniter.Stream) error { var fromTxNum, toTxNum uint64 var err error if fromBlock > 0 { @@ -567,7 +780,7 @@ func (api *TraceAPIImpl) filterV3(ctx context.Context, dbtx kv.TemporalTx, fromB ot.idx = []string{fmt.Sprintf("%d-", txIndex)} ot.traceAddr = []int{} vmConfig.Debug = true - vmConfig.Tracer = &ot + vmConfig.Tracer = ot.Tracer().Hooks ibs := state.New(cachedReader) blockCtx := transactions.NewEVMBlockContext(engine, lastHeader, true /* requireCanonical */, dbtx, api._blockReader) @@ -576,9 +789,17 @@ func (api *TraceAPIImpl) filterV3(ctx context.Context, dbtx kv.TemporalTx, fromB gp := new(core.GasPool).AddGas(msg.Gas()).AddBlobGas(msg.BlobGas()) ibs.SetTxContext(txHash, lastBlockHash, txIndex) + ibs.SetLogger(ot.Tracer().Hooks) + + if ot.Tracer() != nil && ot.Tracer().Hooks.OnTxStart != nil { + ot.Tracer().Hooks.OnTxStart(evm.GetVMContext(), txn, msg.From()) + } var execResult *core.ExecutionResult - execResult, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, gasBailOut) + execResult, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */) if err != nil { + if ot.Tracer() != nil && ot.Tracer().Hooks.OnTxEnd != nil { + ot.Tracer().Hooks.OnTxEnd(nil, err) + } if first { first = false } else { @@ -589,6 +810,9 @@ func (api *TraceAPIImpl) filterV3(ctx context.Context, dbtx kv.TemporalTx, fromB stream.WriteObjectEnd() continue } + if ot.Tracer() != nil && ot.Tracer().Hooks.OnTxEnd != 
nil { + ot.Tracer().Hooks.OnTxEnd(&types.Receipt{GasUsed: execResult.UsedGas}, nil) + } traceResult.Output = common.Copy(execResult.ReturnData) if err = ibs.FinalizeTx(evm.ChainRules(), noop); err != nil { if first { @@ -713,7 +937,7 @@ func (api *TraceAPIImpl) callManyTransactions( } callParams := make([]TraceCallParam, 0, len(txs)) - reader, err := rpchelper.CreateHistoryStateReader(dbtx, blockNumber, txIndex, cfg.ChainName) + reader, err := rpchelper.CreateHistoryStateReader(dbtx, blockNumber, txIndex, api.historyV3(dbtx), cfg.ChainName) if err != nil { return nil, nil, err } @@ -726,7 +950,7 @@ func (api *TraceAPIImpl) callManyTransactions( engine := api.engine() consensusHeaderReader := consensuschain.NewReader(cfg, dbtx, nil, nil) logger := log.New("trace_filtering") - err = core.InitializeBlockExecution(engine.(consensus.Engine), consensusHeaderReader, block.HeaderNoCopy(), cfg, initialState, logger) + err = core.InitializeBlockExecution(engine.(consensus.Engine), consensusHeaderReader, block.HeaderNoCopy(), cfg, initialState, logger, nil) if err != nil { return nil, nil, err } @@ -750,7 +974,7 @@ func (api *TraceAPIImpl) callManyTransactions( // gnosis might have a fee free account here if msg.FeeCap().IsZero() && engine != nil { syscall := func(contract common.Address, data []byte) ([]byte, error) { - return core.SysCallContract(contract, data, cfg, initialState, header, engine, true /* constCall */) + return core.SysCallContract(contract, data, cfg, initialState, header, engine, true /* constCall */, nil) } msg.SetIsFree(engine.IsServiceTransaction(msg.From(), syscall)) } @@ -767,7 +991,7 @@ func (api *TraceAPIImpl) callManyTransactions( parentHash := block.ParentHash() - traces, lastState, cmErr := api.doCallMany(ctx, dbtx, msgs, callParams, &rpc.BlockNumberOrHash{ + traces, lastState, cmErr := api.doCallMany(ctx, dbtx, txs, msgs, callParams, &rpc.BlockNumberOrHash{ BlockNumber: &parentNo, BlockHash: &parentHash, RequireCanonical: true, @@ -778,7 +1002,7 @@ func (api *TraceAPIImpl) callManyTransactions( } syscall := func(contract common.Address, data []byte) ([]byte, error) { - return core.SysCallContract(contract, data, cfg, lastState, header, engine, false /* constCall */) + return core.SysCallContract(contract, data, cfg, lastState, header, engine, false /* constCall */, nil) } return traces, syscall, nil diff --git a/turbo/jsonrpc/tracing.go b/turbo/jsonrpc/tracing.go index b5d54309fa3..3a40e6dae63 100644 --- a/turbo/jsonrpc/tracing.go +++ b/turbo/jsonrpc/tracing.go @@ -18,7 +18,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/core/vm/evmtypes" - "github.com/ledgerwatch/erigon/eth/tracers" + tracerConfig "github.com/ledgerwatch/erigon/eth/tracers/config" bortypes "github.com/ledgerwatch/erigon/polygon/bor/types" polygontracer "github.com/ledgerwatch/erigon/polygon/tracer" "github.com/ledgerwatch/erigon/rpc" @@ -28,16 +28,16 @@ import ( ) // TraceBlockByNumber implements debug_traceBlockByNumber. Returns Geth style block traces. -func (api *PrivateDebugAPIImpl) TraceBlockByNumber(ctx context.Context, blockNum rpc.BlockNumber, config *tracers.TraceConfig, stream *jsoniter.Stream) error { +func (api *PrivateDebugAPIImpl) TraceBlockByNumber(ctx context.Context, blockNum rpc.BlockNumber, config *tracerConfig.TraceConfig, stream *jsoniter.Stream) error { return api.traceBlock(ctx, rpc.BlockNumberOrHashWithNumber(blockNum), config, stream) } // TraceBlockByHash implements debug_traceBlockByHash. 
Returns Geth style block traces. -func (api *PrivateDebugAPIImpl) TraceBlockByHash(ctx context.Context, hash common.Hash, config *tracers.TraceConfig, stream *jsoniter.Stream) error { +func (api *PrivateDebugAPIImpl) TraceBlockByHash(ctx context.Context, hash common.Hash, config *tracerConfig.TraceConfig, stream *jsoniter.Stream) error { return api.traceBlock(ctx, rpc.BlockNumberOrHashWithHash(hash, true), config, stream) } -func (api *PrivateDebugAPIImpl) traceBlock(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, config *tracers.TraceConfig, stream *jsoniter.Stream) error { +func (api *PrivateDebugAPIImpl) traceBlock(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, config *tracerConfig.TraceConfig, stream *jsoniter.Stream) error { tx, err := api.db.BeginRo(ctx) if err != nil { stream.WriteNil() @@ -69,7 +69,7 @@ func (api *PrivateDebugAPIImpl) traceBlock(ctx context.Context, blockNrOrHash rp } if config == nil { - config = &tracers.TraceConfig{} + config = &tracerConfig.TraceConfig{} } if config.BorTraceEnabled == nil { @@ -84,7 +84,7 @@ func (api *PrivateDebugAPIImpl) traceBlock(ctx context.Context, blockNrOrHash rp } engine := api.engine() - _, blockCtx, _, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, 0) + _, blockCtx, _, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, 0, api.historyV3(tx)) if err != nil { stream.WriteNil() return err @@ -136,7 +136,7 @@ func (api *PrivateDebugAPIImpl) traceBlock(ctx context.Context, blockNrOrHash rp if msg.FeeCap().IsZero() && engine != nil { syscall := func(contract common.Address, data []byte) ([]byte, error) { - return core.SysCallContract(contract, data, chainConfig, ibs, block.Header(), engine, true /* constCall */) + return core.SysCallContract(contract, data, chainConfig, ibs, block.Header(), engine, true /* constCall */, nil) } msg.SetIsFree(engine.IsServiceTransaction(msg.From(), syscall)) } @@ -164,7 +164,7 @@ func (api *PrivateDebugAPIImpl) traceBlock(ctx context.Context, blockNrOrHash rp api.evmCallTimeout, ) } else { - err = transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream, api.evmCallTimeout) + err = transactions.TraceTx(ctx, txn, msg, blockCtx, txCtx, ibs, config, chainConfig, stream, api.evmCallTimeout) } if err == nil { err = ibs.FinalizeTx(rules, state.NewNoopWriter()) @@ -195,7 +195,7 @@ func (api *PrivateDebugAPIImpl) traceBlock(ctx context.Context, blockNrOrHash rp } // TraceTransaction implements debug_traceTransaction. Returns Geth style transaction traces. 
-func (api *PrivateDebugAPIImpl) TraceTransaction(ctx context.Context, hash common.Hash, config *tracers.TraceConfig, stream *jsoniter.Stream) error { +func (api *PrivateDebugAPIImpl) TraceTransaction(ctx context.Context, hash common.Hash, config *tracerConfig.TraceConfig, stream *jsoniter.Stream) error { tx, err := api.db.BeginRo(ctx) if err != nil { stream.WriteNil() @@ -275,7 +275,7 @@ func (api *PrivateDebugAPIImpl) TraceTransaction(ctx context.Context, hash commo } engine := api.engine() - msg, blockCtx, txCtx, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, txnIndex) + msg, blockCtx, txCtx, ibs, _, err := transactions.ComputeTxEnv(ctx, engine, block, chainConfig, api._blockReader, tx, txnIndex, api.historyV3(tx)) if err != nil { stream.WriteNil() return err @@ -297,11 +297,11 @@ func (api *PrivateDebugAPIImpl) TraceTransaction(ctx context.Context, hash commo ) } // Trace the transaction and return - return transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream, api.evmCallTimeout) + return transactions.TraceTx(ctx, txn, msg, blockCtx, txCtx, ibs, config, chainConfig, stream, api.evmCallTimeout) } // TraceCall implements debug_traceCall. Returns Geth style call traces. -func (api *PrivateDebugAPIImpl) TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, config *tracers.TraceConfig, stream *jsoniter.Stream) error { +func (api *PrivateDebugAPIImpl) TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, config *tracerConfig.TraceConfig, stream *jsoniter.Stream) error { dbtx, err := api.db.BeginRo(ctx) if err != nil { return fmt.Errorf("create ro transaction: %v", err) } @@ -326,9 +326,9 @@ func (api *PrivateDebugAPIImpl) TraceCall(ctx context.Context, args ethapi.CallA var stateReader state.StateReader if config.TxIndex == nil || isLatest { - stateReader, err = rpchelper.CreateStateReader(ctx, dbtx, blockNrOrHash, 0, api.filters, api.stateCache, chainConfig.ChainName) + stateReader, err = rpchelper.CreateStateReader(ctx, dbtx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(dbtx), chainConfig.ChainName) } else { - stateReader, err = rpchelper.CreateHistoryStateReader(dbtx, blockNumber, int(*config.TxIndex), chainConfig.ChainName) + stateReader, err = rpchelper.CreateHistoryStateReader(dbtx, blockNumber, int(*config.TxIndex), api.historyV3(dbtx), chainConfig.ChainName) } if err != nil { return fmt.Errorf("create state reader: %v", err) @@ -361,13 +361,18 @@ func (api *PrivateDebugAPIImpl) TraceCall(ctx context.Context, args ethapi.CallA return fmt.Errorf("convert args to msg: %v", err) } + transaction, err := args.ToTransaction(api.GasCap, baseFee) + if err != nil { + return fmt.Errorf("convert args to transaction: %v", err) + } + blockCtx := transactions.NewEVMBlockContext(engine, header, blockNrOrHash.RequireCanonical, dbtx, api._blockReader) txCtx := core.NewEVMTxContext(msg) // Trace the transaction and return - return transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream, api.evmCallTimeout) + return transactions.TraceTx(ctx, transaction, msg, blockCtx, txCtx, ibs, config, chainConfig, stream, api.evmCallTimeout) } -func (api *PrivateDebugAPIImpl) TraceCallMany(ctx context.Context, bundles []Bundle, simulateContext StateContext, config 
*tracerConfig.TraceConfig, stream *jsoniter.Stream) error { var ( hash common.Hash replayTransactions types.Transactions @@ -378,7 +383,7 @@ func (api *PrivateDebugAPIImpl) TraceCallMany(ctx context.Context, bundles []Bun ) if config == nil { - config = &tracers.TraceConfig{} + config = &tracerConfig.TraceConfig{} } overrideBlockHash = make(map[uint64]common.Hash) @@ -446,7 +451,7 @@ func (api *PrivateDebugAPIImpl) TraceCallMany(ctx context.Context, bundles []Bun replayTransactions = block.Transactions()[:transactionIndex] - stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNum-1)), 0, api.filters, api.stateCache, chainConfig.ChainName) + stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNum-1)), 0, api.filters, api.stateCache, api.historyV3(tx), chainConfig.ChainName) if err != nil { stream.WriteNil() return err @@ -524,10 +529,17 @@ func (api *PrivateDebugAPIImpl) TraceCallMany(ctx context.Context, bundles []Bun stream.WriteArrayEnd() return err } + + transaction, err := txn.ToTransaction(api.GasCap, blockCtx.BaseFee) + if err != nil { + stream.WriteNil() + return err + } + txCtx = core.NewEVMTxContext(msg) ibs := evm.IntraBlockState().(*state.IntraBlockState) ibs.SetTxContext(common.Hash{}, header.Hash(), txnIndex) - err = transactions.TraceTx(ctx, msg, blockCtx, txCtx, evm.IntraBlockState(), config, chainConfig, stream, api.evmCallTimeout) + err = transactions.TraceTx(ctx, transaction, msg, blockCtx, txCtx, ibs, config, chainConfig, stream, api.evmCallTimeout) if err != nil { stream.WriteArrayEnd() stream.WriteArrayEnd() diff --git a/turbo/node/node.go b/turbo/node/node.go index 6bbc9901308..b2230db1b31 100644 --- a/turbo/node/node.go +++ b/turbo/node/node.go @@ -12,6 +12,7 @@ import ( "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/eth" "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/tracers" "github.com/ledgerwatch/erigon/node" "github.com/ledgerwatch/erigon/node/nodecfg" "github.com/ledgerwatch/erigon/params" @@ -114,6 +115,7 @@ func New( nodeConfig *nodecfg.Config, ethConfig *ethconfig.Config, logger log.Logger, + tracer *tracers.Tracer, ) (*ErigonNode, error) { //prepareBuckets(optionalParams.CustomBuckets) node, err := node.New(ctx, nodeConfig, logger) @@ -121,7 +123,7 @@ func New( utils.Fatalf("Failed to create Erigon node: %v", err) } - ethereum, err := eth.New(ctx, node, ethConfig, logger) + ethereum, err := eth.New(ctx, node, ethConfig, logger, tracer) if err != nil { return nil, err } diff --git a/turbo/rpchelper/helper.go b/turbo/rpchelper/helper.go index 824d0afa891..bae2ab70897 100644 --- a/turbo/rpchelper/helper.go +++ b/turbo/rpchelper/helper.go @@ -12,6 +12,7 @@ import ( "github.com/ledgerwatch/erigon-lib/wrap" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/systemcontracts" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" borfinality "github.com/ledgerwatch/erigon/polygon/bor/finality" "github.com/ledgerwatch/erigon/polygon/bor/finality/whitelist" @@ -108,26 +109,31 @@ func _GetBlockNumber(requireCanonical bool, blockNrOrHash rpc.BlockNumberOrHash, return blockNumber, hash, blockNumber == plainStateBlockNumber, nil } -func CreateStateReader(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash, txnIndex int, filters *Filters, stateCache kvcache.Cache, chainName string) (state.StateReader, error) { 
+func CreateStateReader(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash, txnIndex int, filters *Filters, stateCache kvcache.Cache, historyV3 bool, chainName string) (state.StateReader, error) { blockNumber, _, latest, err := _GetBlockNumber(true, blockNrOrHash, tx, filters) if err != nil { return nil, err } - return CreateStateReaderFromBlockNumber(ctx, tx, blockNumber, latest, txnIndex, stateCache, chainName) + return CreateStateReaderFromBlockNumber(ctx, tx, blockNumber, latest, txnIndex, stateCache, historyV3, chainName) } -func CreateStateReaderFromBlockNumber(ctx context.Context, tx kv.Tx, blockNumber uint64, latest bool, txnIndex int, stateCache kvcache.Cache, chainName string) (state.StateReader, error) { +func CreateStateReaderFromBlockNumber(ctx context.Context, tx kv.Tx, blockNumber uint64, latest bool, txnIndex int, stateCache kvcache.Cache, historyV3 bool, chainName string) (state.StateReader, error) { if latest { cacheView, err := stateCache.View(ctx, tx) if err != nil { return nil, err } - return CreateLatestCachedStateReader(cacheView, tx), nil + return CreateLatestCachedStateReader(cacheView, tx, historyV3), nil } - return CreateHistoryStateReader(tx, blockNumber+1, txnIndex, chainName) + return CreateHistoryStateReader(tx, blockNumber+1, txnIndex, historyV3, chainName) } -func CreateHistoryStateReader(tx kv.Tx, blockNumber uint64, txnIndex int, chainName string) (state.StateReader, error) { +func CreateHistoryStateReader(tx kv.Tx, blockNumber uint64, txnIndex int, historyV3 bool, chainName string) (state.StateReader, error) { + if !historyV3 { + r := state.NewPlainState(tx, blockNumber, systemcontracts.SystemContractCodeLookup[chainName]) + //r.SetTrace(true) + return r, nil + } r := state.NewHistoryReaderV3() r.SetTx(tx) //r.SetTrace(true) @@ -139,19 +145,28 @@ func CreateHistoryStateReader(tx kv.Tx, blockNumber uint64, txnIndex int, chainN return r, nil } -func NewLatestStateReader(tx kv.Tx) state.StateReader { - return state.NewReaderV4(tx.(kv.TemporalGetter)) +func NewLatestStateReader(tx kv.Tx, histV3 bool) state.StateReader { + if histV3 { + return state.NewReaderV4(tx.(kv.TemporalGetter)) + } + return state.NewPlainStateReader(tx) } -func NewLatestStateWriter(txc wrap.TxContainer, blockNum uint64) state.StateWriter { - domains := txc.Doms - minTxNum, err := rawdbv3.TxNums.Min(domains.Tx(), blockNum) - if err != nil { - panic(err) +func NewLatestStateWriter(txc wrap.TxContainer, blockNum uint64, histV3 bool) state.StateWriter { + if histV3 { + domains := txc.Doms + minTxNum, err := rawdbv3.TxNums.Min(domains.Tx(), blockNum) + if err != nil { + panic(err) + } + domains.SetTxNum(uint64(int(minTxNum) + /* 1 system txNum in beginning of block */ 1)) + return state.NewWriterV4(domains) } - domains.SetTxNum(uint64(int(minTxNum) + /* 1 system txNum in begining of block */ 1)) - return state.NewWriterV4(domains) + return state.NewPlainStateWriter(txc.Tx, txc.Tx, blockNum) } -func CreateLatestCachedStateReader(cache kvcache.CacheView, tx kv.Tx) state.StateReader { - return state.NewCachedReader3(cache, tx.(kv.TemporalTx)) +func CreateLatestCachedStateReader(cache kvcache.CacheView, tx kv.Tx, histV3 bool) state.StateReader { + if histV3 { + return state.NewCachedReader3(cache, tx.(kv.TemporalTx)) + } + return state.NewCachedReader2(cache, tx) } diff --git a/turbo/services/interfaces.go b/turbo/services/interfaces.go index dc4aa1474bf..81fa90110ab 100644 --- a/turbo/services/interfaces.go +++ b/turbo/services/interfaces.go @@ -116,10 +116,8 @@ type 
FullBlockReader interface { type BlockSnapshots interface { LogStat(label string) ReopenFolder() error - ReopenSegments(types []snaptype.Type, allowGaps bool) error SegmentsMax() uint64 SegmentsMin() uint64 - Delete(fileName string) error Types() []snaptype.Type Close() } @@ -127,7 +125,7 @@ type BlockSnapshots interface { // BlockRetire - freezing blocks: moving old data from DB to snapshot files type BlockRetire interface { PruneAncientBlocks(tx kv.RwTx, limit int) error - RetireBlocksInBackground(ctx context.Context, miBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []DownloadRequest) error, onDelete func(l []string) error, onFinishRetire func() error) + RetireBlocksInBackground(ctx context.Context, miBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []DownloadRequest) error, onDelete func(l []string) error) HasNewFrozenFiles() bool BuildMissedIndicesIfNeed(ctx context.Context, logPrefix string, notifier DBEventNotifier, cc *chain.Config) error SetWorkers(workers int) diff --git a/turbo/snapshotsync/freezeblocks/block_reader.go b/turbo/snapshotsync/freezeblocks/block_reader.go index a18b348655f..390c16a098c 100644 --- a/turbo/snapshotsync/freezeblocks/block_reader.go +++ b/turbo/snapshotsync/freezeblocks/block_reader.go @@ -520,6 +520,7 @@ func (r *BlockReader) BodyWithTransactions(ctx context.Context, tx kv.Getter, ha log.Info(dbgPrefix + "found in db=false") } } + view := r.sn.View() defer view.Close() @@ -702,7 +703,7 @@ func (r *BlockReader) blockWithSenders(ctx context.Context, tx kv.Getter, hash c return } if txsAmount == 0 { - block = types.NewBlockFromStorage(hash, h, nil, b.Uncles, b.Withdrawals, b.Requests) + block = types.NewBlockFromStorage(hash, h, nil, b.Uncles, b.Withdrawals) if len(senders) != block.Transactions().Len() { if dbgLogs { log.Info(dbgPrefix + fmt.Sprintf("found block with %d transactions, but %d senders", block.Transactions().Len(), len(senders))) @@ -725,7 +726,7 @@ func (r *BlockReader) blockWithSenders(ctx context.Context, tx kv.Getter, hash c if err != nil { return nil, nil, err } - block = types.NewBlockFromStorage(hash, h, txs, b.Uncles, b.Withdrawals, b.Requests) + block = types.NewBlockFromStorage(hash, h, txs, b.Uncles, b.Withdrawals) if len(senders) != block.Transactions().Len() { if dbgLogs { log.Info(dbgPrefix + fmt.Sprintf("found block with %d transactions, but %d senders", block.Transactions().Len(), len(senders))) @@ -810,10 +811,10 @@ func (r *BlockReader) bodyFromSnapshot(blockHeight uint64, sn *Segment, buf []by if b == nil { return nil, 0, 0, buf, nil } + body := new(types.Body) body.Uncles = b.Uncles body.Withdrawals = b.Withdrawals - body.Requests = b.Requests var txsAmount uint32 if b.TxAmount >= 2 { txsAmount = b.TxAmount - 2 @@ -1046,6 +1047,7 @@ func (r *BlockReader) FirstTxnNumNotInSnapshots() uint64 { func (r *BlockReader) IterateFrozenBodies(f func(blockNum, baseTxNum, txAmount uint64) error) error { view := r.sn.View() defer view.Close() + for _, sn := range view.Bodies() { sn := sn defer sn.EnableReadAhead().DisableReadAhead() diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots.go b/turbo/snapshotsync/freezeblocks/block_snapshots.go index 7412b380649..7aaacc12b7a 100644 --- a/turbo/snapshotsync/freezeblocks/block_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/block_snapshots.go @@ -547,6 +547,7 @@ func (s *RoSnapshots) rebuildSegments(fileNames []string, open bool, optimistic } } if optimistic { + s.logger.Warn("[snapshots] open segment", 
"err", err) continue } else { return err @@ -724,47 +725,6 @@ func (s *RoSnapshots) buildMissedIndicesIfNeed(ctx context.Context, logPrefix st return nil } -func (s *RoSnapshots) delete(fileName string) error { - v := s.View() - defer v.Close() - - _, fName := filepath.Split(fileName) - var err error - s.segments.Scan(func(segtype snaptype.Enum, value *segments) bool { - idxsToRemove := []int{} - for i, sn := range value.segments { - if sn.Decompressor == nil { - continue - } - if sn.segType.FileName(sn.version, sn.from, sn.to) != fName { - continue - } - files := sn.openFiles() - sn.close() - idxsToRemove = append(idxsToRemove, i) - for _, f := range files { - _ = os.Remove(f) - } - } - for i := len(idxsToRemove) - 1; i >= 0; i-- { - value.segments = append(value.segments[:idxsToRemove[i]], value.segments[idxsToRemove[i]+1:]...) - } - return true - }) - return err -} - -func (s *RoSnapshots) Delete(fileName string) error { - if s == nil { - return nil - } - if err := s.delete(fileName); err != nil { - return fmt.Errorf("can't delete file: %w", err) - } - return s.ReopenFolder() - -} - func (s *RoSnapshots) buildMissedIndices(logPrefix string, ctx context.Context, dirs datadir.Dirs, chainConfig *chain.Config, workers int, logger log.Logger) error { if s == nil { return nil @@ -948,11 +908,8 @@ func sendDiagnostics(startIndexingTime time.Time, indexPercent map[string]int, a }) } -func noGaps(in []snaptype.FileInfo) (out []snaptype.FileInfo, missingSnapshots []Range) { - if len(in) == 0 { - return nil, nil - } - prevTo := in[0].From +func noGaps(in []snaptype.FileInfo, from uint64) (out []snaptype.FileInfo, missingSnapshots []Range) { + prevTo := from for _, f := range in { if f.To <= prevTo { continue @@ -1072,7 +1029,7 @@ func SegmentsCaplin(dir string, minBlock uint64) (res []snaptype.FileInfo, missi } l = append(l, f) } - l, m = noGaps(noOverlaps(l)) + l, m = noGaps(noOverlaps(l), minBlock) if len(m) > 0 { lst := m[len(m)-1] log.Debug("[snapshots] see gap", "type", snaptype.CaplinEnums.BeaconBlocks, "from", lst.from) @@ -1085,7 +1042,7 @@ func SegmentsCaplin(dir string, minBlock uint64) (res []snaptype.FileInfo, missi } func Segments(dir string, minBlock uint64) (res []snaptype.FileInfo, missingSnapshots []Range, err error) { - return typedSegments(dir, minBlock, coresnaptype.BlockSnapshotTypes, true) + return typedSegments(dir, minBlock, coresnaptype.BlockSnapshotTypes, false) } func typedSegments(dir string, minBlock uint64, types []snaptype.Type, allowGaps bool) (res []snaptype.FileInfo, missingSnapshots []Range, err error) { @@ -1113,7 +1070,7 @@ func typedSegments(dir string, minBlock uint64, types []snaptype.Type, allowGaps if allowGaps { l = noOverlaps(segmentsTypeCheck(dir, l)) } else { - l, m = noGaps(noOverlaps(segmentsTypeCheck(dir, l))) + l, m = noGaps(noOverlaps(segmentsTypeCheck(dir, l)), minBlock) } if len(m) > 0 { lst := m[len(m)-1] @@ -1392,7 +1349,7 @@ func (br *BlockRetire) PruneAncientBlocks(tx kv.RwTx, limit int) error { return nil } -func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, minBlockNum, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDeleteSnapshots func(l []string) error, onFinishRetire func() error) { +func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, minBlockNum, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDeleteSnapshots func(l []string) error) { if maxBlockNum > br.maxScheduledBlock.Load() { 
br.maxScheduledBlock.Store(maxBlockNum) } @@ -1413,7 +1370,7 @@ func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, minBlockNum defer br.snBuildAllowed.Release(1) } - err := br.RetireBlocks(ctx, minBlockNum, maxBlockNum, lvl, seedNewSnapshots, onDeleteSnapshots, onFinishRetire) + err := br.RetireBlocks(ctx, minBlockNum, maxBlockNum, lvl, seedNewSnapshots, onDeleteSnapshots) if err != nil { br.logger.Warn("[snapshots] retire blocks", "err", err) return @@ -1421,7 +1378,7 @@ func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, minBlockNum }() } -func (br *BlockRetire) RetireBlocks(ctx context.Context, minBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDeleteSnapshots func(l []string) error, onFinish func() error) error { +func (br *BlockRetire) RetireBlocks(ctx context.Context, minBlockNum uint64, maxBlockNum uint64, lvl log.Lvl, seedNewSnapshots func(downloadRequest []services.DownloadRequest) error, onDeleteSnapshots func(l []string) error) error { if maxBlockNum > br.maxScheduledBlock.Load() { br.maxScheduledBlock.Store(maxBlockNum) } @@ -1458,11 +1415,6 @@ func (br *BlockRetire) RetireBlocks(ctx context.Context, minBlockNum uint64, max return err } } - if onFinish != nil { - if err := onFinish(); err != nil { - return err - } - } if !(ok || okBor) { break @@ -1486,6 +1438,7 @@ func (br *BlockRetire) BuildMissedIndicesIfNeed(ctx context.Context, logPrefix s } func DumpBlocks(ctx context.Context, blockFrom, blockTo uint64, chainConfig *chain.Config, tmpDir, snapDir string, chainDB kv.RoDB, workers int, lvl log.Lvl, logger log.Logger, blockReader services.FullBlockReader) error { + firstTxNum := blockReader.FirstTxnNumNotInSnapshots() for i := blockFrom; i < blockTo; i = chooseSegmentEnd(i, blockTo, coresnaptype.Enums.Headers, chainConfig) { lastTxNum, err := dumpBlocksRange(ctx, i, chooseSegmentEnd(i, blockTo, coresnaptype.Enums.Headers, chainConfig), tmpDir, snapDir, firstTxNum, chainDB, chainConfig, workers, lvl, logger) diff --git a/turbo/snapshotsync/freezeblocks/block_snapshots_test.go b/turbo/snapshotsync/freezeblocks/block_snapshots_test.go index f795f6f2597..9eae5a710c3 100644 --- a/turbo/snapshotsync/freezeblocks/block_snapshots_test.go +++ b/turbo/snapshotsync/freezeblocks/block_snapshots_test.go @@ -8,7 +8,6 @@ import ( "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" "github.com/ledgerwatch/erigon-lib/chain/networkname" "github.com/ledgerwatch/erigon-lib/chain/snapcfg" @@ -138,7 +137,6 @@ func TestMergeSnapshots(t *testing.T) { { merger := NewMerger(dir, 1, log.LvlInfo, nil, params.MainnetChainConfig, logger) merger.DisableFsync() - s.ReopenSegments(coresnaptype.BlockSnapshotTypes, false) ranges := merger.FindMergeRanges(s.Ranges(), s.SegmentsMax()) require.True(len(ranges) > 0) err := merger.Merge(context.Background(), s, coresnaptype.BlockSnapshotTypes, ranges, s.Dir(), false, nil, nil) @@ -155,91 +153,57 @@ func TestMergeSnapshots(t *testing.T) { { merger := NewMerger(dir, 1, log.LvlInfo, nil, params.MainnetChainConfig, logger) merger.DisableFsync() - s.ReopenSegments(coresnaptype.BlockSnapshotTypes, false) ranges := merger.FindMergeRanges(s.Ranges(), s.SegmentsMax()) require.True(len(ranges) == 0) err := merger.Merge(context.Background(), s, coresnaptype.BlockSnapshotTypes, ranges, s.Dir(), false, nil, nil) require.NoError(err) } - // [0; N] merges are not supported anymore - - // expectedFileName = 
snaptype.SegmentFileName(coresnaptype.Transactions.Versions().Current, 600_000, 700_000, coresnaptype.Transactions.Enum()) - // d, err = seg.NewDecompressor(filepath.Join(dir, expectedFileName)) - // require.NoError(err) - // defer d.Close() - // a = d.Count() - // require.Equal(10, a) - - // start := uint64(19_000_000) - // for i := uint64(0); i < N; i++ { - // createFile(start+i*10_000, start+(i+1)*10_000) - // } - // s = NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, start, logger) - // defer s.Close() - // require.NoError(s.ReopenFolder()) - // { - // merger := NewMerger(dir, 1, log.LvlInfo, nil, params.MainnetChainConfig, logger) - // merger.DisableFsync() - // fmt.Println(s.Ranges(), s.SegmentsMax()) - // fmt.Println(s.Ranges(), s.SegmentsMax()) - // ranges := merger.FindMergeRanges(s.Ranges(), s.SegmentsMax()) - // require.True(len(ranges) > 0) - // err := merger.Merge(context.Background(), s, coresnaptype.BlockSnapshotTypes, ranges, s.Dir(), false, nil, nil) - // require.NoError(err) - // } - - // expectedFileName = snaptype.SegmentFileName(coresnaptype.Transactions.Versions().Current, start+100_000, start+200_000, coresnaptype.Transactions.Enum()) - // d, err = seg.NewDecompressor(filepath.Join(dir, expectedFileName)) - // require.NoError(err) - // defer d.Close() - // a = d.Count() - // require.Equal(10, a) - - // { - // merger := NewMerger(dir, 1, log.LvlInfo, nil, params.MainnetChainConfig, logger) - // merger.DisableFsync() - // s.ReopenSegments(coresnaptype.BlockSnapshotTypes, false) - // ranges := merger.FindMergeRanges(s.Ranges(), s.SegmentsMax()) - // require.True(len(ranges) == 0) - // err := merger.Merge(context.Background(), s, coresnaptype.BlockSnapshotTypes, ranges, s.Dir(), false, nil, nil) - // require.NoError(err) - // } - - // expectedFileName = snaptype.SegmentFileName(coresnaptype.Transactions.Versions().Current, start+600_000, start+700_000, coresnaptype.Transactions.Enum()) - // d, err = seg.NewDecompressor(filepath.Join(dir, expectedFileName)) - // require.NoError(err) - // defer d.Close() - // a = d.Count() - // require.Equal(10, a) -} - -func TestDeleteSnapshots(t *testing.T) { - logger := log.New() - dir, require := t.TempDir(), require.New(t) - createFile := func(from, to uint64) { - for _, snT := range coresnaptype.BlockSnapshotTypes { - createTestSegmentFile(t, from, to, snT.Enum(), dir, 1, logger) - } - } - - N := uint64(70) + expectedFileName = snaptype.SegmentFileName(coresnaptype.Transactions.Versions().Current, 600_000, 700_000, coresnaptype.Transactions.Enum()) + d, err = seg.NewDecompressor(filepath.Join(dir, expectedFileName)) + require.NoError(err) + defer d.Close() + a = d.Count() + require.Equal(10, a) + start := uint64(19_000_000) for i := uint64(0); i < N; i++ { - createFile(i*10_000, (i+1)*10_000) + createFile(start+i*10_000, start+(i+1)*10_000) } - s := NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 0, logger) + s = NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, start, logger) defer s.Close() - retireFiles := []string{ - "v1-000000-000010-bodies.seg", - "v1-000000-000010-headers.seg", - "v1-000000-000010-transactions.seg", - } require.NoError(s.ReopenFolder()) - for _, f := range retireFiles { - require.NoError(s.Delete(f)) - require.False(slices.Contains(s.Files(), f)) + { + merger := NewMerger(dir, 1, log.LvlInfo, nil, params.MainnetChainConfig, logger) + merger.DisableFsync() + ranges := merger.FindMergeRanges(s.Ranges(), s.SegmentsMax()) + require.True(len(ranges) > 0) + err := 
merger.Merge(context.Background(), s, coresnaptype.BlockSnapshotTypes, ranges, s.Dir(), false, nil, nil) + require.NoError(err) + } + + expectedFileName = snaptype.SegmentFileName(coresnaptype.Transactions.Versions().Current, start+100_000, start+200_000, coresnaptype.Transactions.Enum()) + d, err = seg.NewDecompressor(filepath.Join(dir, expectedFileName)) + require.NoError(err) + defer d.Close() + a = d.Count() + require.Equal(10, a) + + { + merger := NewMerger(dir, 1, log.LvlInfo, nil, params.MainnetChainConfig, logger) + merger.DisableFsync() + ranges := merger.FindMergeRanges(s.Ranges(), s.SegmentsMax()) + require.True(len(ranges) == 0) + err := merger.Merge(context.Background(), s, coresnaptype.BlockSnapshotTypes, ranges, s.Dir(), false, nil, nil) + require.NoError(err) } + + expectedFileName = snaptype.SegmentFileName(coresnaptype.Transactions.Versions().Current, start+600_000, start+700_000, coresnaptype.Transactions.Enum()) + d, err = seg.NewDecompressor(filepath.Join(dir, expectedFileName)) + require.NoError(err) + defer d.Close() + a = d.Count() + require.Equal(10, a) } func TestRemoveOverlaps(t *testing.T) { @@ -270,7 +234,7 @@ func TestRemoveOverlaps(t *testing.T) { s := NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: true}, dir, 0, logger) defer s.Close() - require.NoError(s.ReopenSegments(coresnaptype.BlockSnapshotTypes, false)) + require.NoError(s.ReopenFolder()) list, err := snaptype.Segments(s.dir) require.NoError(err) @@ -348,8 +312,7 @@ func TestOpenAllSnapshot(t *testing.T) { err = s.ReopenFolder() require.NoError(err) require.NotNil(getSegs(coresnaptype.Enums.Headers)) - s.ReopenSegments(coresnaptype.BlockSnapshotTypes, false) - require.Equal(1, len(getSegs(coresnaptype.Enums.Headers).segments)) + require.Equal(0, len(getSegs(coresnaptype.Enums.Headers).segments)) s.Close() createFile(0, 500_000, coresnaptype.Bodies) diff --git a/turbo/snapshotsync/freezeblocks/bor_snapshots.go b/turbo/snapshotsync/freezeblocks/bor_snapshots.go index 178471742c0..c0cf858d15b 100644 --- a/turbo/snapshotsync/freezeblocks/bor_snapshots.go +++ b/turbo/snapshotsync/freezeblocks/bor_snapshots.go @@ -101,7 +101,7 @@ func (br *BlockRetire) retireBorBlocks(ctx context.Context, minBlockNum uint64, return nil } - err := merger.Merge(ctx, &snapshots.RoSnapshots, borsnaptype.BorSnapshotTypes(), rangesToMerge, snapshots.Dir(), true /* doIndex */, onMerge, onDelete) + err := merger.Merge(ctx, &snapshots.RoSnapshots, borsnaptype.BorSnapshotTypes, rangesToMerge, snapshots.Dir(), true /* doIndex */, onMerge, onDelete) if err != nil { return blocksRetired, err @@ -127,7 +127,7 @@ type BorRoSnapshots struct { // - gaps are not allowed // - segment have [from:to] semantic func NewBorRoSnapshots(cfg ethconfig.BlocksFreezing, snapDir string, segmentsMin uint64, logger log.Logger) *BorRoSnapshots { - return &BorRoSnapshots{*newRoSnapshots(cfg, snapDir, borsnaptype.BorSnapshotTypes(), segmentsMin, logger)} + return &BorRoSnapshots{*newRoSnapshots(cfg, snapDir, borsnaptype.BorSnapshotTypes, segmentsMin, logger)} } func (s *BorRoSnapshots) Ranges() []Range { @@ -199,7 +199,7 @@ func removeBorOverlaps(dir string, active []snaptype.FileInfo, max uint64) { } func (s *BorRoSnapshots) ReopenFolder() error { - files, _, err := typedSegments(s.dir, s.segmentsMin.Load(), borsnaptype.BorSnapshotTypes(), false) + files, _, err := typedSegments(s.dir, s.segmentsMin.Load(), borsnaptype.BorSnapshotTypes, false) if err != nil { return err } diff --git a/turbo/snapshotsync/snapshotsync.go b/turbo/snapshotsync/snapshotsync.go 
index ceb342a6a02..1578eac4569 100644 --- a/turbo/snapshotsync/snapshotsync.go +++ b/turbo/snapshotsync/snapshotsync.go @@ -4,10 +4,7 @@ import ( "context" "encoding/binary" "fmt" - "math" "runtime" - "sort" - "strconv" "strings" "time" @@ -15,7 +12,6 @@ import ( "github.com/ledgerwatch/erigon-lib/chain/snapcfg" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/dbg" - "github.com/ledgerwatch/erigon-lib/config3" "github.com/ledgerwatch/erigon-lib/diagnostics" "github.com/ledgerwatch/erigon-lib/downloader/downloadergrpc" "github.com/ledgerwatch/erigon-lib/downloader/snaptype" @@ -27,7 +23,6 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" coresnaptype "github.com/ledgerwatch/erigon/core/snaptype" - "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/turbo/services" ) @@ -71,197 +66,11 @@ func RequestSnapshotsDownload(ctx context.Context, downloadRequest []services.Do return nil } -func adjustStepPrune(steps uint64) uint64 { - if steps == 0 { - return 0 - } - if steps < snaptype.Erigon3SeedableSteps { - return snaptype.Erigon3SeedableSteps - } - if steps%snaptype.Erigon3SeedableSteps == 0 { - return steps - } - // round to nearest multiple of 64. if less than 64, round to 64 - return steps + steps%snaptype.Erigon3SeedableSteps -} - -func adjustBlockPrune(blocks, minBlocksToDownload uint64) uint64 { - if minBlocksToDownload < snaptype.Erigon2MergeLimit { - minBlocksToDownload = snaptype.Erigon2MergeLimit - } - if blocks < minBlocksToDownload { - blocks = minBlocksToDownload - } - if blocks%snaptype.Erigon2MergeLimit == 0 { - return blocks - } - ret := blocks + snaptype.Erigon2MergeLimit - // round to nearest multiple of 64. if less than 64, round to 64 - return ret - ret%snaptype.Erigon2MergeLimit -} - -func shouldUseStepsForPruning(name string) bool { - return strings.HasPrefix(name, "idx") || strings.HasPrefix(name, "history") -} - -func canSnapshotBePruned(name string) bool { - return strings.HasPrefix(name, "idx") || strings.HasPrefix(name, "history") || strings.Contains(name, "transactions") -} - -func buildBlackListForPruning(pruneMode bool, stepPrune, minBlockToDownload, blockPrune uint64, preverified snapcfg.Preverified) (map[string]struct{}, error) { - type snapshotFileData struct { - from, to uint64 - stepBased bool - name string - } - blackList := make(map[string]struct{}) - if !pruneMode { - return blackList, nil - } - stepPrune = adjustStepPrune(stepPrune) - blockPrune = adjustBlockPrune(blockPrune, minBlockToDownload) - snapshotKindToNames := make(map[string][]snapshotFileData) - for _, p := range preverified { - name := p.Name - // Dont prune unprunable files - if !canSnapshotBePruned(name) { - continue - } - var from, to uint64 - var err error - var kind string - if shouldUseStepsForPruning(name) { - // parse "from" (0) and "to" (64) from the name - // parse the snapshot "kind". e.g kind of 'idx/v1-accounts.0-64.ef' is "idx/v1-accounts" - rangeString := strings.Split(name, ".")[1] - rangeNums := strings.Split(rangeString, "-") - // convert the range to uint64 - from, err = strconv.ParseUint(rangeNums[0], 10, 64) - if err != nil { - return nil, err - } - to, err = strconv.ParseUint(rangeNums[1], 10, 64) - if err != nil { - return nil, err - } - kind = strings.Split(name, ".")[0] - } else { - // e.g 'v1-000000-000100-beaconblocks.seg' - // parse "from" (000000) and "to" (000100) from the name. 
100 is 100'000 blocks - minusSplit := strings.Split(name, "-") - s, _, ok := snaptype.ParseFileName("", name) - if !ok { - continue - } - from = s.From - to = s.To - kind = minusSplit[3] - } - blackList[p.Name] = struct{}{} // Add all of them to the blacklist and remove the ones that are not blacklisted later. - snapshotKindToNames[kind] = append(snapshotKindToNames[kind], snapshotFileData{ - from: from, - to: to, - stepBased: shouldUseStepsForPruning(name), - name: name, - }) - } - // sort the snapshots by "from" and "to" in ascending order - for _, snapshots := range snapshotKindToNames { - prunedDistance := uint64(0) // keep track of pruned distance for snapshots - // sort the snapshots by "from" and "to" in descending order - sort.Slice(snapshots, func(i, j int) bool { - if snapshots[i].from == snapshots[j].from { - return snapshots[i].to > snapshots[j].to - } - return snapshots[i].from > snapshots[j].from - }) - for _, snapshot := range snapshots { - if snapshot.stepBased { - if prunedDistance >= stepPrune { - break - } - } else if prunedDistance >= blockPrune { - break - } - delete(blackList, snapshot.name) - prunedDistance += snapshot.to - snapshot.from - } - } - return blackList, nil -} - -// getMinimumBlocksToDownload - get the minimum number of blocks to download -func getMinimumBlocksToDownload(tx kv.Tx, blockReader services.FullBlockReader, minStep uint64, expectedPruneBlockAmount, expectedPruneHistoryAmount uint64) (uint64, uint64, error) { - frozenBlocks := blockReader.Snapshots().SegmentsMax() - minToDownload := uint64(math.MaxUint64) - minStepToDownload := minStep - stateTxNum := minStep * config3.HistoryV3AggregationStep - if err := blockReader.IterateFrozenBodies(func(blockNum, baseTxNum, txAmount uint64) error { - if blockNum == frozenBlocks-expectedPruneHistoryAmount { - minStepToDownload = (baseTxNum / config3.HistoryV3AggregationStep) - 1 - } - if stateTxNum <= baseTxNum { // only cosnider the block if it - return nil - } - newMinToDownload := uint64(0) - if frozenBlocks > blockNum { - newMinToDownload = frozenBlocks - blockNum - } - if newMinToDownload < minToDownload { - minToDownload = newMinToDownload - } - return nil - }); err != nil { - return 0, 0, err - } - if expectedPruneBlockAmount == 0 { - return minToDownload, 0, nil - } - // return the minimum number of blocks to download and the minimum step. 
- return minToDownload, minStep - minStepToDownload, nil -} - -func getMaxStepRangeInSnapshots(preverified snapcfg.Preverified) (uint64, error) { - maxTo := uint64(0) - for _, p := range preverified { - // take the "to" from "domain" snapshot - if !strings.HasPrefix(p.Name, "domain") { - continue - } - rangeString := strings.Split(p.Name, ".")[1] - rangeNums := strings.Split(rangeString, "-") - // convert the range to uint64 - to, err := strconv.ParseUint(rangeNums[1], 10, 64) - if err != nil { - return 0, err - } - if to > maxTo { - maxTo = to - } - } - return maxTo, nil -} - -func computeBlocksToPrune(blockReader services.FullBlockReader, p prune.Mode) (blocksToPrune uint64, historyToPrune uint64) { - frozenBlocks := blockReader.Snapshots().SegmentsMax() - blocksPruneTo := p.Blocks.PruneTo(frozenBlocks) - historyPruneTo := p.History.PruneTo(frozenBlocks) - if blocksPruneTo <= frozenBlocks { - blocksToPrune = frozenBlocks - blocksPruneTo - } - if historyPruneTo <= frozenBlocks { - historyToPrune = frozenBlocks - historyPruneTo - } - return blocksToPrune, historyToPrune -} - // WaitForDownloader - wait for Downloader service to download all expected snapshots // for MVP we sync with Downloader only once, in future will send new snapshots also -func WaitForDownloader(ctx context.Context, logPrefix string, headerchain, blobs bool, prune prune.Mode, caplin CaplinMode, agg *state.Aggregator, tx kv.RwTx, blockReader services.FullBlockReader, cc *chain.Config, snapshotDownloader proto_downloader.DownloaderClient, stagesIdsList []string) error { +func WaitForDownloader(ctx context.Context, logPrefix string, histV3, blobs bool, caplin CaplinMode, agg *state.Aggregator, tx kv.RwTx, blockReader services.FullBlockReader, cc *chain.Config, snapshotDownloader proto_downloader.DownloaderClient, stagesIdsList []string) error { snapshots := blockReader.Snapshots() borSnapshots := blockReader.BorSnapshots() - - // Find minimum block to download. 
if blockReader.FreezingCfg().NoDownloader || snapshotDownloader == nil { if err := snapshots.ReopenFolder(); err != nil { return err @@ -274,11 +83,9 @@ func WaitForDownloader(ctx context.Context, logPrefix string, headerchain, blobs return nil } - if headerchain { - snapshots.Close() - if cc.Bor != nil { - borSnapshots.Close() - } + snapshots.Close() + if cc.Bor != nil { + borSnapshots.Close() } //Corner cases: @@ -291,26 +98,13 @@ func WaitForDownloader(ctx context.Context, logPrefix string, headerchain, blobs preverifiedBlockSnapshots := snapCfg.Preverified downloadRequest := make([]services.DownloadRequest, 0, len(preverifiedBlockSnapshots)) - blockPrune, historyPrune := computeBlocksToPrune(blockReader, prune) - blackListForPruning := make(map[string]struct{}) - wantToPrune := prune.Blocks.Enabled() || prune.History.Enabled() - if !headerchain && wantToPrune { - minStep, err := getMaxStepRangeInSnapshots(preverifiedBlockSnapshots) - if err != nil { - return err - } - minBlockAmountToDownload, minStepToDownload, err := getMinimumBlocksToDownload(tx, blockReader, minStep, blockPrune, historyPrune) - if err != nil { - return err - } - blackListForPruning, err = buildBlackListForPruning(wantToPrune, minStepToDownload, minBlockAmountToDownload, blockPrune, preverifiedBlockSnapshots) - if err != nil { - return err - } - } - // build all download requests for _, p := range preverifiedBlockSnapshots { + if !histV3 { + if strings.HasPrefix(p.Name, "domain") || strings.HasPrefix(p.Name, "history") || strings.HasPrefix(p.Name, "idx") { + continue + } + } if caplin == NoCaplin && (strings.Contains(p.Name, "beaconblocks") || strings.Contains(p.Name, "blobsidecars")) { continue } @@ -320,13 +114,6 @@ func WaitForDownloader(ctx context.Context, logPrefix string, headerchain, blobs if !blobs && strings.Contains(p.Name, "blobsidecars") { continue } - if headerchain && !strings.Contains(p.Name, "headers") && !strings.Contains(p.Name, "bodies") { - continue - } - if _, ok := blackListForPruning[p.Name]; ok { - continue - } - downloadRequest = append(downloadRequest, services.NewDownloadRequest(p.Name, p.Hash)) } @@ -422,23 +209,6 @@ func WaitForDownloader(ctx context.Context, logPrefix string, headerchain, blobs // after the initial call the downloader or snapshot-lock.file will prevent this download from running // - // prohibit new downloads for the files that were downloaded - - // If we only download headers and bodies, we should prohibit only those. 
- if headerchain { - if _, err := snapshotDownloader.ProhibitNewDownloads(ctx, &proto_downloader.ProhibitNewDownloadsRequest{ - Type: coresnaptype.Bodies.Name(), - }); err != nil { - return err - } - if _, err := snapshotDownloader.ProhibitNewDownloads(ctx, &proto_downloader.ProhibitNewDownloadsRequest{ - Type: coresnaptype.Headers.Name(), - }); err != nil { - return err - } - return nil - } - // prohibits further downloads, except some exceptions for _, p := range blockReader.AllTypes() { if _, err := snapshotDownloader.ProhibitNewDownloads(ctx, &proto_downloader.ProhibitNewDownloadsRequest{ diff --git a/turbo/snapshotsync/snapshotsync_test.go b/turbo/snapshotsync/snapshotsync_test.go deleted file mode 100644 index 284a7b2646e..00000000000 --- a/turbo/snapshotsync/snapshotsync_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package snapshotsync - -import ( - "strings" - "testing" - - "github.com/ledgerwatch/erigon-lib/chain/snapcfg" - "github.com/ledgerwatch/erigon-lib/downloader/snaptype" -) - -func TestBlackListForPruning(t *testing.T) { - preverified := snapcfg.Mainnet - - maxStep, err := getMaxStepRangeInSnapshots(preverified) - if err != nil { - t.Fatal(err) - } - // Prune 64 steps and contain at least all the blocks - blackList, err := buildBlackListForPruning(true, 64, 100_000, 25_000_000, preverified) - if err != nil { - t.Fatal(err) - } - for p := range blackList { - // take the snapshot file name and parse it to get the "from" - info, _, ok := snaptype.ParseFileName("tmp", p) - if !ok { - continue - } - if strings.Contains(p, "transactions") { - if info.From < 19_000_000 { - t.Errorf("Should have pruned %s", p) - } - continue - } - if strings.Contains(p, "domain") { - t.Errorf("Should not have pruned %s", p) - } - if info.To == maxStep { - t.Errorf("Should not have pruned %s", p) - } - } - -} diff --git a/turbo/stages/genesis_test.go b/turbo/stages/genesis_test.go index e2118f7166b..214416f3edc 100644 --- a/turbo/stages/genesis_test.go +++ b/turbo/stages/genesis_test.go @@ -64,7 +64,7 @@ func TestSetupGenesis(t *testing.T) { { name: "genesis without ChainConfig", fn: func(t *testing.T, db kv.RwDB) (*chain.Config, *types.Block, error) { - return core.CommitGenesisBlock(db, new(types.Genesis), tmpdir, logger) + return core.CommitGenesisBlock(db, new(types.Genesis), tmpdir, logger, nil) }, wantErr: types.ErrGenesisNoConfig, wantConfig: params.AllProtocolChanges, @@ -72,7 +72,7 @@ func TestSetupGenesis(t *testing.T) { { name: "no block in DB, genesis == nil", fn: func(t *testing.T, db kv.RwDB) (*chain.Config, *types.Block, error) { - return core.CommitGenesisBlock(db, nil, tmpdir, logger) + return core.CommitGenesisBlock(db, nil, tmpdir, logger, nil) }, wantHash: params.MainnetGenesisHash, wantConfig: params.MainnetChainConfig, @@ -80,7 +80,7 @@ func TestSetupGenesis(t *testing.T) { { name: "mainnet block in DB, genesis == nil", fn: func(t *testing.T, db kv.RwDB) (*chain.Config, *types.Block, error) { - return core.CommitGenesisBlock(db, nil, tmpdir, logger) + return core.CommitGenesisBlock(db, nil, tmpdir, logger, nil) }, wantHash: params.MainnetGenesisHash, wantConfig: params.MainnetChainConfig, @@ -88,8 +88,8 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == nil", fn: func(t *testing.T, db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir, logger) - return core.CommitGenesisBlock(db, nil, tmpdir, logger) + core.MustCommitGenesis(&customg, db, tmpdir, logger, nil) + return core.CommitGenesisBlock(db, nil, tmpdir, 
logger, nil) }, wantHash: customghash, wantConfig: customg.Config, @@ -97,8 +97,8 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == sepolia", fn: func(t *testing.T, db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir, logger) - return core.CommitGenesisBlock(db, core.SepoliaGenesisBlock(), tmpdir, logger) + core.MustCommitGenesis(&customg, db, tmpdir, logger, nil) + return core.CommitGenesisBlock(db, core.SepoliaGenesisBlock(), tmpdir, logger, nil) }, wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.SepoliaGenesisHash}, wantHash: params.SepoliaGenesisHash, @@ -107,8 +107,8 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == bor-mainnet", fn: func(t *testing.T, db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir, logger) - return core.CommitGenesisBlock(db, core.BorMainnetGenesisBlock(), tmpdir, logger) + core.MustCommitGenesis(&customg, db, tmpdir, logger, nil) + return core.CommitGenesisBlock(db, core.BorMainnetGenesisBlock(), tmpdir, logger, nil) }, wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.BorMainnetGenesisHash}, wantHash: params.BorMainnetGenesisHash, @@ -117,8 +117,8 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == mumbai", fn: func(t *testing.T, db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir, logger) - return core.CommitGenesisBlock(db, core.MumbaiGenesisBlock(), tmpdir, logger) + core.MustCommitGenesis(&customg, db, tmpdir, logger, nil) + return core.CommitGenesisBlock(db, core.MumbaiGenesisBlock(), tmpdir, logger, nil) }, wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.MumbaiGenesisHash}, wantHash: params.MumbaiGenesisHash, @@ -127,8 +127,8 @@ func TestSetupGenesis(t *testing.T) { { name: "custom block in DB, genesis == amoy", fn: func(t *testing.T, db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&customg, db, tmpdir, logger) - return core.CommitGenesisBlock(db, core.AmoyGenesisBlock(), tmpdir, logger) + core.MustCommitGenesis(&customg, db, tmpdir, logger, nil) + return core.CommitGenesisBlock(db, core.AmoyGenesisBlock(), tmpdir, logger, nil) }, wantErr: &types.GenesisMismatchError{Stored: customghash, New: params.AmoyGenesisHash}, wantHash: params.AmoyGenesisHash, @@ -137,8 +137,8 @@ func TestSetupGenesis(t *testing.T) { { name: "compatible config in DB", fn: func(t *testing.T, db kv.RwDB) (*chain.Config, *types.Block, error) { - core.MustCommitGenesis(&oldcustomg, db, tmpdir, logger) - return core.CommitGenesisBlock(db, &customg, tmpdir, logger) + core.MustCommitGenesis(&oldcustomg, db, tmpdir, logger, nil) + return core.CommitGenesisBlock(db, &customg, tmpdir, logger, nil) }, wantHash: customghash, wantConfig: customg.Config, @@ -162,7 +162,7 @@ func TestSetupGenesis(t *testing.T) { return nil, nil, err } // This should return a compatibility error. 
-			return core.CommitGenesisBlock(m.DB, &customg, tmpdir, logger)
+			return core.CommitGenesisBlock(m.DB, &customg, tmpdir, logger, nil)
 			},
 			wantHash:   customghash,
 			wantConfig: customg.Config,
@@ -180,7 +180,7 @@ func TestSetupGenesis(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			t.Parallel()
 			dirs := datadir.New(tmpdir)
-			db, _ := temporaltest.NewTestDB(t, dirs)
+			_, db, _ := temporaltest.NewTestDB(t, dirs)
 			blockReader := freezeblocks.NewBlockReader(freezeblocks.NewRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, dirs.Snap, 0, log.New()), freezeblocks.NewBorRoSnapshots(ethconfig.BlocksFreezing{Enabled: false}, dirs.Snap, 0, log.New()))
 			config, genesis, err := test.fn(t, db)
 			// Check the return values.
diff --git a/turbo/stages/mock/mock_sentry.go b/turbo/stages/mock/mock_sentry.go
index c92613741af..b78e7726c9a 100644
--- a/turbo/stages/mock/mock_sentry.go
+++ b/turbo/stages/mock/mock_sentry.go
@@ -260,7 +260,8 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
 	logger := log.New()
 	ctx, ctxCancel := context.WithCancel(context.Background())
 
-	db, agg := temporaltest.NewTestDB(nil, dirs)
+	histV3, db, agg := temporaltest.NewTestDB(nil, dirs)
+	cfg.HistoryV3 = histV3
 
 	erigonGrpcServeer := remotedbserver.NewKvServer(ctx, db, nil, nil, nil, logger)
 	allSnapshots := freezeblocks.NewRoSnapshots(ethconfig.Defaults.Snapshot, dirs.Snap, 0, logger)
@@ -282,12 +283,12 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
 		PeerId:         gointerfaces.ConvertHashToH512([64]byte{0x12, 0x34, 0x50}), // "12345"
 		BlockSnapshots: allSnapshots,
 		BlockReader:    freezeblocks.NewBlockReader(allSnapshots, allBorSnapshots),
-		HistoryV3:      true,
+		HistoryV3:      cfg.HistoryV3,
 	}
 	if tb != nil {
 		tb.Cleanup(mock.Close)
 	}
-	blockWriter := blockio.NewBlockWriter()
+	blockWriter := blockio.NewBlockWriter(mock.HistoryV3)
 
 	mock.Address = crypto.PubkeyToAddress(mock.Key.PublicKey)
 
@@ -312,7 +313,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
 		shanghaiTime := mock.ChainConfig.ShanghaiTime
 		cancunTime := mock.ChainConfig.CancunTime
 		maxBlobsPerBlock := mock.ChainConfig.GetMaxBlobsPerBlock()
-		mock.TxPool, err = txpool.New(newTxs, mock.DB, poolCfg, kvcache.NewDummy(), *chainID, shanghaiTime, nil /* agraBlock */, cancunTime, maxBlobsPerBlock, nil, logger)
+		mock.TxPool, err = txpool.New(newTxs, mock.DB, poolCfg, kvcache.NewDummy(histV3), *chainID, shanghaiTime, nil /* agraBlock */, cancunTime, maxBlobsPerBlock, nil, logger)
 		if err != nil {
 			tb.Fatal(err)
 		}
@@ -334,7 +335,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
 	}
 
 	// Committed genesis will be shared between download and mock sentry
-	_, mock.Genesis, err = core.CommitGenesisBlock(mock.DB, gspec, "", mock.Log)
+	_, mock.Genesis, err = core.CommitGenesisBlock(mock.DB, gspec, "", mock.Log, nil)
 	if _, ok := err.(*chain.ConfigCompatError); err != nil && !ok {
 		if tb != nil {
 			tb.Fatal(err)
 		}
@@ -350,15 +351,19 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
 	terseLogger.SetHandler(log.LvlFilterHandler(log.LvlWarn, log.StderrHandler))
 	// Needs its own notifications to not update RPC daemon and txpool about pending blocks
 	stateSync := stages2.NewInMemoryExecution(mock.Ctx, mock.DB, &cfg, mock.sentriesClient,
-		dirs, notifications, mock.BlockReader, blockWriter, mock.agg, nil, terseLogger)
+		dirs, notifications, mock.BlockReader, blockWriter, mock.agg, nil, terseLogger, nil)
 	chainReader := consensuschain.NewReader(mock.ChainConfig, txc.Tx, mock.BlockReader, logger)
 	// We start the mining step
-	if err := stages2.StateStep(ctx, chainReader, mock.Engine, txc, stateSync, header, body, unwindPoint, headersChain, bodiesChain); err != nil {
+	if err := stages2.StateStep(ctx, chainReader, mock.Engine, txc, stateSync, header, body, unwindPoint, headersChain, bodiesChain, histV3); err != nil {
 		logger.Warn("Could not validate block", "err", err)
 		return err
 	}
 	var progress uint64
-	progress, err = stages.GetStageProgress(txc.Tx, stages.Execution)
+	if histV3 {
+		progress, err = stages.GetStageProgress(txc.Tx, stages.Execution)
+	} else {
+		progress, err = stages.GetStageProgress(txc.Tx, stages.IntermediateHashes)
+	}
 	if err != nil {
 		return err
 	}
@@ -461,6 +466,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
 			mock.Notifications.Accumulator,
 			cfg.StateStream,
 			/*stateStream=*/ false,
+			/*exec22=*/ cfg.HistoryV3,
 			dirs,
 			mock.BlockReader,
 			mock.sentriesClient.Hd,
@@ -486,15 +492,14 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
 	agg.SetSnapshotBuildSema(blockSnapBuildSema)
 	blockRetire := freezeblocks.NewBlockRetire(1, dirs, mock.BlockReader, blockWriter, mock.DB, mock.ChainConfig, mock.Notifications.Events, blockSnapBuildSema, logger)
 
-	historyV3 := true
 	mock.Sync = stagedsync.New(
 		cfg.Sync,
 		stagedsync.DefaultStages(mock.Ctx,
-			stagedsync.StageSnapshotsCfg(mock.DB, *mock.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, mock.BlockReader, mock.Notifications, mock.agg, false, false, nil, prune),
-			stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, cfg.Sync, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, mock.BlockReader, blockWriter, dirs.Tmp, mock.Notifications, nil),
+			stagedsync.StageSnapshotsCfg(mock.DB, *mock.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, mock.BlockReader, mock.Notifications, mock.HistoryV3, mock.agg, false, false, nil),
+			stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, cfg.Sync, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, mock.BlockReader, blockWriter, dirs.Tmp, mock.HistoryV3, mock.Notifications, nil),
 			stagedsync.StageBorHeimdallCfg(mock.DB, snapDb, stagedsync.MiningState{}, *mock.ChainConfig, nil /* heimdallClient */, mock.BlockReader, nil, nil, nil, recents, signatures, false, nil),
 			stagedsync.StageBlockHashesCfg(mock.DB, mock.Dirs.Tmp, mock.ChainConfig, blockWriter),
-			stagedsync.StageBodiesCfg(mock.DB, mock.sentriesClient.Bd, sendBodyRequest, penalize, blockPropagator, cfg.Sync.BodyDownloadTimeoutSeconds, *mock.ChainConfig, mock.BlockReader, blockWriter, nil),
+			stagedsync.StageBodiesCfg(mock.DB, mock.sentriesClient.Bd, sendBodyRequest, penalize, blockPropagator, cfg.Sync.BodyDownloadTimeoutSeconds, *mock.ChainConfig, mock.BlockReader, cfg.HistoryV3, blockWriter, nil),
 			stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, cfg.Sync, false, dirs.Tmp, prune, mock.BlockReader, mock.sentriesClient.Hd, nil),
 			stagedsync.StageExecuteBlocksCfg(
 				mock.DB,
@@ -507,6 +512,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
 				mock.Notifications.Accumulator,
 				cfg.StateStream,
 				/*stateStream=*/ false,
+				/*exec22=*/ cfg.HistoryV3,
 				dirs,
 				mock.BlockReader,
 				mock.sentriesClient.Hd,
@@ -515,8 +521,8 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
 				mock.agg,
 				nil,
 			),
-			stagedsync.StageHashStateCfg(mock.DB, mock.Dirs),
-			stagedsync.StageTrieCfg(mock.DB, checkStateRoot, true, false, dirs.Tmp, mock.BlockReader, mock.sentriesClient.Hd, historyV3, mock.agg),
+			stagedsync.StageHashStateCfg(mock.DB, mock.Dirs, cfg.HistoryV3),
+			stagedsync.StageTrieCfg(mock.DB, checkStateRoot, true, false, dirs.Tmp, mock.BlockReader, mock.sentriesClient.Hd, cfg.HistoryV3, mock.agg),
 			stagedsync.StageHistoryCfg(mock.DB, prune, dirs.Tmp),
 			stagedsync.StageLogIndexCfg(mock.DB, prune, dirs.Tmp, nil),
 			stagedsync.StageCallTracesCfg(mock.DB, prune, 0, dirs.Tmp),
@@ -530,10 +536,10 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
 	cfg.Genesis = gspec
 	pipelineStages := stages2.NewPipelineStages(mock.Ctx, db, &cfg, p2p.Config{}, mock.sentriesClient, mock.Notifications,
-		snapDownloader, mock.BlockReader, blockRetire, mock.agg, nil, forkValidator, logger, checkStateRoot)
+		snapDownloader, mock.BlockReader, blockRetire, mock.agg, nil, forkValidator, logger, nil, checkStateRoot)
 	mock.posStagedSync = stagedsync.New(cfg.Sync, pipelineStages, stagedsync.PipelineUnwindOrder, stagedsync.PipelinePruneOrder, logger)
 
-	mock.Eth1ExecutionService = eth1.NewEthereumExecutionModule(mock.BlockReader, mock.DB, mock.posStagedSync, forkValidator, mock.ChainConfig, assembleBlockPOS, nil, mock.Notifications.Accumulator, mock.Notifications.StateChangesConsumer, logger, engine, cfg.Sync, ctx)
+	mock.Eth1ExecutionService = eth1.NewEthereumExecutionModule(mock.BlockReader, mock.DB, mock.posStagedSync, forkValidator, mock.ChainConfig, assembleBlockPOS, nil, mock.Notifications.Accumulator, mock.Notifications.StateChangesConsumer, logger, engine, histV3, cfg.Sync, ctx)
 
 	mock.sentriesClient.Hd.StartPoSDownloader(mock.Ctx, sendHeaderRequest, penalize)
 
@@ -553,6 +559,7 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
 			mock.Notifications.Accumulator,
 			cfg.StateStream,
 			/*stateStream=*/ false,
+			/*exec22=*/ cfg.HistoryV3,
 			dirs,
 			mock.BlockReader,
 			mock.sentriesClient.Hd,
@@ -582,14 +589,15 @@ func MockWithEverything(tb testing.TB, gspec *types.Genesis, key *ecdsa.PrivateK
 	go mock.sentriesClient.RecvUploadHeadersMessageLoop(mock.Ctx, mock.SentryClient, &mock.ReceiveWg)
 	mock.StreamWg.Wait()
 
-	//app expecting that genesis will always be in db
-	c := &core.ChainPack{
-		Headers:  []*types.Header{mock.Genesis.HeaderNoCopy()},
-		Blocks:   []*types.Block{mock.Genesis},
-		TopBlock: mock.Genesis,
-	}
-	if err = mock.InsertChain(c); err != nil {
-		tb.Fatal(err)
+	if histV3 {
+		c := &core.ChainPack{
+			Headers:  []*types.Header{mock.Genesis.HeaderNoCopy()},
+			Blocks:   []*types.Block{mock.Genesis},
+			TopBlock: mock.Genesis,
+		}
+		if err = mock.InsertChain(c); err != nil {
+			tb.Fatal(err)
+		}
 	}
 	return mock
 }
@@ -839,7 +847,7 @@ func (ms *MockSentry) HeaderDownload() *headerdownload.HeaderDownload {
 }
 
 func (ms *MockSentry) NewHistoryStateReader(blockNum uint64, tx kv.Tx) state.StateReader {
-	r, err := rpchelper.CreateHistoryStateReader(tx, blockNum, 0, ms.ChainConfig.ChainName)
+	r, err := rpchelper.CreateHistoryStateReader(tx, blockNum, 0, ms.HistoryV3, ms.ChainConfig.ChainName)
 	if err != nil {
 		panic(err)
 	}
@@ -857,5 +865,5 @@ func (ms *MockSentry) HistoryV3Components() *libstate.Aggregator {
 }
 
 func (ms *MockSentry) BlocksIO() (services.FullBlockReader, *blockio.BlockWriter) {
-	return ms.BlockReader, blockio.NewBlockWriter()
+	return ms.BlockReader, blockio.NewBlockWriter(ms.HistoryV3)
}
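The mock_sentry.go hunks above all follow one pattern: `temporaltest.NewTestDB` now reports whether the test database was opened with HistoryV3, and that flag is threaded into the config instead of being hard-coded to `true`. A minimal sketch of how a test could consume the new signature; the test name, package, and import paths are assumptions inferred from the hunks, not part of the patch:

```go
package mock_test

import (
	"testing"

	"github.com/ledgerwatch/erigon-lib/common/datadir"
	"github.com/ledgerwatch/erigon-lib/kv/temporal/temporaltest"
	"github.com/ledgerwatch/erigon/eth/ethconfig"
)

// Hypothetical test: the first return value of NewTestDB reports whether the
// DB was built with HistoryV3; callers propagate it into their config, as
// MockWithEverything now does with `cfg.HistoryV3 = histV3`.
func TestHistoryV3Plumbing(t *testing.T) {
	dirs := datadir.New(t.TempDir())
	histV3, db, agg := temporaltest.NewTestDB(t, dirs)

	cfg := ethconfig.Defaults
	cfg.HistoryV3 = histV3 // mirrors the assignment in MockWithEverything

	_, _ = db, agg
	_ = cfg
}
```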
diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go
index 66de6f1543e..f7189c2f262 100644
--- a/turbo/stages/stageloop.go
+++ b/turbo/stages/stageloop.go
@@ -23,11 +23,13 @@ import (
 	"github.com/ledgerwatch/erigon/consensus/misc"
 	"github.com/ledgerwatch/erigon/core/rawdb"
 	"github.com/ledgerwatch/erigon/core/rawdb/blockio"
+	"github.com/ledgerwatch/erigon/core/tracing"
 	"github.com/ledgerwatch/erigon/core/types"
 	"github.com/ledgerwatch/erigon/core/vm"
 	"github.com/ledgerwatch/erigon/eth/ethconfig"
 	"github.com/ledgerwatch/erigon/eth/stagedsync"
 	"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
+	"github.com/ledgerwatch/erigon/eth/tracers"
 	"github.com/ledgerwatch/erigon/p2p"
 	"github.com/ledgerwatch/erigon/p2p/sentry/sentry_multi_client"
 	"github.com/ledgerwatch/erigon/polygon/bor"
@@ -381,6 +383,10 @@ func MiningStep(ctx context.Context, db kv.RwDB, mining *stagedsync.Sync, tmpDir
 	}
 	defer tx.Rollback()
 
+	//histV3, err := kvcfg.HistoryV3.Enabled(tx)
+	//if err != nil {
+	//	return err
+	//}
 	var miningBatch kv.RwTx
 	//if histV3 {
 	//	sd := state.NewSharedDomains(tx)
@@ -406,7 +412,7 @@ func MiningStep(ctx context.Context, db kv.RwDB, mining *stagedsync.Sync, tmpDir
 	return nil
 }
 
-func addAndVerifyBlockStep(batch kv.RwTx, engine consensus.Engine, chainReader consensus.ChainReader, currentHeader *types.Header, currentBody *types.RawBody) error {
+func addAndVerifyBlockStep(batch kv.RwTx, engine consensus.Engine, chainReader consensus.ChainReader, currentHeader *types.Header, currentBody *types.RawBody, histV3 bool) error {
 	currentHeight := currentHeader.Number.Uint64()
 	currentHash := currentHeader.Hash()
 	if chainReader != nil {
@@ -436,7 +442,7 @@ func addAndVerifyBlockStep(batch kv.RwTx, engine consensus.Engine, chainReader c
 	if _, err := rawdb.WriteRawBodyIfNotExists(batch, currentHash, currentHeight, currentBody); err != nil {
 		return err
 	}
-	if prevHash != currentHash {
+	if histV3 && prevHash != currentHash {
 		if err := rawdb.AppendCanonicalTxNums(batch, currentHeight); err != nil {
 			return err
 		}
@@ -450,7 +456,7 @@ func addAndVerifyBlockStep(batch kv.RwTx, engine consensus.Engine, chainReader c
 	return nil
 }
 
-func StateStep(ctx context.Context, chainReader consensus.ChainReader, engine consensus.Engine, txc wrap.TxContainer, stateSync *stagedsync.Sync, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody) (err error) {
+func StateStep(ctx context.Context, chainReader consensus.ChainReader, engine consensus.Engine, txc wrap.TxContainer, stateSync *stagedsync.Sync, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody, histV3 bool) (err error) {
 	defer func() {
 		if rec := recover(); rec != nil {
 			err = fmt.Errorf("%+v, trace: %s", rec, dbg.Stack())
@@ -475,7 +481,7 @@ func StateStep(ctx context.Context, chainReader consensus.ChainReader, engine co
 		currentHeader := headersChain[i]
 		currentBody := bodiesChain[i]
 
-		if err := addAndVerifyBlockStep(txc.Tx, engine, chainReader, currentHeader, currentBody); err != nil {
+		if err := addAndVerifyBlockStep(txc.Tx, engine, chainReader, currentHeader, currentBody, histV3); err != nil {
 			return err
 		}
 		// Run state sync
@@ -489,7 +495,7 @@ func StateStep(ctx context.Context, chainReader consensus.ChainReader, engine co
 		return nil
 	}
 	// Prepare memory state for block execution
-	if err := addAndVerifyBlockStep(txc.Tx, engine, chainReader, header, body); err != nil {
+	if err := addAndVerifyBlockStep(txc.Tx, engine, chainReader, header, body, histV3); err != nil {
 		return err
 	}
 	// Run state sync
@@ -523,9 +529,14 @@ func NewDefaultStages(ctx context.Context,
 	recents *lru.ARCCache[libcommon.Hash, *bor.Snapshot],
 	signatures *lru.ARCCache[libcommon.Hash, libcommon.Address],
 	logger log.Logger,
+	tracer *tracers.Tracer,
 ) []*stagedsync.Stage {
+	var tracingHooks *tracing.Hooks
+	if tracer != nil {
+		tracingHooks = tracer.Hooks
+	}
 	dirs := cfg.Dirs
-	blockWriter := blockio.NewBlockWriter()
+	blockWriter := blockio.NewBlockWriter(cfg.HistoryV3)
 
 	// During Import we don't want other services like header requests, body requests etc. to be running.
 	// Hence we run it in the test mode.
@@ -559,13 +570,12 @@ func NewDefaultStages(ctx context.Context,
 		depositContract = cfg.Genesis.Config.DepositContract
 	}
 
-	historyV3 := true
 	return stagedsync.DefaultStages(ctx,
-		stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm, cfg.Prune),
-		stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications, loopBreakCheck),
+		stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm),
+		stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, cfg.HistoryV3, notifications, loopBreakCheck),
 		stagedsync.StageBorHeimdallCfg(db, snapDb, stagedsync.MiningState{}, *controlServer.ChainConfig, heimdallClient, blockReader, controlServer.Hd, controlServer.Penalize, loopBreakCheck, recents, signatures, cfg.WithHeimdallWaypointRecording, nil),
 		stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
-		stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, blockWriter, loopBreakCheck),
+		stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter, loopBreakCheck),
 		stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, loopBreakCheck),
 		stagedsync.StageExecuteBlocksCfg(
 			db,
@@ -574,10 +584,11 @@ func NewDefaultStages(ctx context.Context,
 			nil,
 			controlServer.ChainConfig,
 			controlServer.Engine,
-			&vm.Config{},
+			&vm.Config{Tracer: tracingHooks},
 			notifications.Accumulator,
 			cfg.StateStream,
 			/*stateStream=*/ false,
+			cfg.HistoryV3,
 			dirs,
 			blockReader,
 			controlServer.Hd,
@@ -586,8 +597,8 @@ func NewDefaultStages(ctx context.Context,
 			agg,
 			SilkwormForExecutionStage(silkworm, cfg),
 		),
-		stagedsync.StageHashStateCfg(db, dirs),
-		stagedsync.StageTrieCfg(db, true, true, false, dirs.Tmp, blockReader, controlServer.Hd, historyV3, agg),
+		stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3),
+		stagedsync.StageTrieCfg(db, true, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg),
 		stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp),
 		stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp, depositContract),
 		stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp),
@@ -609,10 +620,15 @@ func NewPipelineStages(ctx context.Context,
 	silkworm *silkworm.Silkworm,
 	forkValidator *engine_helpers.ForkValidator,
 	logger log.Logger,
+	tracer *tracers.Tracer,
 	checkStateRoot bool,
 ) []*stagedsync.Stage {
+	var tracingHooks *tracing.Hooks
+	if tracer != nil {
+		tracingHooks = tracer.Hooks
+	}
 	dirs := cfg.Dirs
-	blockWriter := blockio.NewBlockWriter()
+	blockWriter := blockio.NewBlockWriter(cfg.HistoryV3)
 
 	// During Import we don't want other services like header requests, body requests etc. to be running.
 	// Hence we run it in the test mode.
@@ -625,9 +641,8 @@ func NewPipelineStages(ctx context.Context,
 	}
 
 	if len(cfg.Sync.UploadLocation) == 0 {
-		historyV3 := true
 		return stagedsync.PipelineStages(ctx,
-			stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm, cfg.Prune),
+			stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm),
 			stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
 			stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, loopBreakCheck),
 			stagedsync.StageExecuteBlocksCfg(
@@ -637,10 +652,11 @@ func NewPipelineStages(ctx context.Context,
 				nil,
 				controlServer.ChainConfig,
 				controlServer.Engine,
-				&vm.Config{},
+				&vm.Config{Tracer: tracingHooks},
 				notifications.Accumulator,
 				cfg.StateStream,
 				/*stateStream=*/ false,
+				cfg.HistoryV3,
 				dirs,
 				blockReader,
 				controlServer.Hd,
@@ -649,8 +665,8 @@ func NewPipelineStages(ctx context.Context,
 				agg,
 				SilkwormForExecutionStage(silkworm, cfg),
 			),
-			stagedsync.StageHashStateCfg(db, dirs),
-			stagedsync.StageTrieCfg(db, checkStateRoot, true, false, dirs.Tmp, blockReader, controlServer.Hd, historyV3, agg),
+			stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3),
+			stagedsync.StageTrieCfg(db, checkStateRoot, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg),
 			stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp),
 			stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp, depositContract),
 			stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp),
@@ -659,13 +675,12 @@ func NewPipelineStages(ctx context.Context,
 			runInTestMode)
 	}
 
-	historyV3 := true
 	return stagedsync.UploaderPipelineStages(ctx,
-		stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm, cfg.Prune),
-		stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications, loopBreakCheck),
+		stagedsync.StageSnapshotsCfg(db, *controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.HistoryV3, agg, cfg.InternalCL && cfg.CaplinConfig.Backfilling, cfg.CaplinConfig.BlobBackfilling, silkworm),
+		stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, cfg.HistoryV3, notifications, loopBreakCheck),
 		stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
 		stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, loopBreakCheck),
-		stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, blockWriter, loopBreakCheck),
+		stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter, loopBreakCheck),
 		stagedsync.StageExecuteBlocksCfg(
 			db,
 			cfg.Prune,
@@ -673,10 +688,11 @@ func NewPipelineStages(ctx context.Context,
 			nil,
 			controlServer.ChainConfig,
 			controlServer.Engine,
-			&vm.Config{},
+			&vm.Config{Tracer: tracingHooks},
 			notifications.Accumulator,
 			cfg.StateStream,
 			/*stateStream=*/ false,
+			cfg.HistoryV3,
 			dirs,
 			blockReader,
 			controlServer.Hd,
@@ -685,8 +701,8 @@ func NewPipelineStages(ctx context.Context,
 			agg,
 			SilkwormForExecutionStage(silkworm, cfg),
 		),
-		stagedsync.StageHashStateCfg(db, dirs),
-		stagedsync.StageTrieCfg(db, checkStateRoot, true, false, dirs.Tmp, blockReader, controlServer.Hd, historyV3, agg),
+		stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3),
+		stagedsync.StageTrieCfg(db, checkStateRoot, true, false, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg),
 		stagedsync.StageHistoryCfg(db, cfg.Prune, dirs.Tmp),
 		stagedsync.StageLogIndexCfg(db, cfg.Prune, dirs.Tmp, depositContract),
 		stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, dirs.Tmp),
@@ -698,13 +714,16 @@ func NewPipelineStages(ctx context.Context,
 
 func NewInMemoryExecution(ctx context.Context, db kv.RwDB, cfg *ethconfig.Config, controlServer *sentry_multi_client.MultiClient,
 	dirs datadir.Dirs, notifications *shards.Notifications, blockReader services.FullBlockReader, blockWriter *blockio.BlockWriter, agg *state.Aggregator,
-	silkworm *silkworm.Silkworm, logger log.Logger) *stagedsync.Sync {
-	historyV3 := true
+	silkworm *silkworm.Silkworm, logger log.Logger, tracer *tracers.Tracer) *stagedsync.Sync {
+	var tracingHooks *tracing.Hooks
+	if tracer != nil {
+		tracingHooks = tracer.Hooks
+	}
 	return stagedsync.New(
 		cfg.Sync,
 		stagedsync.StateStages(ctx,
-			stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, false, blockReader, blockWriter, dirs.Tmp, nil, nil),
-			stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, blockWriter, nil),
+			stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, false, blockReader, blockWriter, dirs.Tmp, cfg.HistoryV3, nil, nil),
+			stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, *controlServer.ChainConfig, blockReader, cfg.HistoryV3, blockWriter, nil),
 			stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
 			stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, true, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, nil),
 			stagedsync.StageExecuteBlocksCfg(
@@ -714,10 +733,11 @@ func NewInMemoryExecution(ctx context.Context, db kv.RwDB, cfg *ethconfig.Config
 				nil,
 				controlServer.ChainConfig,
 				controlServer.Engine,
-				&vm.Config{},
+				&vm.Config{Tracer: tracingHooks},
 				notifications.Accumulator,
 				cfg.StateStream,
 				true,
+				cfg.HistoryV3,
 				cfg.Dirs,
 				blockReader,
 				controlServer.Hd,
@@ -726,8 +746,8 @@ func NewInMemoryExecution(ctx context.Context, db kv.RwDB, cfg *ethconfig.Config
 				agg,
 				SilkwormForExecutionStage(silkworm, cfg),
 			),
-			stagedsync.StageHashStateCfg(db, dirs),
-			stagedsync.StageTrieCfg(db, true, true, true, dirs.Tmp, blockReader, controlServer.Hd, historyV3, agg)),
+			stagedsync.StageHashStateCfg(db, dirs, cfg.HistoryV3),
+			stagedsync.StageTrieCfg(db, true, true, true, dirs.Tmp, blockReader, controlServer.Hd, cfg.HistoryV3, agg)),
 		stagedsync.StateUnwindOrder,
 		nil, /* pruneOrder */
 		logger,
@@ -761,17 +781,17 @@ func NewPolygonSyncStages(
 			snapDownloader,
 			blockReader,
 			notifications,
+			config.HistoryV3,
 			agg,
 			config.InternalCL && config.CaplinConfig.Backfilling,
 			config.CaplinConfig.BlobBackfilling,
 			silkworm,
-			config.Prune,
 		),
 		stagedsync.StageBlockHashesCfg(
 			db,
 			config.Dirs.Tmp,
 			chainConfig,
-			blockio.NewBlockWriter(),
+			blockio.NewBlockWriter(config.HistoryV3),
 		),
 		stagedsync.StageSendersCfg(
 			db,
@@ -795,6 +815,7 @@ func NewPolygonSyncStages(
 			notifications.Accumulator,
 			config.StateStream,
 			false, /* badBlockHalt */
+			config.HistoryV3,
 			config.Dirs,
 			blockReader,
 			nil, /* hd */
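Throughout stageloop.go the optional `*tracers.Tracer` is converted to `*tracing.Hooks` with the same three-line guard before being handed to `vm.Config`. A hypothetical helper (not part of the patch) makes the intent of that repetition explicit; the import paths come from the import hunk above:

```go
import (
	"github.com/ledgerwatch/erigon/core/tracing"
	"github.com/ledgerwatch/erigon/eth/tracers"
)

// hooksOf is a hypothetical nil-safe accessor: a nil tracer must map to a nil
// *tracing.Hooks so that vm.Config.Tracer stays nil and execution runs untraced.
func hooksOf(tracer *tracers.Tracer) *tracing.Hooks {
	if tracer == nil {
		return nil
	}
	return tracer.Hooks
}
```

With such a helper, each `&vm.Config{Tracer: tracingHooks}` call site could read `&vm.Config{Tracer: hooksOf(tracer)}`; the patch keeps the inline form at every constructor instead.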
diff --git a/turbo/tracing/flags.go b/turbo/tracing/flags.go
new file mode 100644
index 00000000000..d0c7192bc95
--- /dev/null
+++ b/turbo/tracing/flags.go
@@ -0,0 +1,22 @@
+package tracing
+
+import (
+	"github.com/urfave/cli/v2"
+)
+
+var (
+	VMTraceFlag = cli.StringFlag{
+		Name:  "vmtrace",
+		Usage: "Set the tracer provider",
+	}
+
+	VMTraceJsonConfigFlag = cli.StringFlag{
+		Name:  "vmtrace.jsonconfig",
+		Usage: "Set the config of the tracer",
+	}
+)
+
+var Flags = []cli.Flag{
+	&VMTraceFlag,
+	&VMTraceJsonConfigFlag,
+}
diff --git a/turbo/tracing/tracing.go b/turbo/tracing/tracing.go
new file mode 100644
index 00000000000..0f26814db01
--- /dev/null
+++ b/turbo/tracing/tracing.go
@@ -0,0 +1,21 @@
+package tracing
+
+import (
+	"github.com/ledgerwatch/erigon/eth/tracers"
+	_ "github.com/ledgerwatch/erigon/eth/tracers/live"
+
+	"github.com/urfave/cli/v2"
+)
+
+// SetupTracerCtx performs the tracing setup according to the parameters
+// contained in the given urfave context.
+func SetupTracerCtx(ctx *cli.Context) (*tracers.Tracer, error) {
+	tracerName := ctx.String(VMTraceFlag.Name)
+	if tracerName == "" {
+		return nil, nil
+	}
+
+	cfg := ctx.String(VMTraceJsonConfigFlag.Name)
+
+	return tracers.New(tracerName, &tracers.Context{}, []byte(cfg))
+}
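Together, flags.go and tracing.go give callers a two-step recipe: register `tracing.Flags` on the command, then call `SetupTracerCtx` once flags are parsed. A minimal, hypothetical wiring sketch; only `tracing.Flags` and `SetupTracerCtx` come from the patch, the surrounding app is illustrative:

```go
package main

import (
	"os"

	"github.com/ledgerwatch/erigon/turbo/tracing"
	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Flags: tracing.Flags, // registers --vmtrace and --vmtrace.jsonconfig
		Action: func(cliCtx *cli.Context) error {
			tracer, err := tracing.SetupTracerCtx(cliCtx)
			if err != nil {
				return err
			}
			// tracer is nil when --vmtrace is unset; every consumer added by
			// this patch (NewDefaultStages, NewPipelineStages, ...) must
			// tolerate that.
			_ = tracer
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		os.Exit(1)
	}
}
```

The blank import of `eth/tracers/live` in tracing.go is what registers the built-in live tracers, so `tracers.New(tracerName, ...)` can resolve names passed via `--vmtrace`.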
diff --git a/turbo/transactions/tracing.go b/turbo/transactions/tracing.go
index 26da6f5b9c5..d5cda069d74 100644
--- a/turbo/transactions/tracing.go
+++ b/turbo/transactions/tracing.go
@@ -9,6 +9,9 @@ import (
 	"time"
 
 	jsoniter "github.com/json-iterator/go"
+	"github.com/ledgerwatch/erigon/eth/consensuschain"
+	"github.com/ledgerwatch/log/v3"
+
 	"github.com/ledgerwatch/erigon-lib/chain"
 	libcommon "github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/kv"
@@ -19,6 +22,7 @@ import (
 	"github.com/ledgerwatch/erigon/core/vm"
 	"github.com/ledgerwatch/erigon/core/vm/evmtypes"
 	"github.com/ledgerwatch/erigon/eth/tracers"
+	tracerConfig "github.com/ledgerwatch/erigon/eth/tracers/config"
 	"github.com/ledgerwatch/erigon/eth/tracers/logger"
 	"github.com/ledgerwatch/erigon/turbo/rpchelper"
 	"github.com/ledgerwatch/erigon/turbo/services"
@@ -33,8 +37,8 @@ type BlockGetter interface {
 }
 
 // ComputeTxEnv returns the execution environment of a certain transaction.
-func ComputeTxEnv(ctx context.Context, engine consensus.EngineReader, block *types.Block, cfg *chain.Config, headerReader services.HeaderReader, dbtx kv.Tx, txIndex int) (core.Message, evmtypes.BlockContext, evmtypes.TxContext, *state.IntraBlockState, state.StateReader, error) {
-	reader, err := rpchelper.CreateHistoryStateReader(dbtx, block.NumberU64(), txIndex, cfg.ChainName)
+func ComputeTxEnv(ctx context.Context, engine consensus.EngineReader, block *types.Block, cfg *chain.Config, headerReader services.HeaderReader, dbtx kv.Tx, txIndex int, historyV3 bool) (core.Message, evmtypes.BlockContext, evmtypes.TxContext, *state.IntraBlockState, state.StateReader, error) {
+	reader, err := rpchelper.CreateHistoryStateReader(dbtx, block.NumberU64(), txIndex, historyV3, cfg.ChainName)
 	if err != nil {
 		return nil, evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, nil, err
 	}
@@ -55,19 +59,68 @@ func ComputeTxEnv(ctx context.Context, engine consensus.EngineReader, block *typ
 	// Recompute transactions up to the target index.
 	signer := types.MakeSigner(cfg, block.NumberU64(), block.Time())
-	rules := cfg.Rules(blockContext.BlockNumber, blockContext.Time)
-	txn := block.Transactions()[txIndex]
-	statedb.SetTxContext(txn.Hash(), block.Hash(), txIndex)
-	msg, _ := txn.AsMessage(*signer, block.BaseFee(), rules)
-	if msg.FeeCap().IsZero() && engine != nil {
-		syscall := func(contract libcommon.Address, data []byte) ([]byte, error) {
-			return core.SysCallContract(contract, data, cfg, statedb, header, engine, true /* constCall */)
+	if historyV3 {
+		rules := cfg.Rules(blockContext.BlockNumber, blockContext.Time)
+		txn := block.Transactions()[txIndex]
+		statedb.SetTxContext(txn.Hash(), block.Hash(), txIndex)
+		msg, _ := txn.AsMessage(*signer, block.BaseFee(), rules)
+		if msg.FeeCap().IsZero() && engine != nil {
+			syscall := func(contract libcommon.Address, data []byte) ([]byte, error) {
+				return core.SysCallContract(contract, data, cfg, statedb, header, engine, true /* constCall */, nil)
+			}
+			msg.SetIsFree(engine.IsServiceTransaction(msg.From(), syscall))
 		}
-		msg.SetIsFree(engine.IsServiceTransaction(msg.From(), syscall))
+
+		TxContext := core.NewEVMTxContext(msg)
+		return msg, blockContext, TxContext, statedb, reader, nil
 	}
+	vmenv := vm.NewEVM(blockContext, evmtypes.TxContext{}, statedb, cfg, vm.Config{})
+	rules := vmenv.ChainRules()
+
+	consensusHeaderReader := consensuschain.NewReader(cfg, dbtx, nil, nil)
+
+	logger := log.New("tracing")
+	err = core.InitializeBlockExecution(engine.(consensus.Engine), consensusHeaderReader, header, cfg, statedb, logger, nil)
+	if err != nil {
+		return nil, evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, nil, err
+	}
+
+	for idx, txn := range block.Transactions() {
+		select {
+		default:
+		case <-ctx.Done():
+			return nil, evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, nil, ctx.Err()
+		}
+		statedb.SetTxContext(txn.Hash(), block.Hash(), idx)
+
+		// Assemble the transaction call message and return if the requested offset is reached
+		msg, _ := txn.AsMessage(*signer, block.BaseFee(), rules)
+		if msg.FeeCap().IsZero() && engine != nil {
+			syscall := func(contract libcommon.Address, data []byte) ([]byte, error) {
+				return core.SysCallContract(contract, data, cfg, statedb, header, engine, true /* constCall */, nil)
+			}
+			msg.SetIsFree(engine.IsServiceTransaction(msg.From(), syscall))
+		}
 
-	TxContext := core.NewEVMTxContext(msg)
-	return msg, blockContext, TxContext, statedb, reader, nil
+		TxContext := core.NewEVMTxContext(msg)
+		if idx == txIndex {
+			return msg, blockContext, TxContext, statedb, reader, nil
+		}
+		vmenv.Reset(TxContext, statedb)
+		// Not yet the searched for transaction, execute on top of the current state
+		if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(txn.GetGas()).AddBlobGas(txn.GetBlobGas()), true /* refunds */, false /* gasBailout */); err != nil {
+			return nil, evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, nil, fmt.Errorf("transaction %x failed: %w", txn.Hash(), err)
+		}
+		// Ensure any modifications are committed to the state
+		// Only delete empty objects if EIP161 (part of Spurious Dragon) is in effect
+		_ = statedb.FinalizeTx(rules, reader.(*state.PlainState))
+
+		if idx+1 == len(block.Transactions()) {
+			// Return the state from evaluating all txs in the block, note no msg or TxContext in this case
+			return nil, blockContext, evmtypes.TxContext{}, statedb, reader, nil
+		}
+	}
+	return nil, evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %x", txIndex, block.Hash())
 }
 
 // TraceTx configures a new tracer according to the provided configuration, and
@@ -75,11 +128,12 @@ func ComputeTxEnv(ctx context.Context, engine consensus.EngineReader, block *typ
 // be tracer dependent.
 func TraceTx(
 	ctx context.Context,
+	tx types.Transaction,
 	message core.Message,
 	blockCtx evmtypes.BlockContext,
 	txCtx evmtypes.TxContext,
-	ibs evmtypes.IntraBlockState,
-	config *tracers.TraceConfig,
+	ibs *state.IntraBlockState,
+	config *tracerConfig.TraceConfig,
 	chainConfig *chain.Config,
 	stream *jsoniter.Stream,
 	callTimeout time.Duration,
@@ -94,7 +148,20 @@ func TraceTx(
 	execCb := func(evm *vm.EVM, refunds bool) (*core.ExecutionResult, error) {
 		gp := new(core.GasPool).AddGas(message.Gas()).AddBlobGas(message.BlobGas())
-		return core.ApplyMessage(evm, message, gp, refunds, false /* gasBailout */)
+		if tracer != nil && tracer.Hooks.OnTxStart != nil {
+			tracer.Hooks.OnTxStart(evm.GetVMContext(), tx, message.From())
+		}
+		result, err := core.ApplyMessage(evm, message, gp, refunds, false /* gasBailout */)
+		if err != nil {
+			if tracer != nil && tracer.Hooks.OnTxEnd != nil {
+				tracer.Hooks.OnTxEnd(nil, err)
+			}
+		} else {
+			if tracer != nil && tracer.Hooks.OnTxEnd != nil {
+				tracer.Hooks.OnTxEnd(&types.Receipt{GasUsed: result.UsedGas}, nil)
+			}
+		}
+		return result, err
 	}
 
 	return ExecuteTraceTx(blockCtx, txCtx, ibs, config, chainConfig, stream, tracer, streaming, execCb)
@@ -102,11 +169,11 @@ func TraceTx(
 
 func AssembleTracer(
 	ctx context.Context,
-	config *tracers.TraceConfig,
+	config *tracerConfig.TraceConfig,
 	txHash libcommon.Hash,
 	stream *jsoniter.Stream,
 	callTimeout time.Duration,
-) (vm.EVMLogger, bool, context.CancelFunc, error) {
+) (*tracers.Tracer, bool, context.CancelFunc, error) {
 	// Assemble the structured logger or the JavaScript tracer
 	switch {
 	case config != nil && config.Tracer != nil:
@@ -139,26 +206,26 @@ func AssembleTracer(
 		return tracer, false, cancel, nil
 	case config == nil:
-		return logger.NewJsonStreamLogger(nil, ctx, stream), true, func() {}, nil
+		return logger.NewJsonStreamLogger(nil, ctx, stream).Tracer(), true, func() {}, nil
 	default:
-		return logger.NewJsonStreamLogger(config.LogConfig, ctx, stream), true, func() {}, nil
+		return logger.NewJsonStreamLogger(config.LogConfig, ctx, stream).Tracer(), true, func() {}, nil
 	}
 }
 
 func ExecuteTraceTx(
 	blockCtx evmtypes.BlockContext,
 	txCtx evmtypes.TxContext,
-	ibs evmtypes.IntraBlockState,
-	config *tracers.TraceConfig,
+	ibs *state.IntraBlockState,
+	config *tracerConfig.TraceConfig,
 	chainConfig *chain.Config,
 	stream *jsoniter.Stream,
-	tracer vm.EVMLogger,
+	tracer *tracers.Tracer,
 	streaming bool,
 	execCb func(evm *vm.EVM, refunds bool) (*core.ExecutionResult, error),
 ) error {
 	// Run the transaction with tracing enabled.
-	evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vm.Config{Debug: true, Tracer: tracer})
-
+	evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vm.Config{Debug: true, Tracer: tracer.Hooks})
+	ibs.SetLogger(tracer.Hooks)
 	var refunds = true
 	if config != nil && config.NoRefunds != nil && *config.NoRefunds {
 		refunds = false
@@ -200,7 +267,7 @@ func ExecuteTraceTx(
 		stream.WriteString(returnVal)
 		stream.WriteObjectEnd()
 	} else {
-		r, err := tracer.(tracers.Tracer).GetResult()
+		r, err := tracer.GetResult()
 		if err != nil {
 			stream.WriteNil()
 			return err
diff --git a/turbo/trie/trie_root.go b/turbo/trie/trie_root.go
index 62e7a579602..6696ecaf954 100644
--- a/turbo/trie/trie_root.go
+++ b/turbo/trie/trie_root.go
@@ -828,7 +828,7 @@ func (c *AccTrieCursor) _seek(seek []byte, withinPrefix []byte) (bool, error) {
 	// optimistic .Next call, can use result in 2 cases:
 	//	- k is not child of current key
 	//	- looking for first child, means: c.childID[c.lvl] <= int16(bits.TrailingZeros16(c.hasTree[c.lvl]))
-	// otherwise do .seekInFiles call
+	// otherwise do .Seek call
 	//k, v, err = c.c.Next()
 	//if err != nil {
 	//	return false, err
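One pattern worth calling out from the TraceTx hunk: `ApplyMessage` is now bracketed by the optional `OnTxStart`/`OnTxEnd` hooks, with a synthetic receipt carrying the gas used on success. A behaviorally equivalent, slightly flattened sketch of that callback follows; the names (`evm`, `tx`, `message`, `tracer`) are those in scope in the hunk, and this is a reading of the patch, not its literal code:

```go
execCb := func(evm *vm.EVM, refunds bool) (*core.ExecutionResult, error) {
	gp := new(core.GasPool).AddGas(message.Gas()).AddBlobGas(message.BlobGas())
	// Fire OnTxStart before execution so the tracer sees the VM context.
	if tracer != nil && tracer.Hooks.OnTxStart != nil {
		tracer.Hooks.OnTxStart(evm.GetVMContext(), tx, message.From())
	}
	result, err := core.ApplyMessage(evm, message, gp, refunds, false /* gasBailout */)
	// OnTxEnd reports either the failure or a minimal receipt with gas used.
	if tracer != nil && tracer.Hooks.OnTxEnd != nil {
		if err != nil {
			tracer.Hooks.OnTxEnd(nil, err)
		} else {
			tracer.Hooks.OnTxEnd(&types.Receipt{GasUsed: result.UsedGas}, nil)
		}
	}
	return result, err
}
```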