diff --git a/cmd/run.go b/cmd/run.go
index d4efd13f55..a7c297a26f 100644
--- a/cmd/run.go
+++ b/cmd/run.go
@@ -744,7 +744,8 @@ func forkIDIntervals(ctx context.Context, st *state.State, etherman *etherman.Cl
 		if err != nil && !errors.Is(err, state.ErrStateNotSynchronized) {
 			return []state.ForkIDInterval{}, fmt.Errorf("error checking lastL1BlockSynced. Error: %v", err)
 		}
-		if lastBlock != nil {
+		// If lastBlock is below genesisBlock it means state.ErrStateNotSynchronized (the sync process has not started yet; it is still doing the pre-genesis sync)
+		if lastBlock != nil && lastBlock.BlockNumber >= genesisBlockNumber {
 			log.Info("Getting forkIDs intervals. Please wait...")
 			// Read Fork ID FROM POE SC
 			forkIntervals, err := etherman.GetForks(ctx, genesisBlockNumber, lastBlock.BlockNumber)
@@ -784,32 +785,14 @@ func forkIDIntervals(ctx context.Context, st *state.State, etherman *etherman.Cl
 			}
 			forkIDIntervals = forkIntervals
 		} else {
-			log.Debug("Getting all forkIDs")
-
-			// Get last L1 block number
-			bn, err := etherman.GetLatestBlockNumber(ctx)
-			if err != nil {
-				return []state.ForkIDInterval{}, fmt.Errorf("error getting latest block number. Error: %v", err)
-			}
-
-			// Get all forkIDs since genesis
-			forkIntervals, err := etherman.GetForks(ctx, genesisBlockNumber, bn)
+			log.Debug("Getting initial forkID")
+			forkIntervals, err := etherman.GetForks(ctx, genesisBlockNumber, genesisBlockNumber)
 			if err != nil {
 				return []state.ForkIDInterval{}, fmt.Errorf("error getting forks. Please check the configuration. Error: %v", err)
 			} else if len(forkIntervals) == 0 {
 				return []state.ForkIDInterval{}, fmt.Errorf("error: no forkID received. It should receive at least one, please check the configuration...")
 			}
 			forkIDIntervals = forkIntervals
-
-			log.Debugf("Retrieved %d forkIDs", len(forkIDIntervals))
-
-			log.Debug("Adding forkIDs to db and memory")
-			for _, forkID := range forkIDIntervals {
-				err = st.AddForkIDInterval(ctx, forkID, nil)
-				if err != nil {
-					log.Fatal("error adding forkID to db. Error: ", err)
-				}
-			}
 		}
 	}
 	return forkIDIntervals, nil
diff --git a/etherman/etherman.go b/etherman/etherman.go
index 0d48318f3d..eb09d6601a 100644
--- a/etherman/etherman.go
+++ b/etherman/etherman.go
@@ -363,6 +363,23 @@ func (etherMan *Client) VerifyGenBlockNumber(ctx context.Context, genBlockNumber
 	return true, nil
 }
 
+// GetL1BlockUpgradeLxLy returns the L1 block number where the LxLy upgrade happened (the first UpdateL1InfoTree event emitted before genesisBlock), or ErrNotFound if there is none
+func (etherMan *Client) GetL1BlockUpgradeLxLy(ctx context.Context, genesisBlock uint64) (uint64, error) {
+	it, err := etherMan.GlobalExitRootManager.FilterUpdateL1InfoTree(&bind.FilterOpts{
+		Start:   1,
+		End:     &genesisBlock,
+		Context: ctx,
+	}, nil, nil)
+	if err != nil {
+		return uint64(0), err
+	}
+	for it.Next() {
+		log.Debugf("BlockNumber: %d Topics:L1InfoTree", it.Event.Raw.BlockNumber)
+		return it.Event.Raw.BlockNumber, nil
+	}
+	return uint64(0), ErrNotFound
+}
+
 // GetForks returns fork information
 func (etherMan *Client) GetForks(ctx context.Context, genBlockNumber uint64, lastL1BlockSynced uint64) ([]state.ForkIDInterval, error) {
 	log.Debug("Getting forkIDs from blockNumber: ", genBlockNumber)
@@ -499,6 +516,25 @@ func (etherMan *Client) GetRollupInfoByBlockRange(ctx context.Context, fromBlock
 	return blocks, blocksOrder, nil
 }
 
+// GetRollupInfoByBlockRangePreviousRollupGenesis retrieves the rollup information contained in the given range of Ethereum blocks.
+// Unlike GetRollupInfoByBlockRange, it only queries the UpdateL1InfoTree events emitted by the GlobalExitRootManager, which is what is needed for the blocks previous to the rollup genesis block. 
+func (etherMan *Client) GetRollupInfoByBlockRangePreviousRollupGenesis(ctx context.Context, fromBlock uint64, toBlock *uint64) ([]Block, map[common.Hash][]Order, error) { + // Filter query + query := ethereum.FilterQuery{ + FromBlock: new(big.Int).SetUint64(fromBlock), + Addresses: []common.Address{etherMan.l1Cfg.GlobalExitRootManagerAddr}, + Topics: [][]common.Hash{{updateL1InfoTreeSignatureHash}}, + } + if toBlock != nil { + query.ToBlock = new(big.Int).SetUint64(*toBlock) + } + blocks, blocksOrder, err := etherMan.readEvents(ctx, query) + if err != nil { + return nil, nil, err + } + return blocks, blocksOrder, nil +} + // Order contains the event order to let the synchronizer store the information following this order. type Order struct { Name EventOrder diff --git a/jsonrpc/mocks/mock_pool.go b/jsonrpc/mocks/mock_pool.go index 1c872c9ca8..b9f08903dc 100644 --- a/jsonrpc/mocks/mock_pool.go +++ b/jsonrpc/mocks/mock_pool.go @@ -70,34 +70,6 @@ func (_m *PoolMock) CalculateEffectiveGasPrice(rawTx []byte, txGasPrice *big.Int return r0, r1 } -// CalculateEffectiveGasPricePercentage provides a mock function with given fields: gasPrice, effectiveGasPrice -func (_m *PoolMock) CalculateEffectiveGasPricePercentage(gasPrice *big.Int, effectiveGasPrice *big.Int) (uint8, error) { - ret := _m.Called(gasPrice, effectiveGasPrice) - - if len(ret) == 0 { - panic("no return value specified for CalculateEffectiveGasPricePercentage") - } - - var r0 uint8 - var r1 error - if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) (uint8, error)); ok { - return rf(gasPrice, effectiveGasPrice) - } - if rf, ok := ret.Get(0).(func(*big.Int, *big.Int) uint8); ok { - r0 = rf(gasPrice, effectiveGasPrice) - } else { - r0 = ret.Get(0).(uint8) - } - - if rf, ok := ret.Get(1).(func(*big.Int, *big.Int) error); ok { - r1 = rf(gasPrice, effectiveGasPrice) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // CheckPolicy provides a mock function with given fields: ctx, policy, address func (_m *PoolMock) CheckPolicy(ctx context.Context, policy pool.PolicyName, address common.Address) (bool, error) { ret := _m.Called(ctx, policy, address) diff --git a/state/block.go b/state/block.go index 7883770249..8b91425bb3 100644 --- a/state/block.go +++ b/state/block.go @@ -1,6 +1,7 @@ package state import ( + "fmt" "time" "github.com/ethereum/go-ethereum/common" @@ -15,6 +16,10 @@ type Block struct { Checked bool } +func (b *Block) String() string { + return fmt.Sprintf("BlockNumber: %d, BlockHash: %s, ParentHash: %s, ReceivedAt: %s", b.BlockNumber, b.BlockHash, b.ParentHash, b.ReceivedAt) +} + // NewBlock creates a block with the given data. 
func NewBlock(blockNumber uint64) *Block { return &Block{BlockNumber: blockNumber} diff --git a/state/genesis.go b/state/genesis.go index 60a7bcba92..47a5c6f2db 100644 --- a/state/genesis.go +++ b/state/genesis.go @@ -17,6 +17,11 @@ import ( "github.com/jackc/pgx/v4" ) +const ( + // AutoDiscoverRollupManagerBlockNumber is the value to auto-discover the RollupManager creation block number + AutoDiscoverRollupManagerBlockNumber = uint64(0) +) + // Genesis contains the information to populate state on creation type Genesis struct { // RollupBlockNumber is the block number where the polygonZKEVM smc was deployed on L1 diff --git a/synchronizer/common/syncinterfaces/etherman.go b/synchronizer/common/syncinterfaces/etherman.go index fdbdd669f8..44717746df 100644 --- a/synchronizer/common/syncinterfaces/etherman.go +++ b/synchronizer/common/syncinterfaces/etherman.go @@ -20,8 +20,14 @@ type EthermanFullInterface interface { EthermanGetLatestBatchNumber GetFinalizedBlockNumber(ctx context.Context) (uint64, error) + EthermanPreRollup } type EthermanGetLatestBatchNumber interface { GetLatestBatchNumber() (uint64, error) } + +type EthermanPreRollup interface { + GetL1BlockUpgradeLxLy(ctx context.Context, genesisBlock uint64) (uint64, error) + GetRollupInfoByBlockRangePreviousRollupGenesis(ctx context.Context, fromBlock uint64, toBlock *uint64) ([]etherman.Block, map[common.Hash][]etherman.Order, error) +} diff --git a/synchronizer/common/syncinterfaces/mocks/etherman_full_interface.go b/synchronizer/common/syncinterfaces/mocks/etherman_full_interface.go index a904419575..c6e99c36ac 100644 --- a/synchronizer/common/syncinterfaces/mocks/etherman_full_interface.go +++ b/synchronizer/common/syncinterfaces/mocks/etherman_full_interface.go @@ -143,6 +143,63 @@ func (_c *EthermanFullInterface_GetFinalizedBlockNumber_Call) RunAndReturn(run f return _c } +// GetL1BlockUpgradeLxLy provides a mock function with given fields: ctx, genesisBlock +func (_m *EthermanFullInterface) GetL1BlockUpgradeLxLy(ctx context.Context, genesisBlock uint64) (uint64, error) { + ret := _m.Called(ctx, genesisBlock) + + if len(ret) == 0 { + panic("no return value specified for GetL1BlockUpgradeLxLy") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (uint64, error)); ok { + return rf(ctx, genesisBlock) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) uint64); ok { + r0 = rf(ctx, genesisBlock) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, genesisBlock) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthermanFullInterface_GetL1BlockUpgradeLxLy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL1BlockUpgradeLxLy' +type EthermanFullInterface_GetL1BlockUpgradeLxLy_Call struct { + *mock.Call +} + +// GetL1BlockUpgradeLxLy is a helper method to define mock.On call +// - ctx context.Context +// - genesisBlock uint64 +func (_e *EthermanFullInterface_Expecter) GetL1BlockUpgradeLxLy(ctx interface{}, genesisBlock interface{}) *EthermanFullInterface_GetL1BlockUpgradeLxLy_Call { + return &EthermanFullInterface_GetL1BlockUpgradeLxLy_Call{Call: _e.mock.On("GetL1BlockUpgradeLxLy", ctx, genesisBlock)} +} + +func (_c *EthermanFullInterface_GetL1BlockUpgradeLxLy_Call) Run(run func(ctx context.Context, genesisBlock uint64)) *EthermanFullInterface_GetL1BlockUpgradeLxLy_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), 
args[1].(uint64)) + }) + return _c +} + +func (_c *EthermanFullInterface_GetL1BlockUpgradeLxLy_Call) Return(_a0 uint64, _a1 error) *EthermanFullInterface_GetL1BlockUpgradeLxLy_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthermanFullInterface_GetL1BlockUpgradeLxLy_Call) RunAndReturn(run func(context.Context, uint64) (uint64, error)) *EthermanFullInterface_GetL1BlockUpgradeLxLy_Call { + _c.Call.Return(run) + return _c +} + // GetLatestBatchNumber provides a mock function with given fields: func (_m *EthermanFullInterface) GetLatestBatchNumber() (uint64, error) { ret := _m.Called() @@ -322,6 +379,75 @@ func (_c *EthermanFullInterface_GetRollupInfoByBlockRange_Call) RunAndReturn(run return _c } +// GetRollupInfoByBlockRangePreviousRollupGenesis provides a mock function with given fields: ctx, fromBlock, toBlock +func (_m *EthermanFullInterface) GetRollupInfoByBlockRangePreviousRollupGenesis(ctx context.Context, fromBlock uint64, toBlock *uint64) ([]etherman.Block, map[common.Hash][]etherman.Order, error) { + ret := _m.Called(ctx, fromBlock, toBlock) + + if len(ret) == 0 { + panic("no return value specified for GetRollupInfoByBlockRangePreviousRollupGenesis") + } + + var r0 []etherman.Block + var r1 map[common.Hash][]etherman.Order + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, *uint64) ([]etherman.Block, map[common.Hash][]etherman.Order, error)); ok { + return rf(ctx, fromBlock, toBlock) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, *uint64) []etherman.Block); ok { + r0 = rf(ctx, fromBlock, toBlock) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]etherman.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, *uint64) map[common.Hash][]etherman.Order); ok { + r1 = rf(ctx, fromBlock, toBlock) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(map[common.Hash][]etherman.Order) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, uint64, *uint64) error); ok { + r2 = rf(ctx, fromBlock, toBlock) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// EthermanFullInterface_GetRollupInfoByBlockRangePreviousRollupGenesis_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRollupInfoByBlockRangePreviousRollupGenesis' +type EthermanFullInterface_GetRollupInfoByBlockRangePreviousRollupGenesis_Call struct { + *mock.Call +} + +// GetRollupInfoByBlockRangePreviousRollupGenesis is a helper method to define mock.On call +// - ctx context.Context +// - fromBlock uint64 +// - toBlock *uint64 +func (_e *EthermanFullInterface_Expecter) GetRollupInfoByBlockRangePreviousRollupGenesis(ctx interface{}, fromBlock interface{}, toBlock interface{}) *EthermanFullInterface_GetRollupInfoByBlockRangePreviousRollupGenesis_Call { + return &EthermanFullInterface_GetRollupInfoByBlockRangePreviousRollupGenesis_Call{Call: _e.mock.On("GetRollupInfoByBlockRangePreviousRollupGenesis", ctx, fromBlock, toBlock)} +} + +func (_c *EthermanFullInterface_GetRollupInfoByBlockRangePreviousRollupGenesis_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock *uint64)) *EthermanFullInterface_GetRollupInfoByBlockRangePreviousRollupGenesis_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(*uint64)) + }) + return _c +} + +func (_c *EthermanFullInterface_GetRollupInfoByBlockRangePreviousRollupGenesis_Call) Return(_a0 []etherman.Block, _a1 map[common.Hash][]etherman.Order, _a2 error) 
*EthermanFullInterface_GetRollupInfoByBlockRangePreviousRollupGenesis_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *EthermanFullInterface_GetRollupInfoByBlockRangePreviousRollupGenesis_Call) RunAndReturn(run func(context.Context, uint64, *uint64) ([]etherman.Block, map[common.Hash][]etherman.Order, error)) *EthermanFullInterface_GetRollupInfoByBlockRangePreviousRollupGenesis_Call { + _c.Call.Return(run) + return _c +} + // GetTrustedSequencerURL provides a mock function with given fields: func (_m *EthermanFullInterface) GetTrustedSequencerURL() (string, error) { ret := _m.Called() diff --git a/synchronizer/common/syncinterfaces/mocks/etherman_pre_rollup.go b/synchronizer/common/syncinterfaces/mocks/etherman_pre_rollup.go new file mode 100644 index 0000000000..3599152aee --- /dev/null +++ b/synchronizer/common/syncinterfaces/mocks/etherman_pre_rollup.go @@ -0,0 +1,166 @@ +// Code generated by mockery. DO NOT EDIT. + +package mock_syncinterfaces + +import ( + context "context" + + common "github.com/ethereum/go-ethereum/common" + + etherman "github.com/0xPolygonHermez/zkevm-node/etherman" + + mock "github.com/stretchr/testify/mock" +) + +// EthermanPreRollup is an autogenerated mock type for the EthermanPreRollup type +type EthermanPreRollup struct { + mock.Mock +} + +type EthermanPreRollup_Expecter struct { + mock *mock.Mock +} + +func (_m *EthermanPreRollup) EXPECT() *EthermanPreRollup_Expecter { + return &EthermanPreRollup_Expecter{mock: &_m.Mock} +} + +// GetL1BlockUpgradeLxLy provides a mock function with given fields: ctx, genesisBlock +func (_m *EthermanPreRollup) GetL1BlockUpgradeLxLy(ctx context.Context, genesisBlock uint64) (uint64, error) { + ret := _m.Called(ctx, genesisBlock) + + if len(ret) == 0 { + panic("no return value specified for GetL1BlockUpgradeLxLy") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (uint64, error)); ok { + return rf(ctx, genesisBlock) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) uint64); ok { + r0 = rf(ctx, genesisBlock) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, genesisBlock) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// EthermanPreRollup_GetL1BlockUpgradeLxLy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL1BlockUpgradeLxLy' +type EthermanPreRollup_GetL1BlockUpgradeLxLy_Call struct { + *mock.Call +} + +// GetL1BlockUpgradeLxLy is a helper method to define mock.On call +// - ctx context.Context +// - genesisBlock uint64 +func (_e *EthermanPreRollup_Expecter) GetL1BlockUpgradeLxLy(ctx interface{}, genesisBlock interface{}) *EthermanPreRollup_GetL1BlockUpgradeLxLy_Call { + return &EthermanPreRollup_GetL1BlockUpgradeLxLy_Call{Call: _e.mock.On("GetL1BlockUpgradeLxLy", ctx, genesisBlock)} +} + +func (_c *EthermanPreRollup_GetL1BlockUpgradeLxLy_Call) Run(run func(ctx context.Context, genesisBlock uint64)) *EthermanPreRollup_GetL1BlockUpgradeLxLy_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *EthermanPreRollup_GetL1BlockUpgradeLxLy_Call) Return(_a0 uint64, _a1 error) *EthermanPreRollup_GetL1BlockUpgradeLxLy_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *EthermanPreRollup_GetL1BlockUpgradeLxLy_Call) RunAndReturn(run func(context.Context, uint64) (uint64, error)) *EthermanPreRollup_GetL1BlockUpgradeLxLy_Call { + _c.Call.Return(run) 
+ return _c +} + +// GetRollupInfoByBlockRangePreviousRollupGenesis provides a mock function with given fields: ctx, fromBlock, toBlock +func (_m *EthermanPreRollup) GetRollupInfoByBlockRangePreviousRollupGenesis(ctx context.Context, fromBlock uint64, toBlock *uint64) ([]etherman.Block, map[common.Hash][]etherman.Order, error) { + ret := _m.Called(ctx, fromBlock, toBlock) + + if len(ret) == 0 { + panic("no return value specified for GetRollupInfoByBlockRangePreviousRollupGenesis") + } + + var r0 []etherman.Block + var r1 map[common.Hash][]etherman.Order + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, *uint64) ([]etherman.Block, map[common.Hash][]etherman.Order, error)); ok { + return rf(ctx, fromBlock, toBlock) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, *uint64) []etherman.Block); ok { + r0 = rf(ctx, fromBlock, toBlock) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]etherman.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, *uint64) map[common.Hash][]etherman.Order); ok { + r1 = rf(ctx, fromBlock, toBlock) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(map[common.Hash][]etherman.Order) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, uint64, *uint64) error); ok { + r2 = rf(ctx, fromBlock, toBlock) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// EthermanPreRollup_GetRollupInfoByBlockRangePreviousRollupGenesis_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRollupInfoByBlockRangePreviousRollupGenesis' +type EthermanPreRollup_GetRollupInfoByBlockRangePreviousRollupGenesis_Call struct { + *mock.Call +} + +// GetRollupInfoByBlockRangePreviousRollupGenesis is a helper method to define mock.On call +// - ctx context.Context +// - fromBlock uint64 +// - toBlock *uint64 +func (_e *EthermanPreRollup_Expecter) GetRollupInfoByBlockRangePreviousRollupGenesis(ctx interface{}, fromBlock interface{}, toBlock interface{}) *EthermanPreRollup_GetRollupInfoByBlockRangePreviousRollupGenesis_Call { + return &EthermanPreRollup_GetRollupInfoByBlockRangePreviousRollupGenesis_Call{Call: _e.mock.On("GetRollupInfoByBlockRangePreviousRollupGenesis", ctx, fromBlock, toBlock)} +} + +func (_c *EthermanPreRollup_GetRollupInfoByBlockRangePreviousRollupGenesis_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock *uint64)) *EthermanPreRollup_GetRollupInfoByBlockRangePreviousRollupGenesis_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(*uint64)) + }) + return _c +} + +func (_c *EthermanPreRollup_GetRollupInfoByBlockRangePreviousRollupGenesis_Call) Return(_a0 []etherman.Block, _a1 map[common.Hash][]etherman.Order, _a2 error) *EthermanPreRollup_GetRollupInfoByBlockRangePreviousRollupGenesis_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *EthermanPreRollup_GetRollupInfoByBlockRangePreviousRollupGenesis_Call) RunAndReturn(run func(context.Context, uint64, *uint64) ([]etherman.Block, map[common.Hash][]etherman.Order, error)) *EthermanPreRollup_GetRollupInfoByBlockRangePreviousRollupGenesis_Call { + _c.Call.Return(run) + return _c +} + +// NewEthermanPreRollup creates a new instance of EthermanPreRollup. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewEthermanPreRollup(t interface { + mock.TestingT + Cleanup(func()) +}) *EthermanPreRollup { + mock := &EthermanPreRollup{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/synchronizer/mock_etherman.go b/synchronizer/mock_etherman.go index 4c0b9c1763..4963d5750a 100644 --- a/synchronizer/mock_etherman.go +++ b/synchronizer/mock_etherman.go @@ -87,6 +87,119 @@ func (_c *ethermanMock_EthBlockByNumber_Call) RunAndReturn(run func(context.Cont return _c } +// GetFinalizedBlockNumber provides a mock function with given fields: ctx +func (_m *ethermanMock) GetFinalizedBlockNumber(ctx context.Context) (uint64, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetFinalizedBlockNumber") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ethermanMock_GetFinalizedBlockNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFinalizedBlockNumber' +type ethermanMock_GetFinalizedBlockNumber_Call struct { + *mock.Call +} + +// GetFinalizedBlockNumber is a helper method to define mock.On call +// - ctx context.Context +func (_e *ethermanMock_Expecter) GetFinalizedBlockNumber(ctx interface{}) *ethermanMock_GetFinalizedBlockNumber_Call { + return ðermanMock_GetFinalizedBlockNumber_Call{Call: _e.mock.On("GetFinalizedBlockNumber", ctx)} +} + +func (_c *ethermanMock_GetFinalizedBlockNumber_Call) Run(run func(ctx context.Context)) *ethermanMock_GetFinalizedBlockNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *ethermanMock_GetFinalizedBlockNumber_Call) Return(_a0 uint64, _a1 error) *ethermanMock_GetFinalizedBlockNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ethermanMock_GetFinalizedBlockNumber_Call) RunAndReturn(run func(context.Context) (uint64, error)) *ethermanMock_GetFinalizedBlockNumber_Call { + _c.Call.Return(run) + return _c +} + +// GetL1BlockUpgradeLxLy provides a mock function with given fields: ctx, genesisBlock +func (_m *ethermanMock) GetL1BlockUpgradeLxLy(ctx context.Context, genesisBlock uint64) (uint64, error) { + ret := _m.Called(ctx, genesisBlock) + + if len(ret) == 0 { + panic("no return value specified for GetL1BlockUpgradeLxLy") + } + + var r0 uint64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64) (uint64, error)); ok { + return rf(ctx, genesisBlock) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64) uint64); ok { + r0 = rf(ctx, genesisBlock) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64) error); ok { + r1 = rf(ctx, genesisBlock) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ethermanMock_GetL1BlockUpgradeLxLy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetL1BlockUpgradeLxLy' +type ethermanMock_GetL1BlockUpgradeLxLy_Call struct { + *mock.Call +} + +// GetL1BlockUpgradeLxLy is a helper method to define mock.On call +// - ctx context.Context +// - genesisBlock uint64 +func (_e *ethermanMock_Expecter) GetL1BlockUpgradeLxLy(ctx interface{}, genesisBlock interface{}) 
*ethermanMock_GetL1BlockUpgradeLxLy_Call { + return ðermanMock_GetL1BlockUpgradeLxLy_Call{Call: _e.mock.On("GetL1BlockUpgradeLxLy", ctx, genesisBlock)} +} + +func (_c *ethermanMock_GetL1BlockUpgradeLxLy_Call) Run(run func(ctx context.Context, genesisBlock uint64)) *ethermanMock_GetL1BlockUpgradeLxLy_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64)) + }) + return _c +} + +func (_c *ethermanMock_GetL1BlockUpgradeLxLy_Call) Return(_a0 uint64, _a1 error) *ethermanMock_GetL1BlockUpgradeLxLy_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *ethermanMock_GetL1BlockUpgradeLxLy_Call) RunAndReturn(run func(context.Context, uint64) (uint64, error)) *ethermanMock_GetL1BlockUpgradeLxLy_Call { + _c.Call.Return(run) + return _c +} + // GetLatestBatchNumber provides a mock function with given fields: func (_m *ethermanMock) GetLatestBatchNumber() (uint64, error) { ret := _m.Called() @@ -266,6 +379,75 @@ func (_c *ethermanMock_GetRollupInfoByBlockRange_Call) RunAndReturn(run func(con return _c } +// GetRollupInfoByBlockRangePreviousRollupGenesis provides a mock function with given fields: ctx, fromBlock, toBlock +func (_m *ethermanMock) GetRollupInfoByBlockRangePreviousRollupGenesis(ctx context.Context, fromBlock uint64, toBlock *uint64) ([]etherman.Block, map[common.Hash][]etherman.Order, error) { + ret := _m.Called(ctx, fromBlock, toBlock) + + if len(ret) == 0 { + panic("no return value specified for GetRollupInfoByBlockRangePreviousRollupGenesis") + } + + var r0 []etherman.Block + var r1 map[common.Hash][]etherman.Order + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, *uint64) ([]etherman.Block, map[common.Hash][]etherman.Order, error)); ok { + return rf(ctx, fromBlock, toBlock) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, *uint64) []etherman.Block); ok { + r0 = rf(ctx, fromBlock, toBlock) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]etherman.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, *uint64) map[common.Hash][]etherman.Order); ok { + r1 = rf(ctx, fromBlock, toBlock) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(map[common.Hash][]etherman.Order) + } + } + + if rf, ok := ret.Get(2).(func(context.Context, uint64, *uint64) error); ok { + r2 = rf(ctx, fromBlock, toBlock) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +// ethermanMock_GetRollupInfoByBlockRangePreviousRollupGenesis_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRollupInfoByBlockRangePreviousRollupGenesis' +type ethermanMock_GetRollupInfoByBlockRangePreviousRollupGenesis_Call struct { + *mock.Call +} + +// GetRollupInfoByBlockRangePreviousRollupGenesis is a helper method to define mock.On call +// - ctx context.Context +// - fromBlock uint64 +// - toBlock *uint64 +func (_e *ethermanMock_Expecter) GetRollupInfoByBlockRangePreviousRollupGenesis(ctx interface{}, fromBlock interface{}, toBlock interface{}) *ethermanMock_GetRollupInfoByBlockRangePreviousRollupGenesis_Call { + return ðermanMock_GetRollupInfoByBlockRangePreviousRollupGenesis_Call{Call: _e.mock.On("GetRollupInfoByBlockRangePreviousRollupGenesis", ctx, fromBlock, toBlock)} +} + +func (_c *ethermanMock_GetRollupInfoByBlockRangePreviousRollupGenesis_Call) Run(run func(ctx context.Context, fromBlock uint64, toBlock *uint64)) *ethermanMock_GetRollupInfoByBlockRangePreviousRollupGenesis_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), 
args[1].(uint64), args[2].(*uint64)) + }) + return _c +} + +func (_c *ethermanMock_GetRollupInfoByBlockRangePreviousRollupGenesis_Call) Return(_a0 []etherman.Block, _a1 map[common.Hash][]etherman.Order, _a2 error) *ethermanMock_GetRollupInfoByBlockRangePreviousRollupGenesis_Call { + _c.Call.Return(_a0, _a1, _a2) + return _c +} + +func (_c *ethermanMock_GetRollupInfoByBlockRangePreviousRollupGenesis_Call) RunAndReturn(run func(context.Context, uint64, *uint64) ([]etherman.Block, map[common.Hash][]etherman.Order, error)) *ethermanMock_GetRollupInfoByBlockRangePreviousRollupGenesis_Call { + _c.Call.Return(run) + return _c +} + // GetTrustedSequencerURL provides a mock function with given fields: func (_m *ethermanMock) GetTrustedSequencerURL() (string, error) { ret := _m.Called() diff --git a/synchronizer/mock_state.go b/synchronizer/mock_state.go index ad1a3eeaaa..fc741a114d 100644 --- a/synchronizer/mock_state.go +++ b/synchronizer/mock_state.go @@ -821,6 +821,66 @@ func (_c *StateMock_GetBatchByNumber_Call) RunAndReturn(run func(context.Context return _c } +// GetBlockByNumber provides a mock function with given fields: ctx, blockNumber, dbTx +func (_m *StateMock) GetBlockByNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (*state.Block, error) { + ret := _m.Called(ctx, blockNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetBlockByNumber") + } + + var r0 *state.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.Block, error)); ok { + return rf(ctx, blockNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.Block); ok { + r0 = rf(ctx, blockNumber, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, blockNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StateMock_GetBlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetBlockByNumber' +type StateMock_GetBlockByNumber_Call struct { + *mock.Call +} + +// GetBlockByNumber is a helper method to define mock.On call +// - ctx context.Context +// - blockNumber uint64 +// - dbTx pgx.Tx +func (_e *StateMock_Expecter) GetBlockByNumber(ctx interface{}, blockNumber interface{}, dbTx interface{}) *StateMock_GetBlockByNumber_Call { + return &StateMock_GetBlockByNumber_Call{Call: _e.mock.On("GetBlockByNumber", ctx, blockNumber, dbTx)} +} + +func (_c *StateMock_GetBlockByNumber_Call) Run(run func(ctx context.Context, blockNumber uint64, dbTx pgx.Tx)) *StateMock_GetBlockByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(pgx.Tx)) + }) + return _c +} + +func (_c *StateMock_GetBlockByNumber_Call) Return(_a0 *state.Block, _a1 error) *StateMock_GetBlockByNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StateMock_GetBlockByNumber_Call) RunAndReturn(run func(context.Context, uint64, pgx.Tx) (*state.Block, error)) *StateMock_GetBlockByNumber_Call { + _c.Call.Return(run) + return _c +} + // GetExitRootByGlobalExitRoot provides a mock function with given fields: ctx, ger, dbTx func (_m *StateMock) GetExitRootByGlobalExitRoot(ctx context.Context, ger common.Hash, dbTx pgx.Tx) (*state.GlobalExitRoot, error) { ret := _m.Called(ctx, ger, dbTx) @@ -881,6 +941,66 @@ func (_c *StateMock_GetExitRootByGlobalExitRoot_Call) RunAndReturn(run func(cont return _c } +// 
GetFirstUncheckedBlock provides a mock function with given fields: ctx, fromBlockNumber, dbTx +func (_m *StateMock) GetFirstUncheckedBlock(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx) (*state.Block, error) { + ret := _m.Called(ctx, fromBlockNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetFirstUncheckedBlock") + } + + var r0 *state.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.Block, error)); ok { + return rf(ctx, fromBlockNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.Block); ok { + r0 = rf(ctx, fromBlockNumber, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, fromBlockNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StateMock_GetFirstUncheckedBlock_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFirstUncheckedBlock' +type StateMock_GetFirstUncheckedBlock_Call struct { + *mock.Call +} + +// GetFirstUncheckedBlock is a helper method to define mock.On call +// - ctx context.Context +// - fromBlockNumber uint64 +// - dbTx pgx.Tx +func (_e *StateMock_Expecter) GetFirstUncheckedBlock(ctx interface{}, fromBlockNumber interface{}, dbTx interface{}) *StateMock_GetFirstUncheckedBlock_Call { + return &StateMock_GetFirstUncheckedBlock_Call{Call: _e.mock.On("GetFirstUncheckedBlock", ctx, fromBlockNumber, dbTx)} +} + +func (_c *StateMock_GetFirstUncheckedBlock_Call) Run(run func(ctx context.Context, fromBlockNumber uint64, dbTx pgx.Tx)) *StateMock_GetFirstUncheckedBlock_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(pgx.Tx)) + }) + return _c +} + +func (_c *StateMock_GetFirstUncheckedBlock_Call) Return(_a0 *state.Block, _a1 error) *StateMock_GetFirstUncheckedBlock_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StateMock_GetFirstUncheckedBlock_Call) RunAndReturn(run func(context.Context, uint64, pgx.Tx) (*state.Block, error)) *StateMock_GetFirstUncheckedBlock_Call { + _c.Call.Return(run) + return _c +} + // GetForkIDByBatchNumber provides a mock function with given fields: batchNumber func (_m *StateMock) GetForkIDByBatchNumber(batchNumber uint64) uint64 { ret := _m.Called(batchNumber) @@ -1745,6 +1865,66 @@ func (_c *StateMock_GetPreviousBlock_Call) RunAndReturn(run func(context.Context return _c } +// GetPreviousBlockToBlockNumber provides a mock function with given fields: ctx, blockNumber, dbTx +func (_m *StateMock) GetPreviousBlockToBlockNumber(ctx context.Context, blockNumber uint64, dbTx pgx.Tx) (*state.Block, error) { + ret := _m.Called(ctx, blockNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetPreviousBlockToBlockNumber") + } + + var r0 *state.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) (*state.Block, error)); ok { + return rf(ctx, blockNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, pgx.Tx) *state.Block); ok { + r0 = rf(ctx, blockNumber, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*state.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, blockNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StateMock_GetPreviousBlockToBlockNumber_Call is a *mock.Call that shadows Run/Return methods with 
type explicit version for method 'GetPreviousBlockToBlockNumber' +type StateMock_GetPreviousBlockToBlockNumber_Call struct { + *mock.Call +} + +// GetPreviousBlockToBlockNumber is a helper method to define mock.On call +// - ctx context.Context +// - blockNumber uint64 +// - dbTx pgx.Tx +func (_e *StateMock_Expecter) GetPreviousBlockToBlockNumber(ctx interface{}, blockNumber interface{}, dbTx interface{}) *StateMock_GetPreviousBlockToBlockNumber_Call { + return &StateMock_GetPreviousBlockToBlockNumber_Call{Call: _e.mock.On("GetPreviousBlockToBlockNumber", ctx, blockNumber, dbTx)} +} + +func (_c *StateMock_GetPreviousBlockToBlockNumber_Call) Run(run func(ctx context.Context, blockNumber uint64, dbTx pgx.Tx)) *StateMock_GetPreviousBlockToBlockNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(pgx.Tx)) + }) + return _c +} + +func (_c *StateMock_GetPreviousBlockToBlockNumber_Call) Return(_a0 *state.Block, _a1 error) *StateMock_GetPreviousBlockToBlockNumber_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StateMock_GetPreviousBlockToBlockNumber_Call) RunAndReturn(run func(context.Context, uint64, pgx.Tx) (*state.Block, error)) *StateMock_GetPreviousBlockToBlockNumber_Call { + _c.Call.Return(run) + return _c +} + // GetReorgedTransactions provides a mock function with given fields: ctx, batchNumber, dbTx func (_m *StateMock) GetReorgedTransactions(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) ([]*types.Transaction, error) { ret := _m.Called(ctx, batchNumber, dbTx) @@ -1928,6 +2108,67 @@ func (_c *StateMock_GetStoredFlushID_Call) RunAndReturn(run func(context.Context return _c } +// GetUncheckedBlocks provides a mock function with given fields: ctx, fromBlockNumber, toBlockNumber, dbTx +func (_m *StateMock) GetUncheckedBlocks(ctx context.Context, fromBlockNumber uint64, toBlockNumber uint64, dbTx pgx.Tx) ([]*state.Block, error) { + ret := _m.Called(ctx, fromBlockNumber, toBlockNumber, dbTx) + + if len(ret) == 0 { + panic("no return value specified for GetUncheckedBlocks") + } + + var r0 []*state.Block + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) ([]*state.Block, error)); ok { + return rf(ctx, fromBlockNumber, toBlockNumber, dbTx) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, pgx.Tx) []*state.Block); ok { + r0 = rf(ctx, fromBlockNumber, toBlockNumber, dbTx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*state.Block) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64, pgx.Tx) error); ok { + r1 = rf(ctx, fromBlockNumber, toBlockNumber, dbTx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// StateMock_GetUncheckedBlocks_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUncheckedBlocks' +type StateMock_GetUncheckedBlocks_Call struct { + *mock.Call +} + +// GetUncheckedBlocks is a helper method to define mock.On call +// - ctx context.Context +// - fromBlockNumber uint64 +// - toBlockNumber uint64 +// - dbTx pgx.Tx +func (_e *StateMock_Expecter) GetUncheckedBlocks(ctx interface{}, fromBlockNumber interface{}, toBlockNumber interface{}, dbTx interface{}) *StateMock_GetUncheckedBlocks_Call { + return &StateMock_GetUncheckedBlocks_Call{Call: _e.mock.On("GetUncheckedBlocks", ctx, fromBlockNumber, toBlockNumber, dbTx)} +} + +func (_c *StateMock_GetUncheckedBlocks_Call) Run(run func(ctx context.Context, fromBlockNumber uint64, toBlockNumber uint64, dbTx pgx.Tx)) 
*StateMock_GetUncheckedBlocks_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(uint64), args[3].(pgx.Tx)) + }) + return _c +} + +func (_c *StateMock_GetUncheckedBlocks_Call) Return(_a0 []*state.Block, _a1 error) *StateMock_GetUncheckedBlocks_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *StateMock_GetUncheckedBlocks_Call) RunAndReturn(run func(context.Context, uint64, uint64, pgx.Tx) ([]*state.Block, error)) *StateMock_GetUncheckedBlocks_Call { + _c.Call.Return(run) + return _c +} + // OpenBatch provides a mock function with given fields: ctx, processingContext, dbTx func (_m *StateMock) OpenBatch(ctx context.Context, processingContext state.ProcessingContext, dbTx pgx.Tx) error { ret := _m.Called(ctx, processingContext, dbTx) @@ -2188,7 +2429,7 @@ func (_c *StateMock_ProcessBatch_Call) RunAndReturn(run func(context.Context, st } // ProcessBatchV2 provides a mock function with given fields: ctx, request, updateMerkleTree -func (_m *StateMock) ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, error) { +func (_m *StateMock) ProcessBatchV2(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, string, error) { ret := _m.Called(ctx, request, updateMerkleTree) if len(ret) == 0 { @@ -2196,8 +2437,9 @@ func (_m *StateMock) ProcessBatchV2(ctx context.Context, request state.ProcessRe } var r0 *state.ProcessBatchResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, error)); ok { + var r1 string + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, string, error)); ok { return rf(ctx, request, updateMerkleTree) } if rf, ok := ret.Get(0).(func(context.Context, state.ProcessRequest, bool) *state.ProcessBatchResponse); ok { @@ -2208,13 +2450,19 @@ func (_m *StateMock) ProcessBatchV2(ctx context.Context, request state.ProcessRe } } - if rf, ok := ret.Get(1).(func(context.Context, state.ProcessRequest, bool) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, state.ProcessRequest, bool) string); ok { r1 = rf(ctx, request, updateMerkleTree) } else { - r1 = ret.Error(1) + r1 = ret.Get(1).(string) } - return r0, r1 + if rf, ok := ret.Get(2).(func(context.Context, state.ProcessRequest, bool) error); ok { + r2 = rf(ctx, request, updateMerkleTree) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } // StateMock_ProcessBatchV2_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProcessBatchV2' @@ -2237,12 +2485,12 @@ func (_c *StateMock_ProcessBatchV2_Call) Run(run func(ctx context.Context, reque return _c } -func (_c *StateMock_ProcessBatchV2_Call) Return(_a0 *state.ProcessBatchResponse, _a1 error) *StateMock_ProcessBatchV2_Call { - _c.Call.Return(_a0, _a1) +func (_c *StateMock_ProcessBatchV2_Call) Return(_a0 *state.ProcessBatchResponse, _a1 string, _a2 error) *StateMock_ProcessBatchV2_Call { + _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *StateMock_ProcessBatchV2_Call) RunAndReturn(run func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, error)) *StateMock_ProcessBatchV2_Call { +func (_c *StateMock_ProcessBatchV2_Call) RunAndReturn(run func(context.Context, state.ProcessRequest, bool) (*state.ProcessBatchResponse, string, error)) *StateMock_ProcessBatchV2_Call { _c.Call.Return(run) return 
_c } @@ -2715,6 +2963,104 @@ func (_c *StateMock_UpdateBatchL2Data_Call) RunAndReturn(run func(context.Contex return _c } +// UpdateBatchTimestamp provides a mock function with given fields: ctx, batchNumber, timestamp, dbTx +func (_m *StateMock) UpdateBatchTimestamp(ctx context.Context, batchNumber uint64, timestamp time.Time, dbTx pgx.Tx) error { + ret := _m.Called(ctx, batchNumber, timestamp, dbTx) + + if len(ret) == 0 { + panic("no return value specified for UpdateBatchTimestamp") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, time.Time, pgx.Tx) error); ok { + r0 = rf(ctx, batchNumber, timestamp, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// StateMock_UpdateBatchTimestamp_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateBatchTimestamp' +type StateMock_UpdateBatchTimestamp_Call struct { + *mock.Call +} + +// UpdateBatchTimestamp is a helper method to define mock.On call +// - ctx context.Context +// - batchNumber uint64 +// - timestamp time.Time +// - dbTx pgx.Tx +func (_e *StateMock_Expecter) UpdateBatchTimestamp(ctx interface{}, batchNumber interface{}, timestamp interface{}, dbTx interface{}) *StateMock_UpdateBatchTimestamp_Call { + return &StateMock_UpdateBatchTimestamp_Call{Call: _e.mock.On("UpdateBatchTimestamp", ctx, batchNumber, timestamp, dbTx)} +} + +func (_c *StateMock_UpdateBatchTimestamp_Call) Run(run func(ctx context.Context, batchNumber uint64, timestamp time.Time, dbTx pgx.Tx)) *StateMock_UpdateBatchTimestamp_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(time.Time), args[3].(pgx.Tx)) + }) + return _c +} + +func (_c *StateMock_UpdateBatchTimestamp_Call) Return(_a0 error) *StateMock_UpdateBatchTimestamp_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *StateMock_UpdateBatchTimestamp_Call) RunAndReturn(run func(context.Context, uint64, time.Time, pgx.Tx) error) *StateMock_UpdateBatchTimestamp_Call { + _c.Call.Return(run) + return _c +} + +// UpdateCheckedBlockByNumber provides a mock function with given fields: ctx, blockNumber, newCheckedStatus, dbTx +func (_m *StateMock) UpdateCheckedBlockByNumber(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx pgx.Tx) error { + ret := _m.Called(ctx, blockNumber, newCheckedStatus, dbTx) + + if len(ret) == 0 { + panic("no return value specified for UpdateCheckedBlockByNumber") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, bool, pgx.Tx) error); ok { + r0 = rf(ctx, blockNumber, newCheckedStatus, dbTx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// StateMock_UpdateCheckedBlockByNumber_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateCheckedBlockByNumber' +type StateMock_UpdateCheckedBlockByNumber_Call struct { + *mock.Call +} + +// UpdateCheckedBlockByNumber is a helper method to define mock.On call +// - ctx context.Context +// - blockNumber uint64 +// - newCheckedStatus bool +// - dbTx pgx.Tx +func (_e *StateMock_Expecter) UpdateCheckedBlockByNumber(ctx interface{}, blockNumber interface{}, newCheckedStatus interface{}, dbTx interface{}) *StateMock_UpdateCheckedBlockByNumber_Call { + return &StateMock_UpdateCheckedBlockByNumber_Call{Call: _e.mock.On("UpdateCheckedBlockByNumber", ctx, blockNumber, newCheckedStatus, dbTx)} +} + +func (_c *StateMock_UpdateCheckedBlockByNumber_Call) Run(run func(ctx context.Context, blockNumber uint64, newCheckedStatus bool, dbTx 
pgx.Tx)) *StateMock_UpdateCheckedBlockByNumber_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].(bool), args[3].(pgx.Tx)) + }) + return _c +} + +func (_c *StateMock_UpdateCheckedBlockByNumber_Call) Return(_a0 error) *StateMock_UpdateCheckedBlockByNumber_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *StateMock_UpdateCheckedBlockByNumber_Call) RunAndReturn(run func(context.Context, uint64, bool, pgx.Tx) error) *StateMock_UpdateCheckedBlockByNumber_Call { + _c.Call.Return(run) + return _c +} + // UpdateForkIDBlockNumber provides a mock function with given fields: ctx, forkdID, newBlockNumber, updateMemCache, dbTx func (_m *StateMock) UpdateForkIDBlockNumber(ctx context.Context, forkdID uint64, newBlockNumber uint64, updateMemCache bool, dbTx pgx.Tx) error { ret := _m.Called(ctx, forkdID, newBlockNumber, updateMemCache, dbTx) diff --git a/synchronizer/synchronizer.go b/synchronizer/synchronizer.go index f29421aad6..796f019169 100644 --- a/synchronizer/synchronizer.go +++ b/synchronizer/synchronizer.go @@ -33,6 +33,7 @@ const ( // SequentialMode is the value for L1SynchronizationMode to run in sequential mode SequentialMode = "sequential" maxBatchNumber = ^uint64(0) + maxBlockNumber = ^uint64(0) timeOfLiveBatchOnCache = 5 * time.Minute ) @@ -275,6 +276,162 @@ func rollback(ctx context.Context, dbTx pgx.Tx, err error) error { return err } +func (s *ClientSynchronizer) isGenesisProcessed(ctx context.Context, dbTx pgx.Tx) (bool, *state.Block, error) { + lastEthBlockSynced, err := s.state.GetLastBlock(ctx, dbTx) + if err != nil && errors.Is(err, state.ErrStateNotSynchronized) { + return false, lastEthBlockSynced, nil + } + + if lastEthBlockSynced.BlockNumber >= s.genesis.RollupBlockNumber { + log.Infof("Genesis block processed. Last block synced: %d >= genesis %d", lastEthBlockSynced.BlockNumber, s.genesis.RollupBlockNumber) + return true, lastEthBlockSynced, nil + } + log.Warnf("Genesis block not processed yet. Last block synced: %d < genesis %d", lastEthBlockSynced.BlockNumber, s.genesis.RollupBlockNumber) + return false, lastEthBlockSynced, nil +} + +// getStartingL1Block find if need to update and if yes the starting point: +// bool -> need to process blocks +// uint64 -> first block to synchronize +// error -> error +// 1. Check last synced block on DB, if there are any could be fully synced (>=genesis) or syncing pre-genesis events (= genesisBlockNumber { + log.Infof("sync pregenesis: rollupManagerBlockNumber>=genesisBlockNumber (%d>=%d). 
Nothing in pregenesis", rollupManagerBlockNumber, genesisBlockNumber) + return false, 0, nil + } + log.Infof("sync pregenesis: No block on DB, starting from LxLy upgrade block (rollupManagerBlockNumber) %d", upgradeLxLyBlockNumber) + return true, upgradeLxLyBlockNumber, nil + } else if err != nil { + log.Errorf("Error getting last Block on DB err:%v", err) + return false, 0, err + } + if lastBlock.BlockNumber >= genesisBlockNumber-1 { + log.Warnf("sync pregenesis: Last block processed is %d, which is greater or equal than the previous genesis block %d", lastBlock, genesisBlockNumber) + return false, 0, nil + } + log.Infof("sync pregenesis: Continue processing pre-genesis blocks, last block processed on DB is %d", lastBlock.BlockNumber+1) + return true, lastBlock.BlockNumber + 1, nil +} + +func (s *ClientSynchronizer) synchronizePreGenesisRollupEvents(syncChunkSize uint64, ctx context.Context) error { + // Sync events from RollupManager that happen before rollup creation + startTime := time.Now() + log.Info("synchronizing events from RollupManager that happen before rollup creation") + needToUpdate, fromBlock, err := s.getStartingL1Block(ctx, s.genesis.RollupBlockNumber, s.genesis.RollupManagerBlockNumber, nil) + if err != nil { + log.Errorf("sync pregenesis: error getting starting L1 block. Error: %v", err) + return err + } + if !needToUpdate { + log.Infof("sync pregenesis: No need to process blocks before the genesis block %d", s.genesis.RollupBlockNumber) + return nil + } + toBlockFinal := s.genesis.RollupBlockNumber - 1 + log.Infof("sync pregenesis: starting syncing pre genesis LxLy events from block %d to block %d (total %d blocks) chunk size %d", + fromBlock, toBlockFinal, toBlockFinal-fromBlock+1, syncChunkSize) + for i := fromBlock; true; i += syncChunkSize { + toBlock := min(i+syncChunkSize-1, toBlockFinal) + log.Debugf("sync pregenesis: syncing L1InfoTree from blocks [%d - %d] remains: %d", i, toBlock, toBlockFinal-toBlock) + blocks, order, err := s.etherMan.GetRollupInfoByBlockRangePreviousRollupGenesis(s.ctx, i, &toBlock) + if err != nil { + log.Error("sync pregenesis: error getting rollupInfoByBlockRange before rollup genesis: ", err) + return err + } + log.Debugf("sync pregenesis: syncing L1InfoTree from blocks [%d - %d] -> num_block:%d num_order:%d", i, toBlock, len(blocks), len(order)) + err = s.ProcessBlockRange(blocks, order) + if err != nil { + log.Error("sync pregenesis: error processing blocks before the genesis: ", err) + return err + } + if toBlock == toBlockFinal { + break + } + } + elapsedTime := time.Since(startTime) + log.Infof("sync pregenesis: sync L1InfoTree finish: from %d to %d total_block %d done in %s", fromBlock, toBlockFinal, toBlockFinal-fromBlock+1, &elapsedTime) + return nil +} + +func (s *ClientSynchronizer) processGenesis() (*state.Block, error) { + log.Info("State is empty, verifying genesis block") + valid, err := s.etherMan.VerifyGenBlockNumber(s.ctx, s.genesis.RollupBlockNumber) + if err != nil { + log.Error("error checking genesis block number. Error: ", err) + return nil, err + } else if !valid { + log.Error("genesis Block number configured is not valid. It is required the block number where the PolygonZkEVM smc was deployed") + return nil, fmt.Errorf("genesis Block number configured is not valid. 
It is required to be the block number where the PolygonZkEVM smc was deployed")
+	}
+	err = s.synchronizePreGenesisRollupEvents(s.cfg.SyncChunkSize, s.ctx)
+	if err != nil {
+		log.Error("error synchronizing pre-genesis events: ", err)
+		return nil, err
+	}
+
+	header, err := s.etherMan.HeaderByNumber(s.ctx, big.NewInt(0).SetUint64(s.genesis.RollupBlockNumber))
+	if err != nil {
+		log.Errorf("error getting l1 block header for block %d. Error: %v", s.genesis.RollupBlockNumber, err)
+		return nil, err
+	}
+	log.Info("synchronizing rollup creation block")
+	lastEthBlockSynced := &state.Block{
+		BlockNumber: header.Number.Uint64(),
+		BlockHash:   header.Hash(),
+		ParentHash:  header.ParentHash,
+		ReceivedAt:  time.Unix(int64(header.Time), 0),
+	}
+	dbTx, err := s.state.BeginStateTransaction(s.ctx)
+	if err != nil {
+		log.Errorf("error creating db transaction to get latest block. Error: %v", err)
+		return nil, err
+	}
+	// This adds the genesis block and sets the values on the tree
+	genesisRoot, err := s.state.SetGenesis(s.ctx, *lastEthBlockSynced, s.genesis, stateMetrics.SynchronizerCallerLabel, dbTx)
+	if err != nil {
+		log.Error("error setting genesis: ", err)
+		return nil, rollback(s.ctx, dbTx, err)
+	}
+	err = s.RequestAndProcessRollupGenesisBlock(dbTx, lastEthBlockSynced)
+	if err != nil {
+		log.Error("error processing Rollup genesis block: ", err)
+		return nil, rollback(s.ctx, dbTx, err)
+	}
+
+	if genesisRoot != s.genesis.Root {
+		log.Errorf("Calculated newRoot should be %s instead of %s", s.genesis.Root.String(), genesisRoot.String())
+		return nil, rollback(s.ctx, dbTx, fmt.Errorf("calculated newRoot should be %s instead of %s", s.genesis.Root.String(), genesisRoot.String()))
+	}
+	// Waiting for the flushID to be stored
+	err = s.checkFlushID(dbTx)
+	if err != nil {
+		log.Error("error checking genesis flushID: ", err)
+		return nil, rollback(s.ctx, dbTx, err)
+	}
+	if err := dbTx.Commit(s.ctx); err != nil {
+		log.Errorf("error committing genesis dbTx, err: %v", err)
+		return nil, rollback(s.ctx, dbTx, err)
+	}
+	log.Info("Genesis root matches! Stored genesis blocks.")
+	return lastEthBlockSynced, nil
+}
+
 // Sync function will read the last state synced and will continue from that point.
 // Sync() will read blockchain events to detect rollup updates
 func (s *ClientSynchronizer) Sync() error {
@@ -286,97 +443,25 @@ func (s *ClientSynchronizer) Sync() error {
 		_ = s.asyncL1BlockChecker.OnStart(s.ctx)
 	}
 
-	dbTx, err := s.state.BeginStateTransaction(s.ctx)
+	genesisDone, lastEthBlockSynced, err := s.isGenesisProcessed(s.ctx, nil)
 	if err != nil {
-		log.Errorf("error creating db transaction to get latest block. Error: %v", err)
+		log.Errorf("error checking if genesis is processed. Error: %v", err)
 		return err
 	}
-	lastEthBlockSynced, err := s.state.GetLastBlock(s.ctx, dbTx)
-	if err != nil {
-		if errors.Is(err, state.ErrStateNotSynchronized) {
-			log.Info("State is empty, verifying genesis block")
-			valid, err := s.etherMan.VerifyGenBlockNumber(s.ctx, s.genesis.RollupBlockNumber)
-			if err != nil {
-				log.Error("error checking genesis block number. Error: ", err)
-				return rollback(s.ctx, dbTx, err)
-			} else if !valid {
-				log.Error("genesis Block number configured is not valid. It is required the block number where the PolygonZkEVM smc was deployed")
-				return rollback(s.ctx, dbTx, fmt.Errorf("genesis Block number configured is not valid. 
It is required the block number where the PolygonZkEVM smc was deployed")) - } - - // Sync events from RollupManager that happen before rollup creation - log.Info("synchronizing events from RollupManager that happen before rollup creation") - for i := s.genesis.RollupManagerBlockNumber; true; i += s.cfg.SyncChunkSize { - toBlock := min(i+s.cfg.SyncChunkSize-1, s.genesis.RollupBlockNumber-1) - blocks, order, err := s.etherMan.GetRollupInfoByBlockRange(s.ctx, i, &toBlock) - if err != nil { - log.Error("error getting rollupInfoByBlockRange before rollup genesis: ", err) - rollbackErr := dbTx.Rollback(s.ctx) - if rollbackErr != nil { - log.Errorf("error rolling back state. RollbackErr: %v, err: %s", rollbackErr, err.Error()) - return rollbackErr - } - return err - } - err = s.ProcessBlockRange(blocks, order) - if err != nil { - log.Error("error processing blocks before the genesis: ", err) - rollbackErr := dbTx.Rollback(s.ctx) - if rollbackErr != nil { - log.Errorf("error rolling back state. RollbackErr: %v, err: %s", rollbackErr, err.Error()) - return rollbackErr - } - return err - } - if toBlock == s.genesis.RollupBlockNumber-1 { - break - } - } - - header, err := s.etherMan.HeaderByNumber(s.ctx, big.NewInt(0).SetUint64(s.genesis.RollupBlockNumber)) - if err != nil { - log.Errorf("error getting l1 block header for block %d. Error: %v", s.genesis.RollupBlockNumber, err) - return rollback(s.ctx, dbTx, err) - } - log.Info("synchronizing rollup creation block") - lastEthBlockSynced = &state.Block{ - BlockNumber: header.Number.Uint64(), - BlockHash: header.Hash(), - ParentHash: header.ParentHash, - ReceivedAt: time.Unix(int64(header.Time), 0), - } - genesisRoot, err := s.state.SetGenesis(s.ctx, *lastEthBlockSynced, s.genesis, stateMetrics.SynchronizerCallerLabel, dbTx) - if err != nil { - log.Error("error setting genesis: ", err) - return rollback(s.ctx, dbTx, err) - } - err = s.RequestAndProcessRollupGenesisBlock(dbTx, lastEthBlockSynced) - if err != nil { - log.Error("error processing Rollup genesis block: ", err) - return rollback(s.ctx, dbTx, err) - } - - if genesisRoot != s.genesis.Root { - log.Errorf("Calculated newRoot should be %s instead of %s", s.genesis.Root.String(), genesisRoot.String()) - return rollback(s.ctx, dbTx, fmt.Errorf("calculated newRoot should be %s instead of %s", s.genesis.Root.String(), genesisRoot.String())) - } - // Waiting for the flushID to be stored - err = s.checkFlushID(dbTx) - if err != nil { - log.Error("error checking genesis flushID: ", err) - return rollback(s.ctx, dbTx, err) - } - log.Debug("Genesis root matches!") - } else { - log.Error("unexpected error getting the latest ethereum block. Error: ", err) - rollbackErr := dbTx.Rollback(s.ctx) - if rollbackErr != nil { - log.Errorf("error rolling back state. RollbackErr: %v, err: %s", rollbackErr, err.Error()) - return rollbackErr - } + if !genesisDone { + lastEthBlockSynced, err = s.processGenesis() + if err != nil { + log.Errorf("error processing genesis. Error: %v", err) return err } } + + dbTx, err := s.state.BeginStateTransaction(s.ctx) + if err != nil { + log.Errorf("error creating db transaction to get latest block. Error: %v", err) + return err + } + initBatchNumber, err := s.state.GetLastBatchNumber(s.ctx, dbTx) if err != nil { log.Error("error getting latest batchNumber synced. 
Error: ", err) @@ -547,40 +632,36 @@ func (s *ClientSynchronizer) RequestAndProcessRollupGenesisBlock(dbTx pgx.Tx, la log.Error("error getting rollupInfoByBlockRange after set the genesis: ", err) return err } - // Check that the response is the expected. It should be 1 block with 2 orders + // Check that the response is the expected. It should be 1 block with ForkID event + log.Debugf("SanityCheck for genesis block (%d) events: %+v", lastEthBlockSynced.BlockNumber, order) err = sanityCheckForGenesisBlockRollupInfo(blocks, order) if err != nil { return err } - forkId := s.state.GetForkIDByBlockNumber(blocks[0].BlockNumber) - err = s.l1EventProcessors.Process(s.ctx, actions.ForkIdType(forkId), etherman.Order{Name: etherman.ForkIDsOrder, Pos: 0}, &blocks[0], dbTx) + log.Infof("Processing genesis block %d orders: %+v", lastEthBlockSynced.BlockNumber, order) + err = s.internalProcessBlock(maxBlockNumber, blocks[0], order[blocks[0].BlockHash], false, dbTx) if err != nil { - log.Error("error storing genesis forkID: ", err) - return err - } - if len(blocks[0].SequencedBatches) != 0 { - batchSequence := l1event_orders.GetSequenceFromL1EventOrder(etherman.InitialSequenceBatchesOrder, &blocks[0], 0) - forkId = s.state.GetForkIDByBatchNumber(batchSequence.FromBatchNumber) - err = s.l1EventProcessors.Process(s.ctx, actions.ForkIdType(forkId), etherman.Order{Name: etherman.InitialSequenceBatchesOrder, Pos: 0}, &blocks[0], dbTx) - if err != nil { - log.Error("error storing initial tx (batch 1): ", err) - return err - } + log.Errorf("error processinge events on genesis block %d: err:%w", lastEthBlockSynced.BlockNumber, err) } + return nil } func sanityCheckForGenesisBlockRollupInfo(blocks []etherman.Block, order map[common.Hash][]etherman.Order) error { if len(blocks) != 1 || len(order) < 1 || len(order[blocks[0].BlockHash]) < 1 { - log.Errorf("error getting rollupInfoByBlockRange after set the genesis. Expected 1 block with 2 orders") - return fmt.Errorf("error getting rollupInfoByBlockRange after set the genesis. Expected 1 block with 2 orders") + log.Errorf("error getting rollupInfoByBlockRange after set the genesis. Expected 1 block with minimum 2 orders") + return fmt.Errorf("error getting rollupInfoByBlockRange after set the genesis. Expected 1 block with minimum 2 orders") } - if order[blocks[0].BlockHash][0].Name != etherman.ForkIDsOrder { - log.Errorf("error getting rollupInfoByBlockRange after set the genesis. Expected ForkIDsOrder, got %s", order[blocks[0].BlockHash][0].Name) - return fmt.Errorf("error getting rollupInfoByBlockRange after set the genesis. Expected ForkIDsOrder") + // The genesis block implies 1 ForkID event + for _, value := range order[blocks[0].BlockHash] { + if value.Name == etherman.ForkIDsOrder { + return nil + } } - - return nil + err := fmt.Errorf("events on genesis block (%d) need a ForkIDsOrder event but this block got %+v", + blocks[0].BlockNumber, order[blocks[0].BlockHash]) + log.Error(err.Error()) + return err } // This function syncs the node from a specific block to the latest @@ -727,67 +808,20 @@ func (s *ClientSynchronizer) ProcessBlockRange(blocks []etherman.Block, order ma log.Errorf("error creating db transaction to store block. 
BlockNumber: %d, error: %v", blocks[i].BlockNumber, err) return err } - b := state.Block{ - BlockNumber: blocks[i].BlockNumber, - BlockHash: blocks[i].BlockHash, - ParentHash: blocks[i].ParentHash, - ReceivedAt: blocks[i].ReceivedAt, - } - if blocks[i].BlockNumber <= finalizedBlockNumber { - b.Checked = true - } - // Add block information - err = s.state.AddBlock(s.ctx, &b, dbTx) + err = s.internalProcessBlock(finalizedBlockNumber, blocks[i], order[blocks[i].BlockHash], true, dbTx) if err != nil { + log.Error("rollingback BlockNumber: %d, because error internalProcessBlock: ", blocks[i].BlockNumber, err) // If any goes wrong we ensure that the state is rollbacked - log.Errorf("error storing block. BlockNumber: %d, error: %v", blocks[i].BlockNumber, err) rollbackErr := dbTx.Rollback(s.ctx) - if rollbackErr != nil { - log.Errorf("error rolling back state to store block. BlockNumber: %d, rollbackErr: %s, error : %v", blocks[i].BlockNumber, rollbackErr.Error(), err) - return rollbackErr + if rollbackErr != nil && !errors.Is(rollbackErr, pgx.ErrTxClosed) { + log.Warnf("error rolling back state to store block. BlockNumber: %d, rollbackErr: %s, error : %v", blocks[i].BlockNumber, rollbackErr.Error(), err) + return fmt.Errorf("error rollback BlockNumber: %d: err:%w original error:%w", blocks[i].BlockNumber, rollbackErr, err) } return err } - for _, element := range order[blocks[i].BlockHash] { - batchSequence := l1event_orders.GetSequenceFromL1EventOrder(element.Name, &blocks[i], element.Pos) - var forkId uint64 - if batchSequence != nil { - forkId = s.state.GetForkIDByBatchNumber(batchSequence.FromBatchNumber) - log.Debug("EventOrder: ", element.Name, ". Batch Sequence: ", batchSequence, "forkId: ", forkId) - } else { - forkId = s.state.GetForkIDByBlockNumber(blocks[i].BlockNumber) - log.Debug("EventOrder: ", element.Name, ". BlockNumber: ", blocks[i].BlockNumber, ". forkId: ", forkId) - } - forkIdTyped := actions.ForkIdType(forkId) - // Process event received from l1 - err := s.l1EventProcessors.Process(s.ctx, forkIdTyped, element, &blocks[i], dbTx) - if err != nil { - log.Error("error: ", err) - // If any goes wrong we ensure that the state is rollbacked - rollbackErr := dbTx.Rollback(s.ctx) - if rollbackErr != nil && !errors.Is(rollbackErr, pgx.ErrTxClosed) { - log.Warnf("error rolling back state to store block. BlockNumber: %d, rollbackErr: %s, error : %v", blocks[i].BlockNumber, rollbackErr.Error(), err) - return rollbackErr - } - return err - } - } - log.Debug("Checking FlushID to commit L1 data to db") - err = s.checkFlushID(dbTx) - if err != nil { - // If any goes wrong we ensure that the state is rollbacked - log.Errorf("error checking flushID. Error: %v", err) - rollbackErr := dbTx.Rollback(s.ctx) - if rollbackErr != nil { - log.Errorf("error rolling back state. RollbackErr: %s, Error : %v", rollbackErr.Error(), err) - return rollbackErr - } - return err - } err = dbTx.Commit(s.ctx) if err != nil { - // If any goes wrong we ensure that the state is rollbacked log.Errorf("error committing state to store block. 
BlockNumber: %d, err: %v", blocks[i].BlockNumber, err) rollbackErr := dbTx.Rollback(s.ctx) if rollbackErr != nil { @@ -800,6 +834,57 @@ func (s *ClientSynchronizer) ProcessBlockRange(blocks []etherman.Block, order ma return nil } +// internalProcessBlock process one iteration of events and stores the information in the db +func (s *ClientSynchronizer) internalProcessBlock(finalizedBlockNumber uint64, blocks etherman.Block, order []etherman.Order, addBlock bool, dbTx pgx.Tx) error { + var err error + // New info has to be included into the db using the state + if addBlock { + b := state.Block{ + BlockNumber: blocks.BlockNumber, + BlockHash: blocks.BlockHash, + ParentHash: blocks.ParentHash, + ReceivedAt: blocks.ReceivedAt, + } + if blocks.BlockNumber <= finalizedBlockNumber { + b.Checked = true + } + // Add block information + log.Debugf("Storing block. Block: %s", b.String()) + err = s.state.AddBlock(s.ctx, &b, dbTx) + if err != nil { + log.Errorf("error storing block. BlockNumber: %d, error: %v", blocks.BlockNumber, err) + return err + } + } + + for _, element := range order { + batchSequence := l1event_orders.GetSequenceFromL1EventOrder(element.Name, &blocks, element.Pos) + var forkId uint64 + if batchSequence != nil { + forkId = s.state.GetForkIDByBatchNumber(batchSequence.FromBatchNumber) + log.Debug("EventOrder: ", element.Name, ". Batch Sequence: ", batchSequence, "forkId: ", forkId) + } else { + forkId = s.state.GetForkIDByBlockNumber(blocks.BlockNumber) + log.Debug("EventOrder: ", element.Name, ". BlockNumber: ", blocks.BlockNumber, "forkId: ", forkId) + } + forkIdTyped := actions.ForkIdType(forkId) + // Process event received from l1 + err := s.l1EventProcessors.Process(s.ctx, forkIdTyped, element, &blocks, dbTx) + if err != nil { + log.Error("1EventProcessors.Process error: ", err) + return err + } + } + log.Debug("Checking FlushID to commit L1 data to db") + err = s.checkFlushID(dbTx) + if err != nil { + log.Errorf("error checking flushID. 
Error: %v", err) + return err + } + + return nil +} + func (s *ClientSynchronizer) syncTrustedState(latestSyncedBatch uint64) error { if s.syncTrustedStateExecutor == nil || s.isTrustedSequencer { return nil diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go index 47143e2026..6ac3102648 100644 --- a/synchronizer/synchronizer_test.go +++ b/synchronizer/synchronizer_test.go @@ -2,6 +2,7 @@ package synchronizer import ( context "context" + "fmt" "math" "math/big" "testing" @@ -44,6 +45,40 @@ type mocks struct { //EventLog *eventLogMock } +func TestGetStartingL1BlockAutodiscover(t *testing.T) { + genesis, cfg, m := setupGenericTest(t) + ethermanForL1 := []syncinterfaces.EthermanFullInterface{m.Etherman} + syncInterface, err := NewSynchronizer(false, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, m.zkEVMClientEthereumCompatible, nil, *genesis, *cfg, false) + require.NoError(t, err) + sync, ok := syncInterface.(*ClientSynchronizer) + require.True(t, ok) + ctx := context.TODO() + t.Run("getStartingL1Block autodiscover OK", func(t *testing.T) { + m.State.EXPECT().GetLastBlock(ctx, nil).Return(nil, state.ErrStateNotSynchronized).Once() + m.Etherman.EXPECT().GetL1BlockUpgradeLxLy(mock.Anything, mock.Anything).Return(uint64(100), nil).Once() + needProcess, firstBlock, err := sync.getStartingL1Block(ctx, 123, 0, nil) + require.NoError(t, err) + require.True(t, needProcess) + require.Equal(t, uint64(100), firstBlock) + }) + + t.Run("getStartingL1Block autodiscover Fails", func(t *testing.T) { + m.State.EXPECT().GetLastBlock(ctx, nil).Return(nil, state.ErrStateNotSynchronized).Once() + m.Etherman.EXPECT().GetL1BlockUpgradeLxLy(mock.Anything, mock.Anything).Return(uint64(0), fmt.Errorf("error")).Once() + _, _, err = sync.getStartingL1Block(ctx, 123, 0, nil) + require.Error(t, err) + }) + + t.Run("getStartingL1Block have already started sync", func(t *testing.T) { + m.State.EXPECT().GetLastBlock(ctx, nil).Return(&state.Block{BlockNumber: 100}, nil).Once() + + needProcess, firstBlock, err := sync.getStartingL1Block(ctx, 123, 0, nil) + require.NoError(t, err) + require.True(t, needProcess) + require.Equal(t, uint64(101), firstBlock) + }) +} + // Feature #2220 and #2239: Optimize Trusted state synchronization // // this Check partially point 2: Use previous batch stored in memory to avoid getting from database @@ -122,7 +157,7 @@ func TestGivenPermissionlessNodeWhenSyncronizeFirstTimeABatchThenStoreItInALocal // but it used a feature that is not implemented in new one that is asking beyond the last block on L1 func TestForcedBatchEtrog(t *testing.T) { genesis := state.Genesis{ - RollupBlockNumber: uint64(123456), + RollupBlockNumber: uint64(0), } cfg := Config{ SyncInterval: cfgTypes.Duration{Duration: 1 * time.Second}, @@ -152,6 +187,14 @@ func TestForcedBatchEtrog(t *testing.T) { ToBatchNumber: ^uint64(0), } m.State.EXPECT().GetForkIDInMemory(uint64(7)).Return(&forkIdInterval) + parentHash := common.HexToHash("0x111") + ethHeader := ðTypes.Header{Number: big.NewInt(0), ParentHash: parentHash} + ethBlock := ethTypes.NewBlockWithHeader(ethHeader) + lastBlock := &state.Block{BlockHash: ethBlock.Hash(), BlockNumber: ethBlock.Number().Uint64(), ParentHash: ethBlock.ParentHash()} + m.State. + On("GetLastBlock", mock.Anything, nil). + Return(lastBlock, nil). + Once() m.State. On("BeginStateTransaction", ctxMatchBy). @@ -169,10 +212,11 @@ func TestForcedBatchEtrog(t *testing.T) { On("GetForkIDByBatchNumber", mock.Anything). Return(uint64(7), nil). 
Maybe() + m.State. On("GetLastBlock", ctx, m.DbTx). Return(lastBlock0, nil). - Once() + Maybe() m.State. On("GetLastBatchNumber", ctx, m.DbTx). @@ -397,7 +441,7 @@ func TestForcedBatchEtrog(t *testing.T) { // but it used a feature that is not implemented in new one that is asking beyond the last block on L1 func TestSequenceForcedBatchIncaberry(t *testing.T) { genesis := state.Genesis{ - RollupBlockNumber: uint64(123456), + RollupBlockNumber: uint64(0), } cfg := Config{ SyncInterval: cfgTypes.Duration{Duration: 1 * time.Second}, @@ -416,20 +460,24 @@ func TestSequenceForcedBatchIncaberry(t *testing.T) { ethermanForL1 := []syncinterfaces.EthermanFullInterface{m.Etherman} sync, err := NewSynchronizer(true, m.Etherman, ethermanForL1, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, m.zkEVMClientEthereumCompatible, nil, genesis, cfg, false) require.NoError(t, err) - + parentHash := common.HexToHash("0x111") + ethHeader0 := ðTypes.Header{Number: big.NewInt(0), ParentHash: parentHash} + ethBlock0 := ethTypes.NewBlockWithHeader(ethHeader0) + ethHeader1 := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash()} + ethBlock1 := ethTypes.NewBlockWithHeader(ethHeader1) + lastBlock0 := &state.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} + lastBlock1 := &state.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} + m.State. + On("GetLastBlock", mock.Anything, nil). + Return(lastBlock0, nil). + Once() // state preparation ctxMatchBy := mock.MatchedBy(func(ctx context.Context) bool { return ctx != nil }) m.State. On("BeginStateTransaction", ctxMatchBy). Run(func(args mock.Arguments) { ctx := args[0].(context.Context) - parentHash := common.HexToHash("0x111") - ethHeader0 := ðTypes.Header{Number: big.NewInt(0), ParentHash: parentHash} - ethBlock0 := ethTypes.NewBlockWithHeader(ethHeader0) - ethHeader1 := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash()} - ethBlock1 := ethTypes.NewBlockWithHeader(ethHeader1) - lastBlock0 := &state.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} - lastBlock1 := &state.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} + m.State. On("GetForkIDByBatchNumber", mock.Anything). Return(uint64(1), nil). @@ -438,7 +486,7 @@ func TestSequenceForcedBatchIncaberry(t *testing.T) { m.State. On("GetLastBlock", ctx, m.DbTx). Return(lastBlock0, nil). - Once() + Maybe() m.State. On("GetLastBatchNumber", ctx, m.DbTx). 
@@ -955,29 +1003,33 @@ func TestReorg(t *testing.T) { ToBatchNumber: math.MaxUint64, } m.State.EXPECT().GetForkIDInMemory(uint64(9)).Return(&forkIdInterval) + parentHash := common.HexToHash("0x111") + ethHeader0 := ðTypes.Header{Number: big.NewInt(0), ParentHash: parentHash} + ethBlock0 := ethTypes.NewBlockWithHeader(ethHeader0) + ethHeader1bis := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash(), Time: 10, GasUsed: 20, Root: common.HexToHash("0x234")} + ethBlock1bis := ethTypes.NewBlockWithHeader(ethHeader1bis) + ethHeader2bis := ðTypes.Header{Number: big.NewInt(2), ParentHash: ethBlock1bis.Hash()} + ethBlock2bis := ethTypes.NewBlockWithHeader(ethHeader2bis) + ethHeader3bis := ðTypes.Header{Number: big.NewInt(3), ParentHash: ethBlock2bis.Hash()} + ethBlock3bis := ethTypes.NewBlockWithHeader(ethHeader3bis) + ethHeader1 := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash()} + ethBlock1 := ethTypes.NewBlockWithHeader(ethHeader1) + ethHeader2 := ðTypes.Header{Number: big.NewInt(2), ParentHash: ethBlock1.Hash()} + ethBlock2 := ethTypes.NewBlockWithHeader(ethHeader2) + ethHeader3 := ðTypes.Header{Number: big.NewInt(3), ParentHash: ethBlock2.Hash()} + ethBlock3 := ethTypes.NewBlockWithHeader(ethHeader3) + + lastBlock0 := &state.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} + lastBlock1 := &state.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} + m.State. + On("GetLastBlock", mock.Anything, nil). + Return(lastBlock1, nil). + Once() m.State. On("BeginStateTransaction", ctxMatchBy). Run(func(args mock.Arguments) { ctx := args[0].(context.Context) - parentHash := common.HexToHash("0x111") - ethHeader0 := ðTypes.Header{Number: big.NewInt(0), ParentHash: parentHash} - ethBlock0 := ethTypes.NewBlockWithHeader(ethHeader0) - ethHeader1bis := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash(), Time: 10, GasUsed: 20, Root: common.HexToHash("0x234")} - ethBlock1bis := ethTypes.NewBlockWithHeader(ethHeader1bis) - ethHeader2bis := ðTypes.Header{Number: big.NewInt(2), ParentHash: ethBlock1bis.Hash()} - ethBlock2bis := ethTypes.NewBlockWithHeader(ethHeader2bis) - ethHeader3bis := ðTypes.Header{Number: big.NewInt(3), ParentHash: ethBlock2bis.Hash()} - ethBlock3bis := ethTypes.NewBlockWithHeader(ethHeader3bis) - ethHeader1 := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash()} - ethBlock1 := ethTypes.NewBlockWithHeader(ethHeader1) - ethHeader2 := ðTypes.Header{Number: big.NewInt(2), ParentHash: ethBlock1.Hash()} - ethBlock2 := ethTypes.NewBlockWithHeader(ethHeader2) - ethHeader3 := ðTypes.Header{Number: big.NewInt(3), ParentHash: ethBlock2.Hash()} - ethBlock3 := ethTypes.NewBlockWithHeader(ethHeader3) - - lastBlock0 := &state.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} - lastBlock1 := &state.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} m.State. On("GetForkIDByBatchNumber", mock.Anything). @@ -986,7 +1038,7 @@ func TestReorg(t *testing.T) { m.State. On("GetLastBlock", ctx, m.DbTx). Return(lastBlock1, nil). - Once() + Maybe() m.State. On("GetLastBatchNumber", ctx, m.DbTx). 
@@ -1275,24 +1327,26 @@ func TestLatestSyncedBlockEmpty(t *testing.T) { ToBatchNumber: math.MaxUint64, } m.State.EXPECT().GetForkIDInMemory(uint64(9)).Return(&forkIdInterval) - + parentHash := common.HexToHash("0x111") + ethHeader0 := ðTypes.Header{Number: big.NewInt(0), ParentHash: parentHash} + ethBlock0 := ethTypes.NewBlockWithHeader(ethHeader0) + ethHeader1 := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash()} + ethBlock1 := ethTypes.NewBlockWithHeader(ethHeader1) + ethHeader2 := ðTypes.Header{Number: big.NewInt(2), ParentHash: ethBlock1.Hash()} + ethBlock2 := ethTypes.NewBlockWithHeader(ethHeader2) + ethHeader3 := ðTypes.Header{Number: big.NewInt(3), ParentHash: ethBlock2.Hash()} + ethBlock3 := ethTypes.NewBlockWithHeader(ethHeader3) + + lastBlock0 := &state.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} + lastBlock1 := &state.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} + m.State. + On("GetLastBlock", mock.Anything, nil). + Return(lastBlock1, nil). + Once() m.State. On("BeginStateTransaction", ctxMatchBy). Run(func(args mock.Arguments) { ctx := args[0].(context.Context) - parentHash := common.HexToHash("0x111") - ethHeader0 := ðTypes.Header{Number: big.NewInt(0), ParentHash: parentHash} - ethBlock0 := ethTypes.NewBlockWithHeader(ethHeader0) - ethHeader1 := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash()} - ethBlock1 := ethTypes.NewBlockWithHeader(ethHeader1) - ethHeader2 := ðTypes.Header{Number: big.NewInt(2), ParentHash: ethBlock1.Hash()} - ethBlock2 := ethTypes.NewBlockWithHeader(ethHeader2) - ethHeader3 := ðTypes.Header{Number: big.NewInt(3), ParentHash: ethBlock2.Hash()} - ethBlock3 := ethTypes.NewBlockWithHeader(ethHeader3) - - lastBlock0 := &state.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} - lastBlock1 := &state.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} - m.State. On("GetForkIDByBatchNumber", mock.Anything). Return(uint64(9), nil). @@ -1300,7 +1354,7 @@ func TestLatestSyncedBlockEmpty(t *testing.T) { m.State. On("GetLastBlock", ctx, m.DbTx). Return(lastBlock1, nil). - Once() + Maybe() m.State. On("GetLastBatchNumber", ctx, m.DbTx). 
@@ -1489,26 +1543,29 @@ func TestRegularReorg(t *testing.T) { ToBatchNumber: math.MaxUint64, } m.State.EXPECT().GetForkIDInMemory(uint64(9)).Return(&forkIdInterval) + parentHash := common.HexToHash("0x111") + ethHeader0 := ðTypes.Header{Number: big.NewInt(0), ParentHash: parentHash} + ethBlock0 := ethTypes.NewBlockWithHeader(ethHeader0) + ethHeader1bis := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash(), Time: 10, GasUsed: 20, Root: common.HexToHash("0x234")} + ethBlock1bis := ethTypes.NewBlockWithHeader(ethHeader1bis) + ethHeader2bis := ðTypes.Header{Number: big.NewInt(2), ParentHash: ethBlock1bis.Hash()} + ethBlock2bis := ethTypes.NewBlockWithHeader(ethHeader2bis) + ethHeader1 := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash()} + ethBlock1 := ethTypes.NewBlockWithHeader(ethHeader1) + ethHeader2 := ðTypes.Header{Number: big.NewInt(2), ParentHash: ethBlock1.Hash()} + ethBlock2 := ethTypes.NewBlockWithHeader(ethHeader2) + + lastBlock0 := &state.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} + lastBlock1 := &state.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} + m.State. + On("GetLastBlock", mock.Anything, nil). + Return(lastBlock1, nil). + Once() m.State. On("BeginStateTransaction", ctxMatchBy). Run(func(args mock.Arguments) { ctx := args[0].(context.Context) - parentHash := common.HexToHash("0x111") - ethHeader0 := ðTypes.Header{Number: big.NewInt(0), ParentHash: parentHash} - ethBlock0 := ethTypes.NewBlockWithHeader(ethHeader0) - ethHeader1bis := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash(), Time: 10, GasUsed: 20, Root: common.HexToHash("0x234")} - ethBlock1bis := ethTypes.NewBlockWithHeader(ethHeader1bis) - ethHeader2bis := ðTypes.Header{Number: big.NewInt(2), ParentHash: ethBlock1bis.Hash()} - ethBlock2bis := ethTypes.NewBlockWithHeader(ethHeader2bis) - ethHeader1 := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash()} - ethBlock1 := ethTypes.NewBlockWithHeader(ethHeader1) - ethHeader2 := ðTypes.Header{Number: big.NewInt(2), ParentHash: ethBlock1.Hash()} - ethBlock2 := ethTypes.NewBlockWithHeader(ethHeader2) - - lastBlock0 := &state.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} - lastBlock1 := &state.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} - m.State. On("GetForkIDByBatchNumber", mock.Anything). Return(uint64(9), nil). @@ -1516,7 +1573,7 @@ func TestRegularReorg(t *testing.T) { m.State. On("GetLastBlock", ctx, m.DbTx). Return(lastBlock1, nil). - Once() + Maybe() // After a ResetState get lastblock that must be block 0 m.State. 
@@ -1771,26 +1828,30 @@ func TestLatestSyncedBlockEmptyWithExtraReorg(t *testing.T) { ToBatchNumber: math.MaxUint64, } m.State.EXPECT().GetForkIDInMemory(uint64(9)).Return(&forkIdInterval) + parentHash := common.HexToHash("0x111") + ethHeader0 := ðTypes.Header{Number: big.NewInt(0), ParentHash: parentHash} + ethBlock0 := ethTypes.NewBlockWithHeader(ethHeader0) + ethHeader1 := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash()} + ethBlock1 := ethTypes.NewBlockWithHeader(ethHeader1) + ethHeader1bis := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash(), Time: 0, GasUsed: 10} + ethBlock1bis := ethTypes.NewBlockWithHeader(ethHeader1bis) + ethHeader2 := ðTypes.Header{Number: big.NewInt(2), ParentHash: ethBlock1.Hash()} + ethBlock2 := ethTypes.NewBlockWithHeader(ethHeader2) + ethHeader3 := ðTypes.Header{Number: big.NewInt(3), ParentHash: ethBlock2.Hash()} + ethBlock3 := ethTypes.NewBlockWithHeader(ethHeader3) + + lastBlock0 := &state.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} + lastBlock1 := &state.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} + lastBlock2 := &state.Block{BlockHash: ethBlock2.Hash(), BlockNumber: ethBlock2.Number().Uint64(), ParentHash: ethBlock2.ParentHash()} + m.State. + On("GetLastBlock", mock.Anything, nil). + Return(lastBlock2, nil). + Once() m.State. On("BeginStateTransaction", ctxMatchBy). Run(func(args mock.Arguments) { ctx := args[0].(context.Context) - parentHash := common.HexToHash("0x111") - ethHeader0 := ðTypes.Header{Number: big.NewInt(0), ParentHash: parentHash} - ethBlock0 := ethTypes.NewBlockWithHeader(ethHeader0) - ethHeader1 := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash()} - ethBlock1 := ethTypes.NewBlockWithHeader(ethHeader1) - ethHeader1bis := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash(), Time: 0, GasUsed: 10} - ethBlock1bis := ethTypes.NewBlockWithHeader(ethHeader1bis) - ethHeader2 := ðTypes.Header{Number: big.NewInt(2), ParentHash: ethBlock1.Hash()} - ethBlock2 := ethTypes.NewBlockWithHeader(ethHeader2) - ethHeader3 := ðTypes.Header{Number: big.NewInt(3), ParentHash: ethBlock2.Hash()} - ethBlock3 := ethTypes.NewBlockWithHeader(ethHeader3) - - lastBlock0 := &state.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} - lastBlock1 := &state.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} - lastBlock2 := &state.Block{BlockHash: ethBlock2.Hash(), BlockNumber: ethBlock2.Number().Uint64(), ParentHash: ethBlock2.ParentHash()} m.State. On("GetForkIDByBatchNumber", mock.Anything). @@ -1799,7 +1860,7 @@ func TestLatestSyncedBlockEmptyWithExtraReorg(t *testing.T) { m.State. On("GetLastBlock", ctx, m.DbTx). Return(lastBlock2, nil). - Once() + Maybe() m.State. On("GetLastBatchNumber", ctx, m.DbTx). 
@@ -2047,25 +2108,28 @@ func TestCallFromEmptyBlockAndReorg(t *testing.T) { ToBatchNumber: math.MaxUint64, } m.State.EXPECT().GetForkIDInMemory(uint64(9)).Return(&forkIdInterval) - + parentHash := common.HexToHash("0x111") + ethHeader0 := ðTypes.Header{Number: big.NewInt(0), ParentHash: parentHash} + ethBlock0 := ethTypes.NewBlockWithHeader(ethHeader0) + ethHeader1bis := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash(), Time: 10, GasUsed: 20, Root: common.HexToHash("0x234")} + ethBlock1bis := ethTypes.NewBlockWithHeader(ethHeader1bis) + ethHeader2bis := ðTypes.Header{Number: big.NewInt(2), ParentHash: ethBlock1bis.Hash()} + ethBlock2bis := ethTypes.NewBlockWithHeader(ethHeader2bis) + ethHeader1 := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash()} + ethBlock1 := ethTypes.NewBlockWithHeader(ethHeader1) + ethHeader2 := ðTypes.Header{Number: big.NewInt(2), ParentHash: ethBlock1.Hash()} + ethBlock2 := ethTypes.NewBlockWithHeader(ethHeader2) + + lastBlock0 := &state.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} + lastBlock1 := &state.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} + m.State. + On("GetLastBlock", mock.Anything, nil). + Return(lastBlock1, nil). + Once() m.State. On("BeginStateTransaction", ctxMatchBy). Run(func(args mock.Arguments) { ctx := args[0].(context.Context) - parentHash := common.HexToHash("0x111") - ethHeader0 := ðTypes.Header{Number: big.NewInt(0), ParentHash: parentHash} - ethBlock0 := ethTypes.NewBlockWithHeader(ethHeader0) - ethHeader1bis := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash(), Time: 10, GasUsed: 20, Root: common.HexToHash("0x234")} - ethBlock1bis := ethTypes.NewBlockWithHeader(ethHeader1bis) - ethHeader2bis := ðTypes.Header{Number: big.NewInt(2), ParentHash: ethBlock1bis.Hash()} - ethBlock2bis := ethTypes.NewBlockWithHeader(ethHeader2bis) - ethHeader1 := ðTypes.Header{Number: big.NewInt(1), ParentHash: ethBlock0.Hash()} - ethBlock1 := ethTypes.NewBlockWithHeader(ethHeader1) - ethHeader2 := ðTypes.Header{Number: big.NewInt(2), ParentHash: ethBlock1.Hash()} - ethBlock2 := ethTypes.NewBlockWithHeader(ethHeader2) - - lastBlock0 := &state.Block{BlockHash: ethBlock0.Hash(), BlockNumber: ethBlock0.Number().Uint64(), ParentHash: ethBlock0.ParentHash()} - lastBlock1 := &state.Block{BlockHash: ethBlock1.Hash(), BlockNumber: ethBlock1.Number().Uint64(), ParentHash: ethBlock1.ParentHash()} m.State. On("GetForkIDByBatchNumber", mock.Anything). @@ -2074,7 +2138,7 @@ func TestCallFromEmptyBlockAndReorg(t *testing.T) { m.State. On("GetLastBlock", ctx, m.DbTx). Return(lastBlock1, nil). - Once() + Maybe() m.State. On("GetLastBatchNumber", ctx, m.DbTx). diff --git a/test/Makefile b/test/Makefile index 35cd659924..adcc216d68 100644 --- a/test/Makefile +++ b/test/Makefile @@ -113,8 +113,8 @@ STOPMETRICS := $(DOCKERCOMPOSE) stop $(DOCKERCOMPOSEMETRICS) && $(DOCKERCOMPOSE) STOP := $(DOCKERCOMPOSE) down --remove-orphans -RUNDACDB := docker-compose up -d zkevm-data-node-db -STOPDACDB := docker-compose stop zkevm-data-node-db && docker-compose rm -f zkevm-data-node-db +RUNDACDB := docker compose up -d zkevm-data-node-db +STOPDACDB := docker compose stop zkevm-data-node-db && docker compose rm -f zkevm-data-node-db .PHONY: test-full-non-e2e test-full-non-e2e: stop ## Runs non-e2e tests checking race conditions
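Note (illustrative only, not part of this patch): the reworked sanityCheckForGenesisBlockRollupInfo accepts the ForkID event at any position, as long as exactly one block is returned and its orders include at least one ForkIDsOrder entry. A minimal test sketch of that behaviour follows; it assumes the usual zkevm-node import paths, would have to live in the synchronizer package because the function is unexported, and its name and hash values are made up for the example.

package synchronizer

import (
	"testing"

	"github.com/0xPolygonHermez/zkevm-node/etherman"
	"github.com/ethereum/go-ethereum/common"
	"github.com/stretchr/testify/require"
)

// TestSanityCheckForGenesisBlockRollupInfoSketch is a hypothetical test, not part of the patch above.
func TestSanityCheckForGenesisBlockRollupInfoSketch(t *testing.T) {
	blockHash := common.HexToHash("0x1")
	blocks := []etherman.Block{{BlockNumber: 100, BlockHash: blockHash}}

	// A ForkIDsOrder entry anywhere in the order list satisfies the check.
	okOrder := map[common.Hash][]etherman.Order{
		blockHash: {
			{Name: etherman.InitialSequenceBatchesOrder, Pos: 0},
			{Name: etherman.ForkIDsOrder, Pos: 1},
		},
	}
	require.NoError(t, sanityCheckForGenesisBlockRollupInfo(blocks, okOrder))

	// Without a ForkIDsOrder entry the check must fail.
	badOrder := map[common.Hash][]etherman.Order{
		blockHash: {{Name: etherman.InitialSequenceBatchesOrder, Pos: 0}},
	}
	require.Error(t, sanityCheckForGenesisBlockRollupInfo(blocks, badOrder))
}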