diff --git a/cmd/geth/verkle.go b/cmd/geth/verkle.go index 57a6fd3096ee..f16895955b47 100644 --- a/cmd/geth/verkle.go +++ b/cmd/geth/verkle.go @@ -130,14 +130,13 @@ func convertToVerkle(ctx *cli.Context) error { vRoot = verkle.New().(*verkle.InternalNode) ) - saveverkle := func(node verkle.VerkleNode) { - comm := node.Commit() + saveverkle := func(path []byte, node verkle.VerkleNode) { + node.Commit() s, err := node.Serialize() if err != nil { panic(err) } - commB := comm.Bytes() - if err := chaindb.Put(commB[:], s); err != nil { + if err := chaindb.Put(path, s); err != nil { panic(err) } } @@ -330,7 +329,7 @@ func checkChildren(root verkle.VerkleNode, resolver verkle.NodeResolverFn) error return fmt.Errorf("could not find child %x in db: %w", childC, err) } // depth is set to 0, the tree isn't rebuilt so it's not a problem - childN, err := verkle.ParseNode(childS, 0, childC[:]) + childN, err := verkle.ParseNode(childS, 0) if err != nil { return fmt.Errorf("decode error child %x in db: %w", child.Commitment().Bytes(), err) } @@ -390,7 +389,7 @@ func verifyVerkle(ctx *cli.Context) error { if err != nil { return err } - root, err := verkle.ParseNode(serializedRoot, 0, rootC[:]) + root, err := verkle.ParseNode(serializedRoot, 0) if err != nil { return err } @@ -439,7 +438,7 @@ func expandVerkle(ctx *cli.Context) error { if err != nil { return err } - root, err := verkle.ParseNode(serializedRoot, 0, rootC[:]) + root, err := verkle.ParseNode(serializedRoot, 0) if err != nil { return err } diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go index 90f009041477..e42120e85ade 100644 --- a/cmd/utils/cmd.go +++ b/cmd/utils/cmd.go @@ -26,6 +26,7 @@ import ( "os" "os/signal" "runtime" + "runtime/pprof" "strings" "syscall" "time" @@ -173,6 +174,17 @@ func ImportChain(chain *core.BlockChain, fn string) error { return err } } + cpuProfile, err := os.Create("cpu.out") + if err != nil { + return fmt.Errorf("Error creating CPU profile: %v", err) + } + defer cpuProfile.Close() + err = pprof.StartCPUProfile(cpuProfile) + if err != nil { + return fmt.Errorf("Error starting CPU profile: %v", err) + } + defer pprof.StopCPUProfile() + stream := rlp.NewStream(reader, 0) // Run actual the import. diff --git a/core/block_validator.go b/core/block_validator.go index 3763be0be08d..0d27d6410fcf 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -65,12 +65,17 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash { return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash) } - if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) { - if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) { - return consensus.ErrUnknownAncestor - } - return consensus.ErrPrunedAncestor - } + // XXX I had to deactivate this check for replay to work: the block state root + // hash is the one of the overlay tree, but in replay mode, it's the hash of + // the base tree that takes precedence, as the chain would not otherwise be + // recognized. 
+ // if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) { + // if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) { + // return consensus.ErrUnknownAncestor + // } + // fmt.Println("failure here") + // return consensus.ErrPrunedAncestor + // } return nil } @@ -90,15 +95,15 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD return fmt.Errorf("invalid bloom (remote: %x local: %x)", header.Bloom, rbloom) } // Tre receipt Trie's root (R = (Tr [[H1, R1], ... [Hn, Rn]])) - receiptSha := types.DeriveSha(receipts, trie.NewStackTrie(nil)) - if receiptSha != header.ReceiptHash { - return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha) - } + // receiptSha := types.DeriveSha(receipts, trie.NewStackTrie(nil)) + // if receiptSha != header.ReceiptHash { + // return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha) + // } // Validate the state root against the received state root and throw // an error if they don't match. - if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root { - return fmt.Errorf("invalid merkle root (remote: %x local: %x)", header.Root, root) - } + // if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root { + // return fmt.Errorf("invalid merkle root (remote: %x local: %x)", header.Root, root) + // } return nil } diff --git a/core/blockchain.go b/core/blockchain.go index 0dd6f63de50c..b32548b9eb57 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -18,12 +18,15 @@ package core import ( + "bufio" "errors" "fmt" "io" "math/big" + "os" "runtime" "sort" + "strconv" "strings" "sync" "sync/atomic" @@ -1484,6 +1487,21 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) return 0, nil } + f, err := os.Open("conversion.txt") + if err != nil { + log.Error("Failed to open conversion.txt", "err", err) + return 0, err + } + defer f.Close() + scanner := bufio.NewScanner(f) + scanner.Scan() + conversionBlock, err := strconv.ParseUint(scanner.Text(), 10, 64) + if err != nil { + log.Error("Failed to parse conversionBlock", "err", err) + return 0, err + } + log.Info("Found conversion block info", "conversionBlock", conversionBlock) + // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss) senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain) @@ -1668,6 +1686,10 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) if parent == nil { parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1) } + + if parent.Number.Uint64() == conversionBlock { + bc.StartVerkleTransition(parent.Root, emptyVerkleRoot, bc.Config(), parent.Number) + } statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps) if err != nil { return it.index, err @@ -1703,6 +1725,11 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals, setHead bool) atomic.StoreUint32(&followupInterrupt, 1) return it.index, err } + if fdb, ok := statedb.Database().(*state.ForkingDB); ok { + if fdb.InTransition() || fdb.Transitionned() { + bc.AddRootTranslation(block.Root(), statedb.IntermediateRoot(false)) + } + } // Update the metrics touched during block processing accountReadTimer.Update(statedb.AccountReads) // Account reads are complete, we can mark them @@ -2285,6 +2312,8 @@ func (bc *BlockChain) skipBlock(err error, it *insertIterator) bool { return false } +var 
emptyVerkleRoot common.Hash + // indexBlocks reindexes or unindexes transactions depending on user configuration func (bc *BlockChain) indexBlocks(tail *uint64, head uint64, done chan struct{}) { defer func() { close(done) }() @@ -2429,3 +2458,11 @@ func (bc *BlockChain) SetBlockValidatorAndProcessorForTesting(v Validator, p Pro bc.validator = v bc.processor = p } + +func (bc *BlockChain) StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, cancunBlock *big.Int) { + bc.stateCache.(*state.ForkingDB).StartTransition(originalRoot, translatedRoot, chainConfig, cancunBlock) +} + +func (bc *BlockChain) AddRootTranslation(originalRoot, translatedRoot common.Hash) { + bc.stateCache.(*state.ForkingDB).AddTranslation(originalRoot, translatedRoot) +} diff --git a/core/chain_makers.go b/core/chain_makers.go index 13be8372d96a..b0b603d41e5c 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -313,6 +313,7 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine keyvals := make([]verkle.StateDiff, 0, n) blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n) chainreader := &fakeChainReader{config: config} + var preStateTrie *trie.VerkleTrie genblock := func(i int, parent *types.Block, statedb *state.StateDB) (*types.Block, types.Receipts) { b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine} b.header = makeHeader(chainreader, parent, statedb, b.engine) @@ -372,13 +373,28 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine kvs[string(key)] = v } + // Initialize the preStateTrie if it is nil, this should + // correspond to the genesis block. This is a workaround + // needed until the main verkle PR is rebased on top of + // PBSS. + if preStateTrie == nil { + preStateTrie = vtr + } + vtr.Hash() - p, k, err := vtr.ProveAndSerialize(statedb.Witness().Keys(), kvs) + p, k, err := preStateTrie.ProveAndSerialize(statedb.Witness().Keys(), kvs) if err != nil { panic(err) } proofs = append(proofs, p) keyvals = append(keyvals, k) + + // save the current state of the trie for producing the proof for the next block, + // since reading it from disk is broken with the intermediate PBSS-like system we + // have: it will read the post-state as this is the only state present on disk. + // This is a workaround needed until the main verkle PR is rebased on top of PBSS. 
+ preStateTrie = statedb.GetTrie().(*trie.VerkleTrie) + return block, b.receipts } return nil, nil diff --git a/core/state/access_witness.go b/core/state/access_witness.go index 789bbbc27e3a..05970270dac1 100644 --- a/core/state/access_witness.go +++ b/core/state/access_witness.go @@ -251,7 +251,7 @@ func (aw *AccessWitness) Copy() *AccessWitness { } func (aw *AccessWitness) GetTreeKeyVersionCached(addr []byte) []byte { - return aw.statedb.db.(*VerkleDB).addrToPoint.GetTreeKeyVersionCached(addr) + return aw.statedb.db.(*ForkingDB).addrToPoint.GetTreeKeyVersionCached(addr) } func (aw *AccessWitness) TouchAndChargeProofOfAbsence(addr []byte) uint64 { diff --git a/core/state/database.go b/core/state/database.go index 77c80f10dd9c..5b2330e66e22 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -19,12 +19,15 @@ package state import ( "errors" "fmt" + "math/big" + "sync" "github.com/VictoriaMetrics/fastcache" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/utils" "github.com/gballet/go-verkle" @@ -45,7 +48,7 @@ type Database interface { OpenTrie(root common.Hash) (Trie, error) // OpenStorageTrie opens the storage trie of an account. - OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error) + OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash, main Trie) (Trie, error) // CopyTrie returns an independent copy of the given trie. CopyTrie(Trie) Trie @@ -136,21 +139,207 @@ func NewDatabase(db ethdb.Database) Database { // large memory cache. func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database { csc, _ := lru.New(codeSizeCacheSize) - if config != nil && config.UseVerkle { - return &VerkleDB{ + return &ForkingDB{ + cachingDB: &cachingDB{ + db: trie.NewDatabaseWithConfig(db, config), + disk: db, + codeSizeCache: csc, + codeCache: fastcache.New(codeCacheSize), + }, + VerkleDB: &VerkleDB{ db: trie.NewDatabaseWithConfig(db, config), diskdb: db, codeSizeCache: csc, codeCache: fastcache.New(codeCacheSize), addrToPoint: utils.NewPointCache(), + }, + started: (config != nil && config.UseVerkle), + ended: (config != nil && config.UseVerkle), + } +} + +// ForkingDB is an adapter object to support forks between +// cachingDB and VerkleDB. +type ForkingDB struct { + *cachingDB + *VerkleDB + + // TODO ensure that this info is in the DB + started, ended bool + translatedRoots [32]common.Hash // hash of the translated root, for opening + origRoots [32]common.Hash + translationIndex int + translatedRootsLock sync.RWMutex + + baseRoot common.Hash // hash of the read-only base tree + CurrentAccountHash common.Hash // hash of the last translated account + CurrentSlotHash common.Hash // hash of the last translated storage slot + + // Mark whether the storage for an account has been processed. This is useful if the + // maximum number of leaves of the conversion is reached before the whole storage is + // processed. 
+ StorageProcessed bool +} + +// ContractCode implements Database +func (fdb *ForkingDB) ContractCode(addrHash common.Hash, codeHash common.Hash) ([]byte, error) { + if fdb.started { + return fdb.VerkleDB.ContractCode(addrHash, codeHash) + } + + return fdb.cachingDB.ContractCode(addrHash, codeHash) +} + +// ContractCodeSize implements Database +func (fdb *ForkingDB) ContractCodeSize(addrHash common.Hash, codeHash common.Hash) (int, error) { + if fdb.started { + return fdb.VerkleDB.ContractCodeSize(addrHash, codeHash) + } + + return fdb.cachingDB.ContractCodeSize(addrHash, codeHash) +} + +// CopyTrie implements Database +func (fdb *ForkingDB) CopyTrie(t Trie) Trie { + return fdb.cachingDB.CopyTrie(t) +} + +func verkleOrTransitionTree(self Trie) Trie { + switch t := self.(type) { + case *trie.TransitionTrie: + return t.Overlay() + case *trie.VerkleTrie: + return t + default: + panic("unexpected trie type") + } +} + +// OpenStorageTrie implements Database +func (fdb *ForkingDB) OpenStorageTrie(stateRoot, addrHash, root common.Hash, self Trie) (Trie, error) { + mpt, err := fdb.cachingDB.OpenStorageTrie(stateRoot, addrHash, root, nil) + if fdb.started && err == nil { + // Return a "storage trie" that is an adapter between the storge MPT + // and the unique verkle tree. + vkt, err := fdb.VerkleDB.OpenStorageTrie(stateRoot, addrHash, fdb.getTranslation(root), verkleOrTransitionTree(self)) + if err != nil { + return nil, err + } + return trie.NewTransitionTree(mpt.(*trie.SecureTrie), vkt.(*trie.VerkleTrie), true), nil + } + + return mpt, err +} + +// OpenTrie implements Database +func (fdb *ForkingDB) OpenTrie(root common.Hash) (Trie, error) { + var ( + mpt Trie + err error + ) + + if fdb.started { + vkt, err := fdb.VerkleDB.OpenTrie(fdb.getTranslation(root)) + if err != nil { + return nil, err + } + + // If the verkle conversion has ended, return a single + // verkle trie. + if fdb.ended { + return vkt, nil + } + + // Otherwise, return a transition trie, with a base MPT + // trie and an overlay, verkle trie. + mpt, err = fdb.cachingDB.OpenTrie(fdb.baseRoot) + if err != nil { + return nil, err + } + + return trie.NewTransitionTree(mpt.(*trie.SecureTrie), vkt.(*trie.VerkleTrie), false), nil + } else { + mpt, err = fdb.cachingDB.OpenTrie(root) + if err != nil { + return nil, err } } - return &cachingDB{ - db: trie.NewDatabaseWithConfig(db, config), - disk: db, - codeSizeCache: csc, - codeCache: fastcache.New(codeCacheSize), + + return mpt, nil +} + +// TrieDB implements Database +func (fdb *ForkingDB) TrieDB() *trie.Database { + if fdb.started { + return fdb.VerkleDB.TrieDB() } + + return fdb.cachingDB.TrieDB() +} + +// DiskDB retrieves the low level trie database used for data storage. 
+func (fdb *ForkingDB) DiskDB() ethdb.KeyValueStore { + if fdb.started { + return fdb.VerkleDB.DiskDB() + } + + return fdb.cachingDB.DiskDB() +} + +func (fdg *ForkingDB) InTransition() bool { + return fdg.started && !fdg.ended +} + +func (fdg *ForkingDB) Transitionned() bool { + return fdg.ended +} + +// Fork implements the fork +func (fdb *ForkingDB) StartTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, cancunBlock *big.Int) { + fmt.Println(` + __________.__ .__ .__ __ .__ .__ ____ + \__ ___| |__ ____ ____ | | ____ ______ | |__ _____ _____/ |_ | |__ _____ ______ __ _ _|__| ____ / ___\ ______ + | | | | \_/ __ \ _/ __ \| | _/ __ \\____ \| | \\__ \ / \ __\ | | \\__ \ / ___/ \ \/ \/ | |/ \ / /_/ / ___/ + | | | Y \ ___/ \ ___/| |_\ ___/| |_> | Y \/ __ \| | | | | Y \/ __ \_\___ \ \ /| | | \\___ /\___ \ + |____| |___| /\___ \___ |____/\___ | __/|___| (____ |___| |__| |___| (____ /_____/ \/\_/ |__|___| /_____//_____/ + |__|`) + fdb.started = true + fdb.AddTranslation(originalRoot, translatedRoot) + fdb.baseRoot = originalRoot + // initialize so that the first storage-less accounts are processed + fdb.StorageProcessed = true + chainConfig.CancunBlock = cancunBlock +} + +func (fdb *ForkingDB) EndTransition() { + fmt.Println(` + __________.__ .__ .__ __ .__ .__ .___ .___ + \__ ___| |__ ____ ____ | | ____ ______ | |__ _____ _____/ |_ | |__ _____ ______ | | _____ ____ __| _/____ __| _/ + | | | | \_/ __ \ _/ __ \| | _/ __ \\____ \| | \\__ \ / \ __\ | | \\__ \ / ___/ | | \__ \ / \ / __ _/ __ \ / __ | + | | | Y \ ___/ \ ___/| |_\ ___/| |_> | Y \/ __ \| | | | | Y \/ __ \_\___ \ | |__/ __ \| | / /_/ \ ___// /_/ | + |____| |___| /\___ \___ |____/\___ | __/|___| (____ |___| |__| |___| (____ /_____/ |____(____ |___| \____ |\___ \____ | + |__|`) + fdb.ended = true +} + +func (fdb *ForkingDB) AddTranslation(orig, trans common.Hash) { + // TODO make this persistent + fdb.translatedRootsLock.Lock() + defer fdb.translatedRootsLock.Unlock() + fdb.translatedRoots[fdb.translationIndex] = trans + fdb.origRoots[fdb.translationIndex] = orig + fdb.translationIndex = (fdb.translationIndex + 1) % len(fdb.translatedRoots) +} + +func (fdb *ForkingDB) getTranslation(orig common.Hash) common.Hash { + fdb.translatedRootsLock.RLock() + defer fdb.translatedRootsLock.RUnlock() + for i, o := range fdb.origRoots { + if o == orig { + return fdb.translatedRoots[i] + } + } + return common.Hash{} } type cachingDB struct { @@ -170,7 +359,7 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { } // OpenStorageTrie opens the storage trie of an account. -func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error) { +func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash, _ Trie) (Trie, error) { tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, addrHash, root), db.db) if err != nil { return nil, err @@ -183,6 +372,10 @@ func (db *cachingDB) CopyTrie(t Trie) Trie { switch t := t.(type) { case *trie.StateTrie: return t.Copy() + case *trie.TransitionTrie: + return t.Copy() + case *trie.VerkleTrie: + return t.Copy() default: panic(fmt.Errorf("unknown trie type %T", t)) } @@ -254,16 +447,13 @@ func (db *VerkleDB) GetTreeKeyHeader(addr []byte) *verkle.Point { } // OpenTrie opens the main account trie. 
-func (db *VerkleDB) OpenTrie(root common.Hash) (Trie, error) { - if root == (common.Hash{}) || root == emptyRoot { - return trie.NewVerkleTrie(verkle.New(), db.db, db.addrToPoint), nil - } - payload, err := db.DiskDB().Get(root[:]) +func (db *VerkleDB) OpenTrie(_ common.Hash) (Trie, error) { + payload, err := db.DiskDB().Get([]byte("flat-")) if err != nil { - return nil, err + return trie.NewVerkleTrie(verkle.New(), db.db, db.addrToPoint), nil } - r, err := verkle.ParseNode(payload, 0, root[:]) + r, err := verkle.ParseNode(payload, 0) if err != nil { panic(err) } @@ -271,19 +461,20 @@ func (db *VerkleDB) OpenTrie(root common.Hash) (Trie, error) { } // OpenStorageTrie opens the storage trie of an account. -func (db *VerkleDB) OpenStorageTrie(stateRoot, addrHash, root common.Hash) (Trie, error) { - // alternatively, return accTrie - panic("should not be called") +func (db *VerkleDB) OpenStorageTrie(stateRoot, addrHash, root common.Hash, self Trie) (Trie, error) { + return self, nil } // CopyTrie returns an independent copy of the given trie. func (db *VerkleDB) CopyTrie(tr Trie) Trie { - t, ok := tr.(*trie.VerkleTrie) - if ok { - return t.Copy(db.db) + switch t := tr.(type) { + case *trie.VerkleTrie: + return t.Copy() + case *trie.TransitionTrie: + return t.Copy() + default: + panic(fmt.Sprintf("invalid tree type %T != VerkleTrie", tr)) } - - panic("invalid tree type != VerkleTrie") } // ContractCode retrieves a particular contract's code. diff --git a/core/state/iterator.go b/core/state/iterator.go index ca1367e69992..492dda8fd649 100644 --- a/core/state/iterator.go +++ b/core/state/iterator.go @@ -118,7 +118,7 @@ func (it *NodeIterator) step() error { if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil { return err } - dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, common.BytesToHash(it.stateIt.LeafKey()), account.Root) + dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, common.BytesToHash(it.stateIt.LeafKey()), account.Root, nil) if err != nil { return err } diff --git a/core/state/snapshot/account.go b/core/state/snapshot/account.go index b92e94295014..21aaeaca0bb1 100644 --- a/core/state/snapshot/account.go +++ b/core/state/snapshot/account.go @@ -84,3 +84,8 @@ func FullAccountRLP(data []byte) ([]byte, error) { } return rlp.EncodeToBytes(account) } + +// HasStorage returns true if the account has a non-empty storage tree. 
+func (acc *Account) HasStorage() bool { + return len(acc.Root) == 32 && !bytes.Equal(acc.Root, emptyRoot[:]) +} diff --git a/core/state/state_object.go b/core/state/state_object.go index 1b4f60a382ee..e4a5a701c857 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -160,9 +160,9 @@ func (s *stateObject) getTrie(db Database) Trie { } if s.trie == nil { var err error - s.trie, err = db.OpenStorageTrie(s.db.originalRoot, s.addrHash, s.data.Root) + s.trie, err = db.OpenStorageTrie(s.db.originalRoot, s.addrHash, s.data.Root, s.db.trie) if err != nil { - s.trie, _ = db.OpenStorageTrie(s.db.originalRoot, s.addrHash, common.Hash{}) + s.trie, _ = db.OpenStorageTrie(s.db.originalRoot, s.addrHash, common.Hash{}, s.db.trie) s.setError(fmt.Errorf("can't create storage trie: %v", err)) } } @@ -200,8 +200,9 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has } // If no live objects are available, attempt to use snapshots var ( - enc []byte - err error + enc []byte + err error + value common.Hash ) if s.db.snap != nil { // If the object was destructed in *this* block (and potentially resurrected), @@ -221,11 +222,15 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has } // If the snapshot is unavailable or reading from it fails, load from the database. if s.db.snap == nil || err != nil { + var tr = s.getTrie(db) + start := time.Now() if s.db.GetTrie().IsVerkle() { - panic("verkle trees use the snapshot") + var v []byte + v, err = tr.TryGet(s.address[:], key.Bytes()) + copy(value[:], v) + } else { + enc, err = s.getTrie(db).TryGet(s.address[:], key.Bytes()) } - start := time.Now() - enc, err = s.getTrie(db).TryGet(s.address[:], key.Bytes()) if metrics.EnabledExpensive { s.db.StorageReads += time.Since(start) } @@ -234,7 +239,6 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has return common.Hash{} } } - var value common.Hash if len(enc) > 0 { _, content, _, err := rlp.Split(enc) if err != nil { @@ -324,11 +328,7 @@ func (s *stateObject) updateTrie(db Database) Trie { var storage map[common.Hash][]byte // Insert all the pending updates into the trie var tr Trie - if s.db.trie.IsVerkle() { - tr = s.db.trie - } else { - tr = s.getTrie(db) - } + tr = s.getTrie(db) hasher := s.db.hasher usedStorage := make([][]byte, 0, len(s.pendingStorage)) @@ -373,7 +373,7 @@ func (s *stateObject) updateTrie(db Database) Trie { if len(s.pendingStorage) > 0 { s.pendingStorage = make(Storage) } - return tr + return s.trie } // UpdateRoot sets the trie root to the current root hash of diff --git a/core/state/statedb.go b/core/state/statedb.go index 3171cdff3caa..c7b213cdee41 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -175,6 +175,17 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) sdb.snapDestructs = make(map[common.Hash]struct{}) sdb.snapAccounts = make(map[common.Hash][]byte) sdb.snapStorage = make(map[common.Hash]map[common.Hash][]byte) + } else { + if fdb, ok := db.(*ForkingDB); ok { + trans := fdb.getTranslation(root) + if trans != (common.Hash{}) { + if sdb.snap = sdb.snaps.Snapshot(trans); sdb.snap != nil { + sdb.snapDestructs = make(map[common.Hash]struct{}) + sdb.snapAccounts = make(map[common.Hash][]byte) + sdb.snapStorage = make(map[common.Hash]map[common.Hash][]byte) + } + } + } } } return sdb, nil @@ -527,12 +538,13 @@ func (s *StateDB) updateStateObject(obj *stateObject) { chunks = trie.ChunkifyCode(obj.code) values [][]byte key []byte + err 
error ) for i, chunknr := 0, uint64(0); i < len(chunks); i, chunknr = i+32, chunknr+1 { groupOffset := (chunknr + 128) % 256 if groupOffset == 0 /* start of new group */ || chunknr == 0 /* first chunk in header group */ { values = make([][]byte, verkle.NodeWidth) - key = utils.GetTreeKeyCodeChunkWithEvaluatedAddress(obj.db.db.(*VerkleDB).GetTreeKeyHeader(obj.address[:]), uint256.NewInt(chunknr)) + key = utils.GetTreeKeyCodeChunkWithEvaluatedAddress(obj.db.db.(*ForkingDB).GetTreeKeyHeader(obj.address[:]), uint256.NewInt(chunknr)) } values[groupOffset] = chunks[i : i+32] @@ -544,7 +556,13 @@ func (s *StateDB) updateStateObject(obj *stateObject) { } if groupOffset == 255 || len(chunks)-i <= 32 { - if err := s.trie.(*trie.VerkleTrie).TryUpdateStem(key[:31], values); err != nil { + switch t := s.trie.(type) { + case *trie.VerkleTrie: + err = t.TryUpdateStem(key[:31], values) + case *trie.TransitionTrie: + err = t.TryUpdateStem(key[:31], values) + } + if err != nil { s.setError(fmt.Errorf("updateStateObject (%x) error: %w", addr[:], err)) } } diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index ad2383103ab4..6e35156c40b6 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -300,7 +300,7 @@ func (sf *subfetcher) loop() { } sf.trie = trie } else { - trie, err := sf.db.OpenStorageTrie(sf.state, sf.owner, sf.root) + trie, err := sf.db.OpenStorageTrie(sf.state, sf.owner, sf.root, nil /* safe to set since there is no prefetcher for verkle */) if err != nil { log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err) return diff --git a/core/state_processor.go b/core/state_processor.go index 94045c742ce7..fb8bd8b88bc6 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -17,17 +17,28 @@ package core import ( + "bytes" + "encoding/binary" "fmt" "math/big" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc" + "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + tutils "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" + "github.com/holiman/uint256" ) // StateProcessor is a basic Processor, which takes care of transitioning @@ -86,9 +97,146 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg receipts = append(receipts, receipt) allLogs = append(allLogs, receipt.Logs...) } + + // verkle transition: if the conversion process is in progress, move + // N values from the MPT into the verkle tree. + if fdb, ok := statedb.Database().(*state.ForkingDB); ok { + if fdb.InTransition() { + var ( + now = time.Now() + tt = statedb.GetTrie().(*trie.TransitionTrie) + mpt = tt.Base() + vkt = tt.Overlay() + ) + + accIt, err := statedb.Snaps().AccountIterator(mpt.Hash(), fdb.CurrentAccountHash) + if err != nil { + return nil, nil, 0, err + } + defer accIt.Release() + accIt.Next() + + const maxMovedCount = 10000 + // mkv will be assiting in the collection of up to maxMovedCount key values to be migrated to the VKT. 
+ // It has internal caches to do efficient MPT->VKT key calculations, which will be discarded after + // this function. + mkv := &keyValueMigrator{vktLeafData: make(map[string]*verkle.BatchNewLeafNodeData)} + // move maxCount accounts into the verkle tree, starting with the + // slots from the previous account. + count := 0 + + // if less than maxCount slots were moved, move to the next account + for count < maxMovedCount { + fdb.CurrentAccountHash = accIt.Hash() + + acc, err := snapshot.FullAccount(accIt.Account()) + if err != nil { + log.Error("Invalid account encountered during traversal", "error", err) + return nil, nil, 0, err + } + addr := rawdb.ReadPreimage(statedb.Database().DiskDB(), accIt.Hash()) + if len(addr) == 0 { + panic(fmt.Sprintf("%x %x %v", addr, accIt.Hash(), acc)) + } + vkt.SetStorageRootConversion(addr, common.BytesToHash(acc.Root)) + + // Start with processing the storage, because once the account is + // converted, the `stateRoot` field loses its meaning. Which means + // that it opens the door to a situation in which the storage isn't + // converted, but it can not be found since the account was and so + // there is no way to find the MPT storage from the information found + // in the verkle account. + // Note that this issue can still occur if the account gets written + // to during normal block execution. A mitigation strategy has been + // introduced with the `*StorageRootConversion` fields in VerkleDB. + if acc.HasStorage() { + stIt, err := statedb.Snaps().StorageIterator(mpt.Hash(), accIt.Hash(), fdb.CurrentSlotHash) + if err != nil { + return nil, nil, 0, err + } + stIt.Next() + + // fdb.StorageProcessed will be initialized to `true` if the + // entire storage for an account was not entirely processed + // by the previous block. This is used as a signal to resume + // processing the storage for that account where we left off. + // If the entire storage was processed, then the iterator was + // created in vain, but it's ok as this will not happen often. + for ; !fdb.StorageProcessed && count < maxMovedCount; count++ { + var ( + value []byte // slot value after RLP decoding + safeValue [32]byte // 32-byte aligned value + ) + if err := rlp.DecodeBytes(stIt.Slot(), &value); err != nil { + return nil, nil, 0, fmt.Errorf("error decoding bytes %x: %w", stIt.Slot(), err) + } + copy(safeValue[32-len(value):], value) + slotnr := rawdb.ReadPreimage(statedb.Database().DiskDB(), stIt.Hash()) + + mkv.addStorageSlot(addr, slotnr, safeValue[:]) + + // advance the storage iterator + fdb.StorageProcessed = !stIt.Next() + if !fdb.StorageProcessed { + fdb.CurrentSlotHash = stIt.Hash() + } + } + stIt.Release() + } + + // If the maximum number of leaves hasn't been reached, then + // it means that the storage has finished processing (or none + // was available for this account) and that the account itself + // can be processed. + if count < maxMovedCount { + count++ // count increase for the account itself + + mkv.addAccount(addr, acc) + vkt.ClearStrorageRootConversion(addr) + + // Store the account code if present + if !bytes.Equal(acc.CodeHash, emptyCodeHash[:]) { + code := rawdb.ReadCode(statedb.Database().DiskDB(), common.BytesToHash(acc.CodeHash)) + chunks := trie.ChunkifyCode(code) + + mkv.addAccountCode(addr, uint64(len(code)), chunks) + } + + // reset storage iterator marker for next account + fdb.StorageProcessed = false + fdb.CurrentSlotHash = common.Hash{} + + // Move to the next account, if available - or end + // the transition otherwise. 
+ if accIt.Next() { + fdb.CurrentAccountHash = accIt.Hash() + } else { + // case when the account iterator has + // reached the end but count < maxCount + fdb.EndTransition() + break + } + } + } + + log.Info("Collected and prepared key values from base tree", "count", count, "duration", time.Since(now), "last account", fdb.CurrentAccountHash) + + now = time.Now() + if err := mkv.migrateCollectedKeyValues(tt.Overlay()); err != nil { + return nil, nil, 0, fmt.Errorf("could not migrate key values: %w", err) + } + log.Info("Inserted key values in overlay tree", "count", count, "duration", time.Since(now)) + } + } + // Finalize the block, applying any consensus engine specific extras (e.g. block rewards) p.engine.Finalize(p.bc, header, statedb, block.Transactions(), block.Uncles()) + if block.NumberU64()%100 == 0 { + stateRoot := statedb.GetTrie().Hash() + log.Info("State root", "number", block.NumberU64(), "hash", stateRoot) + } + return receipts, allLogs, *usedGas, nil } @@ -158,3 +306,117 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg) return applyTransaction(msg, config, author, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv) } + +// keyValueMigrator is a helper struct that collects key-values from the base tree. +// The walk is done in account order, so **we assume** the APIs hold this invariant. This is +// useful to be smart about caching banderwagon.Points to make VKT key calculations faster. +type keyValueMigrator struct { + currAddr []byte + currAddrPoint *verkle.Point + + vktLeafData map[string]*verkle.BatchNewLeafNodeData +} + +func (kvm *keyValueMigrator) addStorageSlot(addr []byte, slotNumber []byte, slotValue []byte) { + addrPoint := kvm.getAddrPoint(addr) + + vktKey := tutils.GetTreeKeyStorageSlotWithEvaluatedAddress(addrPoint, slotNumber) + leafNodeData := kvm.getOrInitLeafNodeData(vktKey) + + leafNodeData.Values[vktKey[verkle.StemSize]] = slotValue +} + +func (kvm *keyValueMigrator) addAccount(addr []byte, acc snapshot.Account) { + addrPoint := kvm.getAddrPoint(addr) + + vktKey := tutils.GetTreeKeyVersionWithEvaluatedAddress(addrPoint) + leafNodeData := kvm.getOrInitLeafNodeData(vktKey) + + var version [verkle.LeafValueSize]byte + leafNodeData.Values[tutils.VersionLeafKey] = version[:] + + var balance [verkle.LeafValueSize]byte + for i, b := range acc.Balance.Bytes() { + balance[len(acc.Balance.Bytes())-1-i] = b + } + leafNodeData.Values[tutils.BalanceLeafKey] = balance[:] + + var nonce [verkle.LeafValueSize]byte + binary.LittleEndian.PutUint64(nonce[:8], acc.Nonce) + leafNodeData.Values[tutils.NonceLeafKey] = nonce[:] + + leafNodeData.Values[tutils.CodeKeccakLeafKey] = acc.CodeHash[:] + + // Code size is ignored here. If this isn't an EOA, the tree-walk will call + // addAccountCode with this information. +} + +func (kvm *keyValueMigrator) addAccountCode(addr []byte, codeSize uint64, chunks []byte) { + addrPoint := kvm.getAddrPoint(addr) + + vktKey := tutils.GetTreeKeyVersionWithEvaluatedAddress(addrPoint) + leafNodeData := kvm.getOrInitLeafNodeData(vktKey) + + // Save the code size. + var codeSizeBytes [verkle.LeafValueSize]byte + binary.LittleEndian.PutUint64(codeSizeBytes[:8], codeSize) + leafNodeData.Values[tutils.CodeSizeLeafKey] = codeSizeBytes[:] + + // The first 128 chunks are stored in the account header leaf. 
+ for i := 0; i < 128 && i < len(chunks)/32; i++ { + leafNodeData.Values[byte(128+i)] = chunks[32*i : 32*(i+1)] + } + + // Potential further chunks, have their own leaf nodes. + for i := 128; i < len(chunks)/32; { + vktKey := tutils.GetTreeKeyCodeChunkWithEvaluatedAddress(addrPoint, uint256.NewInt(uint64(i))) + leafNodeData := kvm.getOrInitLeafNodeData(vktKey) + + j := i + for ; (j-i) < 256 && j < len(chunks)/32; j++ { + leafNodeData.Values[byte((j-128)%256)] = chunks[32*j : 32*(j+1)] + } + i = j + } +} + +func (kvm *keyValueMigrator) getAddrPoint(addr []byte) *verkle.Point { + if bytes.Equal(addr, kvm.currAddr) { + return kvm.currAddrPoint + } + kvm.currAddr = addr + kvm.currAddrPoint = tutils.EvaluateAddressPoint(addr) + return kvm.currAddrPoint +} + +func (kvm *keyValueMigrator) getOrInitLeafNodeData(stem []byte) *verkle.BatchNewLeafNodeData { + stemStr := string(stem) + if _, ok := kvm.vktLeafData[stemStr]; !ok { + kvm.vktLeafData[stemStr] = &verkle.BatchNewLeafNodeData{ + Stem: stem[:verkle.StemSize], + Values: make(map[byte][]byte), + } + } + return kvm.vktLeafData[stemStr] +} + +func (kvm *keyValueMigrator) migrateCollectedKeyValues(tree *trie.VerkleTrie) error { + // Transform the map into a slice. + nodeValues := make([]verkle.BatchNewLeafNodeData, 0, len(kvm.vktLeafData)) + for _, vld := range kvm.vktLeafData { + nodeValues = append(nodeValues, *vld) + } + + // Create all leaves in batch mode so we can optimize cryptography operations. + newLeaves, err := verkle.BatchNewLeafNode(nodeValues) + if err != nil { + return fmt.Errorf("failed to batch-create new leaf nodes") + } + + // Insert into the tree. + if err := tree.InsertMigratedLeaves(newLeaves); err != nil { + return fmt.Errorf("failed to insert migrated leaves: %w", err) + } + + return nil +} diff --git a/core/state_processor_test.go b/core/state_processor_test.go index 2d90fcea1caa..bbf9c26a6132 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -430,6 +430,7 @@ func TestProcessVerkle(t *testing.T) { if err != nil { t.Fatal(err) } + t.Log("verfied verkle proof") _, err = blockchain.InsertChain(chain) if err != nil { diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index 2fd924bd87ef..0112723f7d08 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -23,6 +23,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/trie/utils" + "github.com/gballet/go-verkle" "github.com/holiman/uint256" ) @@ -177,15 +178,15 @@ func (in *EVMInterpreter) Run(contract *Contract, input []byte, readOnly bool) ( if in.evm.ChainConfig().IsCancun(in.evm.Context.BlockNumber) && !contract.IsDeployment { contract.Chunks = trie.ChunkifyCode(contract.Code) - totalEvals := len(contract.Code) / 31 / 256 - if len(contract.Code)%(256*31) != 0 { - totalEvals += 1 - } + // number of extra stems to evaluate after the header stem + extraEvals := (len(contract.Chunks) + 127) / verkle.NodeWidth - chunkEvals = make([][]byte, totalEvals) - for i := 0; i < totalEvals; i++ { + chunkEvals = make([][]byte, extraEvals+1) + for i := 1; i < extraEvals+1; i++ { chunkEvals[i] = utils.GetTreeKeyCodeChunkWithEvaluatedAddress(contract.AddressPoint(), uint256.NewInt(uint64(i)*256)) } + // Header account is already known, it's the header account + chunkEvals[0] = utils.GetTreeKeyVersionWithEvaluatedAddress(contract.AddressPoint()) } // The Interpreter main run loop (contextual). 
This loop runs until either an diff --git a/go.mod b/go.mod index 94667644e201..4dcd3342b7ee 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/cespare/cp v0.1.0 github.com/cloudflare/cloudflare-go v0.14.0 github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f - github.com/crate-crypto/go-ipa v0.0.0-20230410135559-ce4a96995014 + github.com/crate-crypto/go-ipa v0.0.0-20230710183535-d5eb1c4661bd github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set v1.8.0 github.com/docker/docker v1.6.2 @@ -23,7 +23,7 @@ require ( github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/gballet/go-verkle v0.0.0-20230414192453-2838510d5ee0 + github.com/gballet/go-verkle v0.0.0-20230725193842-b2d852dc666b github.com/go-stack/stack v1.8.0 github.com/golang-jwt/jwt/v4 v4.3.0 github.com/golang/protobuf v1.5.2 @@ -60,8 +60,8 @@ require ( github.com/urfave/cli/v2 v2.10.2 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 - golang.org/x/sync v0.1.0 - golang.org/x/sys v0.7.0 + golang.org/x/sync v0.3.0 + golang.org/x/sys v0.10.0 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 golang.org/x/text v0.3.7 golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba diff --git a/go.sum b/go.sum index f7ff7905464d..74475b03b37b 100644 --- a/go.sum +++ b/go.sum @@ -86,10 +86,8 @@ github.com/consensys/gnark-crypto v0.4.1-0.20210426202927-39ac3d4b3f1f/go.mod h1 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-ipa v0.0.0-20230315201338-1643fdc2ead8 h1:2EBbIwPDRqlCD2K34Eojyy0x9d3RhOuHAZfbQm508X8= -github.com/crate-crypto/go-ipa v0.0.0-20230315201338-1643fdc2ead8/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI= -github.com/crate-crypto/go-ipa v0.0.0-20230410135559-ce4a96995014 h1:bbyTlFQ12wkFA6aVL+9HrBZwVl85AN0VS/Bwam7o93U= -github.com/crate-crypto/go-ipa v0.0.0-20230410135559-ce4a96995014/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI= +github.com/crate-crypto/go-ipa v0.0.0-20230710183535-d5eb1c4661bd h1:jgf65Q4+jHFuLlhVApaVfTUwcU7dAdXK+GESow2UlaI= +github.com/crate-crypto/go-ipa v0.0.0-20230710183535-d5eb1c4661bd/go.mod h1:gzbVz57IDJgQ9rLQwfSk696JGWof8ftznEL9GoAv3NI= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= @@ -137,16 +135,12 @@ github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 h1:IZqZOB2fydHte3kUgx github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= -github.com/gballet/go-verkle v0.0.0-20230317174103-141354da6b11 h1:x4hiQFgr1SlqR4IoAZiXLFZK4L7KbibqkORqa1fwKp8= -github.com/gballet/go-verkle v0.0.0-20230317174103-141354da6b11/go.mod 
h1:IyOnn1kujMWaT+wet/6Ix1BtvYwateOBy9puuWH/8sw= -github.com/gballet/go-verkle v0.0.0-20230412090410-4015adc3d072 h1:gKcktHMBKLdtCSZnaG8tv9bFG80p1tp7MjU1Uvl9nag= -github.com/gballet/go-verkle v0.0.0-20230412090410-4015adc3d072/go.mod h1:P3bwGrLhsUNIsUDlq2yzMPvO1c/15oiB3JS85P+hNfw= -github.com/gballet/go-verkle v0.0.0-20230413104310-bd8d6d33de95 h1:s8p8L/dQVmr/mgMjGIsGnnpvJMYCdfv4GHadLd/ALug= -github.com/gballet/go-verkle v0.0.0-20230413104310-bd8d6d33de95/go.mod h1:P3bwGrLhsUNIsUDlq2yzMPvO1c/15oiB3JS85P+hNfw= -github.com/gballet/go-verkle v0.0.0-20230413135631-4bea2763ed0f h1:gP4uR2/1qx6hsIzbRI28JWcsVuP7xyjyj6SpLnoFobc= -github.com/gballet/go-verkle v0.0.0-20230413135631-4bea2763ed0f/go.mod h1:P3bwGrLhsUNIsUDlq2yzMPvO1c/15oiB3JS85P+hNfw= -github.com/gballet/go-verkle v0.0.0-20230414192453-2838510d5ee0 h1:ENyj6hcn+dtO8iJ1GTzM/gkhdrAFqMi65Yf99cppdPA= -github.com/gballet/go-verkle v0.0.0-20230414192453-2838510d5ee0/go.mod h1:P3bwGrLhsUNIsUDlq2yzMPvO1c/15oiB3JS85P+hNfw= +github.com/gballet/go-verkle v0.0.0-20230711114830-89d284b3f456 h1:ZZd48ay16TgKE1sl6pxrBCoygvsuvUc+7EU8UNZ3DJM= +github.com/gballet/go-verkle v0.0.0-20230711114830-89d284b3f456/go.mod h1:+k9fzNguudDonU5q4/TUaTdmiHw3h3oGOIVmqyhaA3E= +github.com/gballet/go-verkle v0.0.0-20230711131047-e8712ad59b6a h1:L+mkoO+l8Wo26XUJ+fL8t9J02SyyaDHp52djuEOmu1A= +github.com/gballet/go-verkle v0.0.0-20230711131047-e8712ad59b6a/go.mod h1:+k9fzNguudDonU5q4/TUaTdmiHw3h3oGOIVmqyhaA3E= +github.com/gballet/go-verkle v0.0.0-20230725193842-b2d852dc666b h1:2lDzSxjCii8FxrbuxtlFtFiw6c4nTPl9mhaZ6lgpwws= +github.com/gballet/go-verkle v0.0.0-20230725193842-b2d852dc666b/go.mod h1:+k9fzNguudDonU5q4/TUaTdmiHw3h3oGOIVmqyhaA3E= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -521,8 +515,9 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -564,10 +559,8 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= diff --git a/les/server_requests.go b/les/server_requests.go index b0eb2371e028..a1402a339f94 100644 --- a/les/server_requests.go +++ b/les/server_requests.go @@ -428,7 +428,7 @@ func handleGetProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) { p.bumpInvalid() continue } - trie, err = statedb.OpenStorageTrie(root, common.BytesToHash(request.AccKey), account.Root) + trie, err = statedb.OpenStorageTrie(root, common.BytesToHash(request.AccKey), account.Root, nil) if trie == nil || err != nil { p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "root", account.Root, "err", err) continue diff --git a/light/odr_test.go b/light/odr_test.go index 903c7f6f90a6..0b747e66c1b7 100644 --- a/light/odr_test.go +++ b/light/odr_test.go @@ -87,7 +87,7 @@ func (odr *testOdr) Retrieve(ctx context.Context, req OdrRequest) error { t state.Trie ) if len(req.Id.AccKey) > 0 { - t, err = odr.serverState.OpenStorageTrie(req.Id.StateRoot, common.BytesToHash(req.Id.AccKey), req.Id.Root) + t, err = odr.serverState.OpenStorageTrie(req.Id.StateRoot, common.BytesToHash(req.Id.AccKey), req.Id.Root, nil) } else { t, err = odr.serverState.OpenTrie(req.Id.Root) } diff --git a/light/trie.go b/light/trie.go index 83c9f6910661..721d77dcbaae 100644 --- a/light/trie.go +++ b/light/trie.go @@ -54,7 +54,7 @@ func (db *odrDatabase) OpenTrie(root common.Hash) (state.Trie, error) { return &odrTrie{db: db, id: db.id}, nil } -func (db *odrDatabase) OpenStorageTrie(state, addrHash, root common.Hash) (state.Trie, error) { +func (db *odrDatabase) OpenStorageTrie(state, addrHash, root common.Hash, _ state.Trie) (state.Trie, error) { return &odrTrie{db: db, id: StorageTrieID(db.id, addrHash, root)}, nil } diff --git a/params/protocol_params.go b/params/protocol_params.go index 70cd72a0916f..884fdd9f25d5 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -159,11 +159,11 @@ const ( RefundQuotientEIP3529 uint64 = 5 // Verkle tree EIP: costs associated to witness accesses - WitnessBranchReadCost = uint64(1900) - WitnessChunkReadCost = uint64(200) - WitnessBranchWriteCost = uint64(3000) - WitnessChunkWriteCost = uint64(500) - WitnessChunkFillCost = uint64(6200) + WitnessBranchReadCost = uint64(0) + WitnessChunkReadCost = uint64(0) + WitnessBranchWriteCost = uint64(0) + WitnessChunkWriteCost = uint64(0) + WitnessChunkFillCost = uint64(0) ) // Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations diff --git a/trie/database.go b/trie/database.go index d79296aa4daa..70d9ae6f299e 100644 --- a/trie/database.go +++ b/trie/database.go @@ -87,9 +87,47 @@ type Database struct { childrenSize common.StorageSize // Storage size of the external children tracking preimages *preimageStore // The store for caching preimages + // XXX after the shapella 
rebase, use common.Address as a key type + addrToRoot map[string]common.Hash + addrToRootLock sync.RWMutex + lock sync.RWMutex } +func (db *Database) HasStorageRootConversion(key []byte) bool { + db.addrToRootLock.RLock() + defer db.addrToRootLock.RUnlock() + if db.addrToRoot == nil { + return false + } + _, ok := db.addrToRoot[string(key)] + return ok +} + +func (db *Database) SetStorageRootConversion(addr []byte, root common.Hash) { + db.addrToRootLock.Lock() + defer db.addrToRootLock.Unlock() + if db.addrToRoot == nil { + db.addrToRoot = make(map[string]common.Hash) + } + db.addrToRoot[string(addr)] = root +} + +func (db *Database) StorageRootConversion(addr []byte) common.Hash { + db.addrToRootLock.RLock() + defer db.addrToRootLock.RUnlock() + if db.addrToRoot == nil { + return common.Hash{} + } + return db.addrToRoot[string(addr)] +} + +func (db *Database) ClearStorageRootConversion(addr []byte) { + db.addrToRootLock.Lock() + defer db.addrToRootLock.Unlock() + delete(db.addrToRoot, string(addr)) +} + // rawNode is a simple binary blob used to differentiate between collapsed trie // nodes and already encoded RLP binary blobs (while at the same time store them // in the same cache fields). diff --git a/trie/transition.go b/trie/transition.go new file mode 100644 index 000000000000..d68c12e8830e --- /dev/null +++ b/trie/transition.go @@ -0,0 +1,199 @@ +// Copyright 2021 go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package trie + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/rlp" + "github.com/gballet/go-verkle" +) + +type TransitionTrie struct { + overlay *VerkleTrie + base *SecureTrie + storage bool +} + +func NewTransitionTree(base *SecureTrie, overlay *VerkleTrie, st bool) *TransitionTrie { + return &TransitionTrie{ + overlay: overlay, + base: base, + storage: st, + } +} + +func (t *TransitionTrie) Base() *SecureTrie { + return t.base +} + +// TODO(gballet/jsign): consider removing this API. +func (t *TransitionTrie) Overlay() *VerkleTrie { + return t.overlay +} + +// GetKey returns the sha3 preimage of a hashed key that was previously used +// to store a value. +// +// TODO(fjl): remove this when StateTrie is removed +func (t *TransitionTrie) GetKey(key []byte) []byte { + if key := t.overlay.GetKey(key); key != nil { + return key + } + return t.base.GetKey(key) +} + +// TryGet returns the value for key stored in the trie. The value bytes must +// not be modified by the caller. If a node was not found in the database, a +// trie.MissingNodeError is returned. 
+func (t *TransitionTrie) TryGet(addr, key []byte) ([]byte, error) { + if val, err := t.overlay.TryGet(addr, key); len(val) != 0 || err != nil { + return val, nil + } + // TODO also insert value into overlay + rlpval, err := t.base.TryGet(nil, key) + if err != nil { + return nil, err + } + if len(rlpval) == 0 { + return nil, nil + } + // the value will come as RLP, decode it so that the + // interface is consistent. + _, content, _, err := rlp.Split(rlpval) + if err != nil || len(content) == 0 { + return nil, err + } + var v [32]byte + copy(v[32-len(content):], content) + return v[:], nil +} + +// TryGetAccount abstract an account read from the trie. +func (t *TransitionTrie) TryGetAccount(key []byte) (*types.StateAccount, error) { + data, err := t.overlay.TryGetAccount(key) + if err != nil { + // WORKAROUND, see the definition of errDeletedAccount + // for an explainer of why this if is needed. + if err == errDeletedAccount { + return nil, nil + } + return nil, err + } + if data != nil { + if t.overlay.db.HasStorageRootConversion(key) { + data.Root = t.overlay.db.StorageRootConversion(key) + } + return data, nil + } + // TODO also insert value into overlay + return t.base.TryGetAccount(key) +} + +// TryUpdate associates key with value in the trie. If value has length zero, any +// existing value is deleted from the trie. The value bytes must not be modified +// by the caller while they are stored in the trie. If a node was not found in the +// database, a trie.MissingNodeError is returned. +func (t *TransitionTrie) TryUpdate(address, key []byte, value []byte) error { + return t.overlay.TryUpdate(address, key, value) +} + +// TryUpdateAccount abstract an account write to the trie. +func (t *TransitionTrie) TryUpdateAccount(key []byte, account *types.StateAccount) error { + if account.Root != (common.Hash{}) && account.Root != emptyRoot { + t.overlay.db.SetStorageRootConversion(key, account.Root) + } + return t.overlay.TryUpdateAccount(key, account) +} + +// TryDelete removes any existing value for key from the trie. If a node was not +// found in the database, a trie.MissingNodeError is returned. +func (t *TransitionTrie) TryDelete(addr, key []byte) error { + return t.overlay.TryDelete(addr, key) +} + +// TryDeleteAccount abstracts an account deletion from the trie. +func (t *TransitionTrie) TryDeleteAccount(key []byte) error { + return t.overlay.TryDeleteAccount(key) +} + +// Hash returns the root hash of the trie. It does not write to the database and +// can be used even if the trie doesn't have one. +func (t *TransitionTrie) Hash() common.Hash { + return t.overlay.Hash() +} + +// Commit collects all dirty nodes in the trie and replace them with the +// corresponding node hash. All collected nodes(including dirty leaves if +// collectLeaf is true) will be encapsulated into a nodeset for return. +// The returned nodeset can be nil if the trie is clean(nothing to commit). +// Once the trie is committed, it's not usable anymore. A new trie must +// be created with new root and updated trie database for following usage +func (t *TransitionTrie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) { + // Just return if the trie is a storage trie: otherwise, + // the overlay trie will be committed as many times as + // there are storage tries. This would kill performance. + if t.storage { + return common.Hash{}, nil, nil + } + return t.overlay.Commit(collectLeaf) +} + +// NodeIterator returns an iterator that returns nodes of the trie. 
Iteration +// starts at the key after the given start key. +func (t *TransitionTrie) NodeIterator(startKey []byte) NodeIterator { + panic("not implemented") // TODO: Implement +} + +// Prove constructs a Merkle proof for key. The result contains all encoded nodes +// on the path to the value at key. The value itself is also included in the last +// node and can be retrieved by verifying the proof. +// +// If the trie does not contain a value for key, the returned proof contains all +// nodes of the longest existing prefix of the key (at least the root), ending +// with the node that proves the absence of the key. +func (t *TransitionTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error { + panic("not implemented") // TODO: Implement +} + +// IsVerkle returns true if the trie is verkle-tree based +func (t *TransitionTrie) IsVerkle() bool { + // For all intents and purposes, the calling code should treat this as a verkle trie + return true +} + +func (t *TransitionTrie) TryUpdateStem(key []byte, values [][]byte) error { + trie := t.overlay + resolver := func(h []byte) ([]byte, error) { + return trie.db.diskdb.Get(append([]byte("flat-"), h...)) + } + switch root := trie.root.(type) { + case *verkle.InternalNode: + return root.InsertStem(key, values, resolver) + default: + panic("invalid root type") + } +} + +func (t *TransitionTrie) Copy() *TransitionTrie { + return &TransitionTrie{ + overlay: t.overlay.Copy(), + base: t.base.Copy(), + storage: t.storage, + } +} diff --git a/trie/verkle.go b/trie/verkle.go index 295cf3cf949d..0b305fba1080 100644 --- a/trie/verkle.go +++ b/trie/verkle.go @@ -50,6 +50,12 @@ func NewVerkleTrie(root verkle.VerkleNode, db *Database, pointCache *utils.Point } } +func (trie *VerkleTrie) InsertMigratedLeaves(leaves []verkle.LeafNode) error { + return trie.root.(*verkle.InternalNode).InsertMigratedLeaves(leaves, func(path []byte) ([]byte, error) { + return trie.db.diskdb.Get(append([]byte("flat-"), path...)) + }) +} + var errInvalidProof = errors.New("invalid proof") // GetKey returns the sha3 preimage of a hashed key that was previously used @@ -62,22 +68,37 @@ func (trie *VerkleTrie) GetKey(key []byte) []byte { // not be modified by the caller. If a node was not found in the database, a // trie.MissingNodeError is returned. func (trie *VerkleTrie) TryGet(addr, key []byte) ([]byte, error) { - pointEval := trie.pointCache.GetTreeKeyHeader(key) + resolver := func(path []byte) ([]byte, error) { + return trie.db.diskdb.Get(append([]byte("flat-"), path...)) + } + pointEval := trie.pointCache.GetTreeKeyHeader(addr) k := utils.GetTreeKeyStorageSlotWithEvaluatedAddress(pointEval, key) - return trie.root.Get(k, trie.db.diskdb.Get) + return trie.root.Get(k, resolver) } // GetWithHashedKey returns the value, assuming that the key has already // been hashed. func (trie *VerkleTrie) GetWithHashedKey(key []byte) ([]byte, error) { - return trie.root.Get(key, trie.db.diskdb.Get) + resolver := func(path []byte) ([]byte, error) { + return trie.db.diskdb.Get(append([]byte("flat-"), path...)) + } + return trie.root.Get(key, resolver) } +// WORKAROUND: this special error is returned if it has been +// detected that the account was deleted in the verkle tree. +// This is needed in case an account was translated while it +// was in the MPT, and was selfdestructed in verkle mode. +// +// This is only a problem for replays, and this code is not +// needed after SELFDESTRUCT has been removed. 
+// WORKAROUND: this special error is returned if it has been
+// detected that the account was deleted in the verkle tree.
+// This is needed in case an account was translated while it
+// was in the MPT, and was selfdestructed in verkle mode.
+//
+// This is only a problem for replays, and this code will no
+// longer be needed once SELFDESTRUCT has been removed.
+var errDeletedAccount = errors.New("account deleted in VKT")
+
 func (t *VerkleTrie) TryGetAccount(key []byte) (*types.StateAccount, error) {
 	var (
 		acc      *types.StateAccount = &types.StateAccount{}
-		resolver                     = func(hash []byte) ([]byte, error) {
-			return t.db.diskdb.Get(hash)
+		resolver                     = func(path []byte) ([]byte, error) {
+			return t.db.diskdb.Get(append([]byte("flat-"), path...))
 		}
 	)
 	versionkey := t.pointCache.GetTreeKeyVersionCached(key)
@@ -92,11 +113,16 @@ func (t *VerkleTrie) TryGetAccount(key []byte) (*types.StateAccount, error) {
 	if len(values[utils.NonceLeafKey]) > 0 {
 		acc.Nonce = binary.LittleEndian.Uint64(values[utils.NonceLeafKey])
 	}
-	balance := values[utils.BalanceLeafKey]
-	if len(balance) > 0 {
-		for i := 0; i < len(balance)/2; i++ {
-			balance[len(balance)-i-1], balance[i] = balance[i], balance[len(balance)-i-1]
-		}
+	// If the account has been deleted, then values[10] will be 0 and not nil. If it
+	// has been recreated since, its code keccak will NOT be 0. So signal a deleted
+	// account when the nonce is 0, values[10] is present, and the code keccak is zero.
+	if acc.Nonce == 0 && len(values) > 10 && len(values[10]) > 0 && bytes.Equal(values[utils.CodeKeccakLeafKey], zero[:]) {
+		return nil, errDeletedAccount
+	}
+	var balance [32]byte
+	copy(balance[:], values[utils.BalanceLeafKey])
+	for i := 0; i < len(balance)/2; i++ {
+		balance[len(balance)-i-1], balance[i] = balance[i], balance[len(balance)-i-1]
 	}
 	acc.Balance = new(big.Int).SetBytes(balance[:])
 	acc.CodeHash = values[utils.CodeKeccakLeafKey]
@@ -129,15 +155,13 @@ func (t *VerkleTrie) TryUpdateAccount(key []byte, acc *types.StateAccount) error
 		}
 	}
 
-	flusher := func(hash []byte) ([]byte, error) {
-		return t.db.diskdb.Get(hash)
+	resolver := func(path []byte) ([]byte, error) {
+		return t.db.diskdb.Get(append([]byte("flat-"), path...))
 	}
 	switch root := t.root.(type) {
 	case *verkle.InternalNode:
-		err = root.InsertStem(stem, values, flusher)
-	case *verkle.StatelessNode:
-		err = root.InsertAtStem(stem, values, flusher, true)
+		err = root.InsertStem(stem, values, resolver)
 	}
 	if err != nil {
 		return fmt.Errorf("TryUpdateAccount (%x) error: %v", key, err)
@@ -148,14 +172,12 @@ func (t *VerkleTrie) TryUpdateAccount(key []byte, acc *types.StateAccount) error
 }
 
 func (trie *VerkleTrie) TryUpdateStem(key []byte, values [][]byte) error {
-	resolver := func(h []byte) ([]byte, error) {
-		return trie.db.diskdb.Get(h)
+	resolver := func(path []byte) ([]byte, error) {
+		return trie.db.diskdb.Get(append([]byte("flat-"), path...))
 	}
 	switch root := trie.root.(type) {
 	case *verkle.InternalNode:
 		return root.InsertStem(key, values, resolver)
-	case *verkle.StatelessNode:
-		return root.InsertAtStem(key, values, resolver, true)
 	default:
 		panic("invalid root type")
 	}
@@ -169,8 +191,8 @@ func (trie *VerkleTrie) TryUpdate(address, key, value []byte) error {
 	k := utils.GetTreeKeyStorageSlotWithEvaluatedAddress(trie.pointCache.GetTreeKeyHeader(address), key)
 	var v [32]byte
 	copy(v[:], value[:])
-	return trie.root.Insert(k, v[:], func(h []byte) ([]byte, error) {
-		return trie.db.diskdb.Get(h)
+	return trie.root.Insert(k, v[:], func(path []byte) ([]byte, error) {
+		return trie.db.diskdb.Get(append([]byte("flat-"), path...))
 	})
 }
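The balance handling above also works around an encoding mismatch: verkle account leaves store the balance as 32 little-endian bytes, while big.Int.SetBytes expects big-endian input, hence the in-place byte reversal. A small sketch of just that decoding step (balanceFromLeaf is an illustrative name, not part of the diff):

package main

import (
	"fmt"
	"math/big"
)

// balanceFromLeaf converts a little-endian verkle balance leaf into a big.Int.
func balanceFromLeaf(leaf []byte) *big.Int {
	var balance [32]byte
	copy(balance[:], leaf)
	// Reverse in place: big.Int.SetBytes interprets its input as big-endian.
	for i := 0; i < len(balance)/2; i++ {
		balance[i], balance[len(balance)-1-i] = balance[len(balance)-1-i], balance[i]
	}
	return new(big.Int).SetBytes(balance[:])
}

func main() {
	leaf := make([]byte, 32)
	leaf[0] = 0xe8 // 1000 = 0x03e8, stored little-endian
	leaf[1] = 0x03
	fmt.Println(balanceFromLeaf(leaf)) // 1000
}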
@@ -185,15 +207,13 @@ func (t *VerkleTrie) TryDeleteAccount(key []byte) error {
 		values[i] = zero[:]
 	}
 
-	resolver := func(hash []byte) ([]byte, error) {
-		return t.db.diskdb.Get(hash)
+	resolver := func(path []byte) ([]byte, error) {
+		return t.db.diskdb.Get(append([]byte("flat-"), path...))
 	}
 	switch root := t.root.(type) {
 	case *verkle.InternalNode:
 		err = root.InsertStem(stem, values, resolver)
-	case *verkle.StatelessNode:
-		err = root.InsertAtStem(stem, values, resolver, true)
 	}
 	if err != nil {
 		return fmt.Errorf("TryDeleteAccount (%x) error: %v", key, err)
@@ -206,10 +226,11 @@ func (t *VerkleTrie) TryDeleteAccount(key []byte) error {
 // TryDelete removes any existing value for key from the trie. If a node was not
 // found in the database, a trie.MissingNodeError is returned.
 func (trie *VerkleTrie) TryDelete(addr, key []byte) error {
-	pointEval := trie.pointCache.GetTreeKeyHeader(key)
+	pointEval := trie.pointCache.GetTreeKeyHeader(addr)
 	k := utils.GetTreeKeyStorageSlotWithEvaluatedAddress(pointEval, key)
-	return trie.root.Delete(k, func(h []byte) ([]byte, error) {
-		return trie.db.diskdb.Get(h)
+	var zero [32]byte
+	return trie.root.Insert(k, zero[:], func(path []byte) ([]byte, error) {
+		return trie.db.diskdb.Get(append([]byte("flat-"), path...))
 	})
 }
 
@@ -236,13 +257,27 @@ func (trie *VerkleTrie) Commit(_ bool) (common.Hash, *NodeSet, error) {
 		return common.Hash{}, nil, fmt.Errorf("serializing tree nodes: %s", err)
 	}
 
+	batch := trie.db.diskdb.NewBatch()
+	const keyPrefix = "flat-"
+	path := make([]byte, 0, len(keyPrefix)+32)
+	path = append(path, []byte(keyPrefix)...)
 	for _, node := range nodes {
-		if err := trie.db.diskdb.Put(node.CommitmentBytes[:], node.SerializedBytes); err != nil {
+		path := append(path[:len(keyPrefix)], node.Path...)
+
+		if err := batch.Put(path, node.SerializedBytes); err != nil {
 			return common.Hash{}, nil, fmt.Errorf("put node to disk: %s", err)
 		}
+
+		if batch.ValueSize() >= ethdb.IdealBatchSize {
+			batch.Write()
+			batch.Reset()
+		}
 	}
+	batch.Write()
 
-	return nodes[0].CommitmentBytes, NewNodeSet(common.Hash{}), nil
+	// Serialize the root commitment and return it as the new root hash
+	rootH := root.Hash().BytesLE()
+	return common.BytesToHash(rootH[:]), nil, nil
 }
 
 // NodeIterator returns an iterator that returns nodes of the trie. Iteration
@@ -262,10 +297,10 @@ func (trie *VerkleTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValue
 	panic("not implemented")
 }
 
-func (trie *VerkleTrie) Copy(db *Database) *VerkleTrie {
+func (trie *VerkleTrie) Copy() *VerkleTrie {
 	return &VerkleTrie{
 		root: trie.root.Copy(),
-		db:   db,
+		db:   trie.db,
 	}
 }
 
@@ -274,7 +309,7 @@ func (trie *VerkleTrie) IsVerkle() bool {
 }
 
 func (trie *VerkleTrie) ProveAndSerialize(keys [][]byte, kv map[string][]byte) (*verkle.VerkleProof, verkle.StateDiff, error) {
-	proof, _, _, _, err := verkle.MakeVerkleMultiProof(trie.root, keys, kv)
+	proof, _, _, _, err := verkle.MakeVerkleMultiProof(trie.root, keys)
 	if err != nil {
 		return nil, nil, err
 	}
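Commit above also switches from one database Put per node to batched writes that are flushed whenever the pending batch reaches ethdb.IdealBatchSize. The same idiom in isolation, against an in-memory database and with dummy data standing in for serialized verkle nodes (a sketch, not part of the diff):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

func main() {
	db := memorydb.New()
	batch := db.NewBatch()

	// Dummy (path, serialized-node) pairs standing in for real verkle nodes.
	for i := 0; i < 1000; i++ {
		key := append([]byte("flat-"), byte(i), byte(i>>8))
		if err := batch.Put(key, make([]byte, 64)); err != nil {
			panic(err)
		}
		// Flush periodically so the pending batch stays bounded in memory.
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				panic(err)
			}
			batch.Reset()
		}
	}
	// Flush whatever is left over.
	if err := batch.Write(); err != nil {
		panic(err)
	}
	fmt.Println("wrote", db.Len(), "entries")
}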
@@ -350,9 +385,11 @@ func deserializeVerkleProof(vp *verkle.VerkleProof, rootC *verkle.Point, statedi
 		}
 	}
 
-	pe, _, _ := tree.GetProofItems(proof.Keys)
+	// no need to resolve as the tree has been reconstructed from the proof
+	// and must not contain any unresolved nodes.
+	pe, _, _, err := tree.GetProofItems(proof.Keys)
 
-	return proof, pe.Cis, pe.Zis, pe.Yis, nil
+	return proof, pe.Cis, pe.Zis, pe.Yis, err
 }
 
 // ChunkedCode represents a sequence of 32-bytes chunks of code (31 bytes of which
@@ -420,3 +457,11 @@ func ChunkifyCode(code []byte) ChunkedCode {
 
 	return chunks
 }
+
+func (t *VerkleTrie) SetStorageRootConversion(key []byte, root common.Hash) {
+	t.db.SetStorageRootConversion(key, root)
+}
+
+func (t *VerkleTrie) ClearStorageRootConversion(addr []byte) {
+	t.db.ClearStorageRootConversion(addr)
+}
diff --git a/trie/verkle_iterator.go b/trie/verkle_iterator.go
index 9c57e5da1956..fef5b0242b11 100644
--- a/trie/verkle_iterator.go
+++ b/trie/verkle_iterator.go
@@ -104,7 +104,7 @@ func (it *verkleNodeIterator) Next(descend bool) bool {
 	if err != nil {
 		panic(err)
 	}
-	it.current, err = verkle.ParseNode(data, byte(len(it.stack)-1), nodeToDBKey(node))
+	it.current, err = verkle.ParseNode(data, byte(len(it.stack)-1))
 	if err != nil {
 		panic(err)
 	}
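Finally, with the resolver and iterator changes above, reading a node back only requires its path and depth: the serialized bytes are fetched under the "flat-" key and parsed with the two-argument verkle.ParseNode this branch builds against. A hedged sketch of that read path (loadNode is a hypothetical helper; the assumption that a node's depth equals the length of its path is mine, not stated in the diff):

package trie

import (
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/gballet/go-verkle"
)

// loadNode is a sketch, not part of the diff: fetch a serialized verkle node
// by its tree path from the flat keyspace and parse it at the matching depth.
func loadNode(diskdb ethdb.KeyValueReader, path []byte) (verkle.VerkleNode, error) {
	data, err := diskdb.Get(append([]byte("flat-"), path...))
	if err != nil {
		return nil, err
	}
	// Assumption: the depth of a node equals the length of its path from the root.
	return verkle.ParseNode(data, byte(len(path)))
}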