From 94e8cc200e92179156c730facc03fe07ed946eb9 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 31 Oct 2024 09:51:32 -0500 Subject: [PATCH 01/52] Combine domain payloads and provide OTF migration This commit: 1. Combines all domain (non-atree) payloads into one account (non-atree) payload per account. 2. Combines all domain (atree) storage maps into one account (atree) storage map per account. 3. Uses on-the-fly (OTF) migration when an account is modified (write ops). Currently, accounts store data on-chain under pre-defined domains, such as "storage". Each domain requires domain payload (8-byte non-atree payload) and domain storage map payload (atree payload). Also, each payload requires ~2 mtrie nodes (~2x96 byte overhead). New domains were added in Cadence 1.0 and domain payloads count increased to 150 million on Sept. 4 (was 80 million pre-spork). Nearly 25% of total payloads on-chain are 8-byte domain payloads. Each account on mainnet has an average of ~4 domain payloads and ~4 domain storage maps. This commit creates 1 account (non-atree) payload and 1 account (atree) storage map per account, eliminating all domain (non-atree) payloads and domain storage maps for that given account. Based on preliminary estimates using Sept. 17, 2024 mainnet state, this approach can: - eliminate mtrie nodes: -425 million (-28.5%) - reduce payload count: -174 million (also -28.5%) This commit also includes on-the-fly migration so we can see improvements to accounts that have write activity without requiring downtime. Given this, we won't see the full benefits/impact until all accounts (including idle accounts) are eventually migrated (e.g. using full migration or other means). --- cmd/decode-state-values/main.go | 2 +- go.mod | 4 +- go.sum | 10 +- interpreter/account_storagemap.go | 341 ++++ interpreter/account_storagemap_test.go | 766 ++++++++ interpreter/interpreter.go | 12 +- interpreter/interpreter_tracing.go | 21 +- interpreter/misc_test.go | 2 +- interpreter/storage.go | 9 +- interpreter/storage_test.go | 2 +- interpreter/storagemap.go | 170 +- interpreter/storagemap_test.go | 711 ++++++++ interpreter/stringatreevalue_test.go | 13 +- interpreter/value_test.go | 2 +- runtime/capabilitycontrollers_test.go | 6 + runtime/contract_test.go | 2 +- runtime/environment.go | 1 + runtime/ft_test.go | 2 + runtime/migrate_domain_registers.go | 187 ++ runtime/migrate_domain_registers_test.go | 594 ++++++ runtime/runtime_memory_metering_test.go | 12 +- runtime/runtime_test.go | 45 +- runtime/sharedstate_test.go | 49 + runtime/storage.go | 483 ++++- runtime/storage_test.go | 1825 ++++++++++++++++++- stdlib/account.go | 6 + test_utils/interpreter_utils/interpreter.go | 16 +- test_utils/runtime_utils/storage.go | 42 + test_utils/runtime_utils/testledger.go | 32 +- 29 files changed, 5200 insertions(+), 167 deletions(-) create mode 100644 interpreter/account_storagemap.go create mode 100644 interpreter/account_storagemap_test.go create mode 100644 interpreter/storagemap_test.go create mode 100644 runtime/migrate_domain_registers.go create mode 100644 runtime/migrate_domain_registers_test.go create mode 100644 test_utils/runtime_utils/storage.go diff --git a/cmd/decode-state-values/main.go b/cmd/decode-state-values/main.go index 70ecd7e7b4..b6f781258f 100644 --- a/cmd/decode-state-values/main.go +++ b/cmd/decode-state-values/main.go @@ -234,7 +234,7 @@ type interpreterStorage struct { var _ interpreter.Storage = &interpreterStorage{} -func (i interpreterStorage) 
GetStorageMap(_ common.Address, _ string, _ bool) *interpreter.StorageMap { +func (i interpreterStorage) GetStorageMap(_ *interpreter.Interpreter, _ common.Address, _ string, _ bool) *interpreter.DomainStorageMap { panic("unexpected GetStorageMap call") } diff --git a/go.mod b/go.mod index 4ee8098b0f..95b571dee7 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/kr/pretty v0.3.1 github.com/leanovate/gopter v0.2.9 github.com/logrusorgru/aurora/v4 v4.0.0 - github.com/onflow/atree v0.8.0 + github.com/onflow/atree v0.8.1-0.20241028213850-07c884e4abcf github.com/rivo/uniseg v0.4.4 github.com/schollz/progressbar/v3 v3.13.1 github.com/stretchr/testify v1.9.0 @@ -54,7 +54,7 @@ require ( github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/zeebo/assert v1.3.0 // indirect - github.com/zeebo/blake3 v0.2.3 // indirect + github.com/zeebo/blake3 v0.2.4 // indirect golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect golang.org/x/sys v0.15.0 // indirect golang.org/x/term v0.6.0 // indirect diff --git a/go.sum b/go.sum index c5711d90b7..f88da9f71f 100644 --- a/go.sum +++ b/go.sum @@ -37,7 +37,6 @@ github.com/k0kubun/pp v3.0.1+incompatible h1:3tqvf7QgUnZ5tXO6pNAZlrvHgl6DvifjDrd github.com/k0kubun/pp v3.0.1+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= github.com/k0kubun/pp/v3 v3.2.0 h1:h33hNTZ9nVFNP3u2Fsgz8JXiF5JINoZfFq4SvKJwNcs= github.com/k0kubun/pp/v3 v3.2.0/go.mod h1:ODtJQbQcIRfAD3N+theGCV1m/CBxweERz2dapdz1EwA= -github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.2.0 h1:4ZexSFt8agMNzNisrsilL6RClWDC5YJnLHNIfTy4iuc= github.com/klauspost/cpuid/v2 v2.2.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/kodova/html-to-markdown v1.0.1 h1:MJxQAnqxtss3DaPnm72DRV65HZiMQZF3DUAfEaTg+14= @@ -75,8 +74,8 @@ github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2Em github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/onflow/atree v0.8.0 h1:qg5c6J1gVDNObughpEeWm8oxqhPGdEyGrda121GM4u0= -github.com/onflow/atree v0.8.0/go.mod h1:yccR+LR7xc1Jdic0mrjocbHvUD7lnVvg8/Ct1AA5zBo= +github.com/onflow/atree v0.8.1-0.20241028213850-07c884e4abcf h1:MDB/hdwr5GMsZHIIrAw3gBnmWBy5XjsZ4/6kftv9d5c= +github.com/onflow/atree v0.8.1-0.20241028213850-07c884e4abcf/go.mod h1:U8PGG42VrSJqjdfJ9NGQ2fenkyFRYlgtfHsZM61H4zY= github.com/onflow/crypto v0.25.0 h1:BeWbLsh3ZD13Ej+Uky6kg1PL1ZIVBDVX+2MVBNwqddg= github.com/onflow/crypto v0.25.0/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -108,11 +107,10 @@ github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d h1:5JInRQbk5UBX github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d/go.mod h1:Nlx5Y115XQvNcIdIy7dZXaNSUpzwBSge4/Ivk93/Yog= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/assert v1.3.0 
h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= -github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= +github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI= +github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= go.opentelemetry.io/otel v1.8.0 h1:zcvBFizPbpa1q7FehvFiHbQwGzmPILebO0tyqIR5Djg= diff --git a/interpreter/account_storagemap.go b/interpreter/account_storagemap.go new file mode 100644 index 0000000000..67a51c1c35 --- /dev/null +++ b/interpreter/account_storagemap.go @@ -0,0 +1,341 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package interpreter + +import ( + goerrors "errors" + + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/errors" +) + +// AccountStorageMap stores domain storage maps in an account. +type AccountStorageMap struct { + orderedMap *atree.OrderedMap +} + +// NewAccountStorageMap creates account storage map. +func NewAccountStorageMap( + memoryGauge common.MemoryGauge, + storage atree.SlabStorage, + address atree.Address, +) *AccountStorageMap { + common.UseMemory(memoryGauge, common.StorageMapMemoryUsage) + + orderedMap, err := atree.NewMap( + storage, + address, + atree.NewDefaultDigesterBuilder(), + emptyTypeInfo, + ) + if err != nil { + panic(errors.NewExternalError(err)) + } + + return &AccountStorageMap{ + orderedMap: orderedMap, + } +} + +// NewAccountStorageMapWithRootID loads existing account storage map with given atree SlabID. +func NewAccountStorageMapWithRootID( + storage atree.SlabStorage, + slabID atree.SlabID, +) *AccountStorageMap { + orderedMap, err := atree.NewMapWithRootID( + storage, + slabID, + atree.NewDefaultDigesterBuilder(), + ) + if err != nil { + panic(errors.NewExternalError(err)) + } + + return &AccountStorageMap{ + orderedMap: orderedMap, + } +} + +// DomainExists returns true if the given domain exists in the account storage map. +func (s *AccountStorageMap) DomainExists(domain string) bool { + key := StringStorageMapKey(domain) + + exists, err := s.orderedMap.Has( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + ) + if err != nil { + panic(errors.NewExternalError(err)) + } + + return exists +} + +// GetDomain returns domain storage map for the given domain. +// If createIfNotExists is true and domain doesn't exist, new domain storage map +// is created and inserted into account storage map with given domain as key. 
+func (s *AccountStorageMap) GetDomain( + gauge common.MemoryGauge, + interpreter *Interpreter, + domain string, + createIfNotExists bool, +) *DomainStorageMap { + key := StringStorageMapKey(domain) + + storedValue, err := s.orderedMap.Get( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + ) + if err != nil { + var keyNotFoundError *atree.KeyNotFoundError + if goerrors.As(err, &keyNotFoundError) { + // Create domain storage map if needed. + + if createIfNotExists { + return s.NewDomain(gauge, interpreter, domain) + } + + return nil + } + + panic(errors.NewExternalError(err)) + } + + // Create domain storage map from raw atree value. + return NewDomainStorageMapWithAtreeValue(storedValue) +} + +// NewDomain creates new domain storage map and inserts it to AccountStorageMap with given domain as key. +func (s *AccountStorageMap) NewDomain( + gauge common.MemoryGauge, + interpreter *Interpreter, + domain string, +) *DomainStorageMap { + interpreter.recordStorageMutation() + + domainStorageMap := NewDomainStorageMap(gauge, s.orderedMap.Storage, s.orderedMap.Address()) + + key := StringStorageMapKey(domain) + + existingStorable, err := s.orderedMap.Set( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + domainStorageMap.orderedMap, + ) + if err != nil { + panic(errors.NewExternalError(err)) + } + if existingStorable != nil { + panic(errors.NewUnexpectedError( + "account %x domain %s should not exist", + s.orderedMap.Address(), domain, + )) + } + + return domainStorageMap +} + +// WriteDomain sets or removes domain storage map in account storage map. +// If the given storage map is nil, domain is removed. +// If the given storage map is non-nil, domain is added/updated. +// Returns true if domain storage map previously existed at the given domain. +func (s *AccountStorageMap) WriteDomain( + interpreter *Interpreter, + domain string, + storageMap *DomainStorageMap, +) (existed bool) { + if storageMap == nil { + return s.removeDomain(interpreter, domain) + } + return s.setDomain(interpreter, domain, storageMap) +} + +// setDomain sets domain storage map in the account storage map and returns true if domain previously existed. +// If the given domain already stores a domain storage map, it is overwritten. +func (s *AccountStorageMap) setDomain( + interpreter *Interpreter, + domain string, + storageMap *DomainStorageMap, +) (existed bool) { + interpreter.recordStorageMutation() + + key := StringStorageMapKey(domain) + + existingValueStorable, err := s.orderedMap.Set( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + storageMap.orderedMap, + ) + if err != nil { + panic(errors.NewExternalError(err)) + } + + existed = existingValueStorable != nil + if existed { + // Create domain storage map from overwritten storable + domainStorageMap := newDomainStorageMapWithAtreeStorable(s.orderedMap.Storage, existingValueStorable) + + // Deep remove elements in domain storage map + domainStorageMap.DeepRemove(interpreter, true) + + // Remove domain storage map slab + interpreter.RemoveReferencedSlab(existingValueStorable) + } + + interpreter.maybeValidateAtreeValue(s.orderedMap) + + // NOTE: Don't call maybeValidateAtreeStorage() here because it is possible + // that domain storage map is in the process of being migrated to account + // storage map and state isn't consistent during migration. + + return +} + +// removeDomain removes domain storage map with given domain in account storage map, if it exists. 
+func (s *AccountStorageMap) removeDomain(interpreter *Interpreter, domain string) (existed bool) { + interpreter.recordStorageMutation() + + key := StringStorageMapKey(domain) + + existingKeyStorable, existingValueStorable, err := s.orderedMap.Remove( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + ) + if err != nil { + var keyNotFoundError *atree.KeyNotFoundError + if goerrors.As(err, &keyNotFoundError) { + // No-op to remove non-existent domain. + return + } + panic(errors.NewExternalError(err)) + } + + // Key + + // NOTE: Key is just an atree.Value (StringAtreeValue), not an interpreter.Value, + // so do not need (can) convert and not need to deep remove + interpreter.RemoveReferencedSlab(existingKeyStorable) + + // Value + + existed = existingValueStorable != nil + if existed { + // Create domain storage map from removed storable + domainStorageMap := newDomainStorageMapWithAtreeStorable(s.orderedMap.Storage, existingValueStorable) + + // Deep remove elements in domain storage map + domainStorageMap.DeepRemove(interpreter, true) + + // Remove domain storage map slab + interpreter.RemoveReferencedSlab(existingValueStorable) + } + + interpreter.maybeValidateAtreeValue(s.orderedMap) + interpreter.maybeValidateAtreeStorage() + + return +} + +func (s *AccountStorageMap) SlabID() atree.SlabID { + return s.orderedMap.SlabID() +} + +func (s *AccountStorageMap) Count() uint64 { + return s.orderedMap.Count() +} + +// Domains returns a set of domains in account storage map +func (s *AccountStorageMap) Domains() map[string]struct{} { + domains := make(map[string]struct{}) + + iterator := s.Iterator() + + for { + k, err := iterator.mapIterator.NextKey() + if err != nil { + panic(errors.NewExternalError(err)) + } + + if k == nil { + break + } + + domain := convertKeyToDomain(k) + domains[domain] = struct{}{} + } + + return domains +} + +// Iterator returns a mutable iterator (AccountStorageMapIterator), +// which allows iterating over the domain and domain storage map. +func (s *AccountStorageMap) Iterator() *AccountStorageMapIterator { + mapIterator, err := s.orderedMap.Iterator( + StorageMapKeyAtreeValueComparator, + StorageMapKeyAtreeValueHashInput, + ) + if err != nil { + panic(errors.NewExternalError(err)) + } + + return &AccountStorageMapIterator{ + mapIterator: mapIterator, + storage: s.orderedMap.Storage, + } +} + +// AccountStorageMapIterator is an iterator over AccountStorageMap. +type AccountStorageMapIterator struct { + mapIterator atree.MapIterator + storage atree.SlabStorage +} + +// Next returns the next domain and domain storage map. +// If there is no more domain, ("", nil) is returned. 
+func (i *AccountStorageMapIterator) Next() (string, *DomainStorageMap) { + k, v, err := i.mapIterator.Next() + if err != nil { + panic(errors.NewExternalError(err)) + } + + if k == nil || v == nil { + return "", nil + } + + key := convertKeyToDomain(k) + + value := NewDomainStorageMapWithAtreeValue(v) + + return key, value +} + +func convertKeyToDomain(v atree.Value) string { + key, ok := v.(StringAtreeValue) + if !ok { + panic(errors.NewUnexpectedError("domain key type %T isn't expected", key)) + } + return string(key) +} diff --git a/interpreter/account_storagemap_test.go b/interpreter/account_storagemap_test.go new file mode 100644 index 0000000000..cdc6fc868c --- /dev/null +++ b/interpreter/account_storagemap_test.go @@ -0,0 +1,766 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package interpreter_test + +import ( + "math/rand" + "slices" + "strconv" + "strings" + "testing" + + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/runtime" + . "github.com/onflow/cadence/test_utils/interpreter_utils" + . "github.com/onflow/cadence/test_utils/runtime_utils" + + "github.com/stretchr/testify/require" +) + +func TestAccountStorageMapDomainExists(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + for _, domain := range runtime.AccountDomains { + exist := accountStorageMap.DomainExists(domain) + require.False(t, exist) + } + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + existingDomains := []string{common.PathDomainStorage.Identifier()} + + const count = 10 + accountStorageMap, _ := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + // Check if domain exists + for _, domain := range runtime.AccountDomains { + exist := accountStorageMap.DomainExists(domain) + require.Equal(t, slices.Contains(existingDomains, domain), exist) + } + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +func TestAccountStorageMapGetDomain(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + for _, domain := range runtime.AccountDomains { + const createIfNotExists = false + storagemap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) + require.Nil(t, storagemap) + } + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + existingDomains := []string{common.PathDomainStorage.Identifier()} + + const count = 10 + accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + for _, domain := range runtime.AccountDomains { + const createIfNotExists = false + domainStoragemap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) + require.Equal(t, slices.Contains(existingDomains, domain), domainStoragemap != nil) + + if domainStoragemap != nil { + checkDomainStorageMapData(t, inter, domainStoragemap, accountValues[domain]) + } + } + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +func TestAccountStorageMapCreateDomain(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + accountValues := make(accountStorageMapValues) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + for _, domain := range runtime.AccountDomains { + const createIfNotExists = true + domainStoragemap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) + require.NotNil(t, domainStoragemap) + require.Equal(t, uint64(0), domainStoragemap.Count()) + + accountValues[domain] = make(domainStorageMapValues) + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + existingDomains := []string{common.PathDomainStorage.Identifier()} + + const count = 10 + accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + for _, domain := range runtime.AccountDomains { + const createIfNotExists = true + domainStoragemap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) + require.NotNil(t, domainStoragemap) + require.Equal(t, uint64(len(accountValues[domain])), domainStoragemap.Count()) + + if !slices.Contains(existingDomains, domain) { + accountValues[domain] = make(domainStorageMapValues) + } + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +func TestAccountStorageMapSetAndUpdateDomain(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + accountValues := make(accountStorageMapValues) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + const count = 10 + for _, domain := range runtime.AccountDomains { + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random) + + existed := accountStorageMap.WriteDomain(inter, domain, domainStorageMap) + require.False(t, existed) + + accountValues[domain] = domainValues + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + existingDomains := []string{common.PathDomainStorage.Identifier()} + + const count = 10 + accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + for _, domain := range runtime.AccountDomains { + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random) + + existed := accountStorageMap.WriteDomain(inter, domain, domainStorageMap) + require.Equal(t, slices.Contains(existingDomains, domain), existed) + + accountValues[domain] = domainValues + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +func TestAccountStorageMapRemoveDomain(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + accountValues := make(accountStorageMapValues) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + for _, domain := range runtime.AccountDomains { + existed := accountStorageMap.WriteDomain(inter, domain, nil) + require.False(t, existed) + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + existingDomains := []string{common.PathDomainStorage.Identifier()} + + const count = 10 + accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + for _, domain := range runtime.AccountDomains { + + existed := accountStorageMap.WriteDomain(inter, domain, nil) + require.Equal(t, slices.Contains(existingDomains, domain), existed) + + delete(accountValues, domain) + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +func TestAccountStorageMapIterator(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + accountValues := make(accountStorageMapValues) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + iterator := accountStorageMap.Iterator() + + // Test calling Next() twice on empty account storage map. + for range 2 { + domain, domainStorageMap := iterator.Next() + require.Empty(t, domain) + require.Nil(t, domainStorageMap) + } + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + existingDomains := []string{ + common.PathDomainStorage.Identifier(), + common.PathDomainPublic.Identifier(), + } + + const count = 10 + accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + iterator := accountStorageMap.Iterator() + + domainCount := 0 + for { + domain, domainStorageMap := iterator.Next() + if domain == "" { + break + } + + domainCount++ + + require.True(t, slices.Contains(existingDomains, domain)) + require.NotNil(t, domainStorageMap) + + checkDomainStorageMapData(t, inter, domainStorageMap, accountValues[domain]) + } + + // Test calling Next() after iterator reaches the end. + domain, domainStorageMap := iterator.Next() + require.True(t, domain == "") + require.Nil(t, domainStorageMap) + + require.Equal(t, len(existingDomains), domainCount) + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +func TestAccountStorageMapDomains(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + domains := accountStorageMap.Domains() + require.Equal(t, 0, len(domains)) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off automatic AtreeStorageValidationEnabled and explicitly check atree storage health directly. + // This is because AccountStorageMap isn't created through storage, so there isn't any account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(t, storage, atreeValueValidationEnabled, atreeStorageValidationEnabled) + + existingDomains := []string{ + common.PathDomainStorage.Identifier(), + common.PathDomainPublic.Identifier(), + common.PathDomainPrivate.Identifier(), + } + + const count = 10 + accountStorageMap, _ := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + domains := accountStorageMap.Domains() + require.Equal(t, len(existingDomains), len(domains)) + + for _, domain := range existingDomains { + _, exist := domains[domain] + require.True(t, exist) + } + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +func TestAccountStorageMapLoadFromRootSlabID(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + init := func() (atree.SlabID, accountStorageMapValues, map[string][]byte, map[string]uint64) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, accountStorageMap) + require.Equal(t, uint64(0), accountStorageMap.Count()) + + err := storage.Commit(inter, false) + require.NoError(t, err) + + return accountStorageMap.SlabID(), make(accountStorageMapValues), ledger.StoredValues, ledger.StorageIndices + } + + accountStorageMapRootSlabID, accountValues, storedValues, storageIndices := init() + + ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) + storage := runtime.NewStorage(ledger, nil) + + accountStorageMap := interpreter.NewAccountStorageMapWithRootID(storage, accountStorageMapRootSlabID) + require.Equal(t, uint64(0), accountStorageMap.Count()) + require.Equal(t, accountStorageMapRootSlabID, accountStorageMap.SlabID()) + + inter := NewTestInterpreterWithStorage(t, storage) + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) + + t.Run("non-empty", func(t *testing.T) { + existingDomains := []string{ + common.PathDomainStorage.Identifier(), + common.PathDomainPublic.Identifier(), + common.PathDomainPrivate.Identifier(), + } + + init := func() (atree.SlabID, accountStorageMapValues, map[string][]byte, map[string]uint64) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off automatic AtreeStorageValidationEnabled and explicitly check atree storage health directly. + // This is because AccountStorageMap isn't created through storage, so there isn't any account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(t, storage, atreeValueValidationEnabled, atreeStorageValidationEnabled) + + const count = 10 + accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + + err := storage.Commit(inter, false) + require.NoError(t, err) + + return accountStorageMap.SlabID(), accountValues, ledger.StoredValues, ledger.StorageIndices + } + + accountStorageMapRootSlabID, accountValues, storedValues, storageIndices := init() + + ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) + storage := runtime.NewStorage(ledger, nil) + + accountStorageMap := interpreter.NewAccountStorageMapWithRootID(storage, accountStorageMapRootSlabID) + require.Equal(t, uint64(len(existingDomains)), accountStorageMap.Count()) + require.Equal(t, accountStorageMapRootSlabID, accountStorageMap.SlabID()) + + inter := NewTestInterpreterWithStorage(t, storage) + + checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + }) +} + +type ( + domainStorageMapValues map[interpreter.StorageMapKey]interpreter.Value + accountStorageMapValues map[string]domainStorageMapValues +) + +func createAccountStorageMap( + storage atree.SlabStorage, + inter *interpreter.Interpreter, + address common.Address, + domains []string, + count int, + random *rand.Rand, +) (*interpreter.AccountStorageMap, accountStorageMapValues) { + + // Create account storage map + accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) + + accountValues := make(accountStorageMapValues) + + for _, domain := range domains { + // Create domain storage map + domainStorageMap := accountStorageMap.NewDomain(nil, inter, domain) + + // Write to new domain storage map + domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random) + + accountValues[domain] = domainValues + } + + return accountStorageMap, accountValues +} + +func writeRandomValuesToDomainStorageMap( + inter *interpreter.Interpreter, + domainStorageMap *interpreter.DomainStorageMap, + count int, + random *rand.Rand, +) domainStorageMapValues { + + domainValues := make(domainStorageMapValues) + + for len(domainValues) < count { + n := random.Int() + + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + + var value interpreter.Value + + if len(domainValues) == 0 { + // First element is a large value that is stored in its own slabs. + value = interpreter.NewUnmeteredStringValue(strings.Repeat("a", 1_000)) + } else { + value = interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + } + + domainStorageMap.WriteValue(inter, key, value) + + domainValues[key] = value + } + + return domainValues +} + +// checkAccountStorageMapData iterates account storage map and compares values with given expectedAccountValues. 
+func checkAccountStorageMapData( + tb testing.TB, + inter *interpreter.Interpreter, + accountStorageMap *interpreter.AccountStorageMap, + expectedAccountValues accountStorageMapValues, +) { + require.Equal(tb, uint64(len(expectedAccountValues)), accountStorageMap.Count()) + + domainCount := 0 + iter := accountStorageMap.Iterator() + for { + domain, domainStorageMap := iter.Next() + if domain == "" { + break + } + + domainCount++ + + expectedDomainValues, exist := expectedAccountValues[domain] + require.True(tb, exist) + + checkDomainStorageMapData(tb, inter, domainStorageMap, expectedDomainValues) + } + + require.Equal(tb, len(expectedAccountValues), domainCount) +} + +// checkDomainStorageMapData iterates domain storage map and compares values with given expectedDomainValues. +func checkDomainStorageMapData( + tb testing.TB, + inter *interpreter.Interpreter, + domainStorageMap *interpreter.DomainStorageMap, + expectedDomainValues domainStorageMapValues, +) { + require.Equal(tb, uint64(len(expectedDomainValues)), domainStorageMap.Count()) + + count := 0 + iter := domainStorageMap.Iterator(nil) + for { + k, v := iter.Next() + if k == nil { + break + } + + count++ + + kv := k.(interpreter.StringAtreeValue) + + expectedValue := expectedDomainValues[interpreter.StringStorageMapKey(kv)] + + checkCadenceValue(tb, inter, v, expectedValue) + } + + require.Equal(tb, len(expectedDomainValues), count) +} + +func checkCadenceValue( + tb testing.TB, + inter *interpreter.Interpreter, + value, + expectedValue interpreter.Value, +) { + ev, ok := value.(interpreter.EquatableValue) + require.True(tb, ok) + require.True(tb, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue)) +} diff --git a/interpreter/interpreter.go b/interpreter/interpreter.go index 2835c13112..034d9bead4 100644 --- a/interpreter/interpreter.go +++ b/interpreter/interpreter.go @@ -237,7 +237,7 @@ func (c TypeCodes) Merge(codes TypeCodes) { type Storage interface { atree.SlabStorage - GetStorageMap(address common.Address, domain string, createIfNotExists bool) *StorageMap + GetStorageMap(inter *Interpreter, address common.Address, domain string, createIfNotExists bool) *DomainStorageMap CheckHealth() error } @@ -2681,7 +2681,7 @@ func (interpreter *Interpreter) StoredValueExists( domain string, identifier StorageMapKey, ) bool { - accountStorage := interpreter.Storage().GetStorageMap(storageAddress, domain, false) + accountStorage := interpreter.Storage().GetStorageMap(interpreter, storageAddress, domain, false) if accountStorage == nil { return false } @@ -2693,7 +2693,7 @@ func (interpreter *Interpreter) ReadStored( domain string, identifier StorageMapKey, ) Value { - accountStorage := interpreter.Storage().GetStorageMap(storageAddress, domain, false) + accountStorage := interpreter.Storage().GetStorageMap(interpreter, storageAddress, domain, false) if accountStorage == nil { return nil } @@ -2706,7 +2706,7 @@ func (interpreter *Interpreter) WriteStored( key StorageMapKey, value Value, ) (existed bool) { - accountStorage := interpreter.Storage().GetStorageMap(storageAddress, domain, true) + accountStorage := interpreter.Storage().GetStorageMap(interpreter, storageAddress, domain, true) return accountStorage.WriteValue(interpreter, key, value) } @@ -4069,7 +4069,7 @@ func (interpreter *Interpreter) IsSubTypeOfSemaType(staticSubType StaticType, su } func (interpreter *Interpreter) domainPaths(address common.Address, domain common.PathDomain) []Value { - storageMap := interpreter.Storage().GetStorageMap(address, 
domain.Identifier(), false) + storageMap := interpreter.Storage().GetStorageMap(interpreter, address, domain.Identifier(), false) if storageMap == nil { return []Value{} } @@ -4164,7 +4164,7 @@ func (interpreter *Interpreter) newStorageIterationFunction( parameterTypes := fnType.ParameterTypes() returnType := fnType.ReturnTypeAnnotation.Type - storageMap := config.Storage.GetStorageMap(address, domain.Identifier(), false) + storageMap := config.Storage.GetStorageMap(interpreter, address, domain.Identifier(), false) if storageMap == nil { // if nothing is stored, no iteration is required return Void diff --git a/interpreter/interpreter_tracing.go b/interpreter/interpreter_tracing.go index 365eebed42..10ee3dd418 100644 --- a/interpreter/interpreter_tracing.go +++ b/interpreter/interpreter_tracing.go @@ -30,9 +30,10 @@ const ( tracingImportPrefix = "import." // type prefixes - tracingArrayPrefix = "array." - tracingDictionaryPrefix = "dictionary." - tracingCompositePrefix = "composite." + tracingArrayPrefix = "array." + tracingDictionaryPrefix = "dictionary." + tracingCompositePrefix = "composite." + tracingDomainStorageMapPrefix = "domainstoragemap." // Value operation postfixes tracingConstructPostfix = "construct" @@ -162,6 +163,20 @@ func (interpreter *Interpreter) reportDictionaryValueDeepRemoveTrace( ) } +func (interpreter *Interpreter) reportDomainStorageMapDeepRemoveTrace( + typeInfo string, + count int, + duration time.Duration, +) { + config := interpreter.SharedState.Config + config.OnRecordTrace( + interpreter, + tracingDomainStorageMapPrefix+tracingDeepRemovePostfix, + duration, + prepareArrayAndMapValueTraceAttrs(typeInfo, count), + ) +} + func (interpreter *Interpreter) reportDictionaryValueDestroyTrace( typeInfo string, count int, diff --git a/interpreter/misc_test.go b/interpreter/misc_test.go index 9c8e7c8886..35d7dd56be 100644 --- a/interpreter/misc_test.go +++ b/interpreter/misc_test.go @@ -5350,7 +5350,7 @@ func TestInterpretReferenceFailableDowncasting(t *testing.T) { ) domain := storagePath.Domain.Identifier() - storageMap := storage.GetStorageMap(storageAddress, domain, true) + storageMap := storage.GetStorageMap(inter, storageAddress, domain, true) storageMapKey := interpreter.StringStorageMapKey(storagePath.Identifier) storageMap.WriteValue(inter, storageMapKey, r) diff --git a/interpreter/storage.go b/interpreter/storage.go index c5bdcb0b87..992a039e69 100644 --- a/interpreter/storage.go +++ b/interpreter/storage.go @@ -130,7 +130,7 @@ func (k StorageKey) IsLess(o StorageKey) bool { // InMemoryStorage type InMemoryStorage struct { *atree.BasicSlabStorage - StorageMaps map[StorageKey]*StorageMap + StorageMaps map[StorageKey]*DomainStorageMap memoryGauge common.MemoryGauge } @@ -158,22 +158,23 @@ func NewInMemoryStorage(memoryGauge common.MemoryGauge) InMemoryStorage { return InMemoryStorage{ BasicSlabStorage: slabStorage, - StorageMaps: make(map[StorageKey]*StorageMap), + StorageMaps: make(map[StorageKey]*DomainStorageMap), memoryGauge: memoryGauge, } } func (i InMemoryStorage) GetStorageMap( + _ *Interpreter, address common.Address, domain string, createIfNotExists bool, ) ( - storageMap *StorageMap, + storageMap *DomainStorageMap, ) { key := NewStorageKey(i.memoryGauge, address, domain) storageMap = i.StorageMaps[key] if storageMap == nil && createIfNotExists { - storageMap = NewStorageMap(i.memoryGauge, i, atree.Address(address)) + storageMap = NewDomainStorageMap(i.memoryGauge, i, atree.Address(address)) i.StorageMaps[key] = storageMap } return storageMap diff 
--git a/interpreter/storage_test.go b/interpreter/storage_test.go index 0693df2d9b..16fd679a19 100644 --- a/interpreter/storage_test.go +++ b/interpreter/storage_test.go @@ -524,7 +524,7 @@ func TestStorageOverwriteAndRemove(t *testing.T) { const storageMapKey = StringStorageMapKey("test") - storageMap := storage.GetStorageMap(address, "storage", true) + storageMap := storage.GetStorageMap(inter, address, "storage", true) storageMap.WriteValue(inter, storageMapKey, array1) // Overwriting delete any existing child slabs diff --git a/interpreter/storagemap.go b/interpreter/storagemap.go index 54c9e2acbb..336828ce64 100644 --- a/interpreter/storagemap.go +++ b/interpreter/storagemap.go @@ -20,6 +20,7 @@ package interpreter import ( goerrors "errors" + "time" "github.com/onflow/atree" @@ -27,12 +28,13 @@ import ( "github.com/onflow/cadence/errors" ) -// StorageMap is an ordered map which stores values in an account. -type StorageMap struct { +// DomainStorageMap is an ordered map which stores values in an account domain. +type DomainStorageMap struct { orderedMap *atree.OrderedMap } -func NewStorageMap(memoryGauge common.MemoryGauge, storage atree.SlabStorage, address atree.Address) *StorageMap { +// NewDomainStorageMap creates new domain storage map for given address. +func NewDomainStorageMap(memoryGauge common.MemoryGauge, storage atree.SlabStorage, address atree.Address) *DomainStorageMap { common.UseMemory(memoryGauge, common.StorageMapMemoryUsage) orderedMap, err := atree.NewMap( @@ -45,12 +47,16 @@ func NewStorageMap(memoryGauge common.MemoryGauge, storage atree.SlabStorage, ad panic(errors.NewExternalError(err)) } - return &StorageMap{ + return &DomainStorageMap{ orderedMap: orderedMap, } } -func NewStorageMapWithRootID(storage atree.SlabStorage, slabID atree.SlabID) *StorageMap { +// NewDomainStorageMapWithRootID loads domain storage map with given slabID. +// This function is only used with legacy domain registers for unmigrated accounts. +// For migrated accounts, NewDomainStorageMapWithAtreeValue() is used to load +// domain storage map as an element of AccountStorageMap. +func NewDomainStorageMapWithRootID(storage atree.SlabStorage, slabID atree.SlabID) *DomainStorageMap { orderedMap, err := atree.NewMapWithRootID( storage, slabID, @@ -60,13 +66,51 @@ func NewStorageMapWithRootID(storage atree.SlabStorage, slabID atree.SlabID) *St panic(errors.NewExternalError(err)) } - return &StorageMap{ + return &DomainStorageMap{ orderedMap: orderedMap, } } +// newDomainStorageMapWithAtreeStorable loads domain storage map with given atree.Storable. +func newDomainStorageMapWithAtreeStorable(storage atree.SlabStorage, storable atree.Storable) *DomainStorageMap { + + // NOTE: Don't use interpreter.StoredValue() to convert given storable + // to DomainStorageMap because DomainStorageMap isn't interpreter.Value. + + value, err := storable.StoredValue(storage) + if err != nil { + panic(errors.NewExternalError(err)) + } + + return NewDomainStorageMapWithAtreeValue(value) +} + +// NewDomainStorageMapWithAtreeValue loads domain storage map with given atree.Value. +// This function is used by migrated account to load domain as an element of AccountStorageMap. 
+func NewDomainStorageMapWithAtreeValue(value atree.Value) *DomainStorageMap { + // Check if type of given value is *atree.OrderedMap + dm, isAtreeOrderedMap := value.(*atree.OrderedMap) + if !isAtreeOrderedMap { + panic(errors.NewUnexpectedError( + "domain storage map has unexpected type %T, expect *atree.OrderedMap", + value, + )) + } + + // Check if TypeInfo of atree.OrderedMap is EmptyTypeInfo + dt, isEmptyTypeInfo := dm.Type().(EmptyTypeInfo) + if !isEmptyTypeInfo { + panic(errors.NewUnexpectedError( + "domain storage map has unexpected encoded type %T, expect EmptyTypeInfo", + dt, + )) + } + + return &DomainStorageMap{orderedMap: dm} +} + // ValueExists returns true if the given key exists in the storage map. -func (s StorageMap) ValueExists(key StorageMapKey) bool { +func (s DomainStorageMap) ValueExists(key StorageMapKey) bool { exists, err := s.orderedMap.Has( key.AtreeValueCompare, key.AtreeValueHashInput, @@ -81,7 +125,7 @@ func (s StorageMap) ValueExists(key StorageMapKey) bool { // ReadValue returns the value for the given key. // Returns nil if the key does not exist. -func (s StorageMap) ReadValue(gauge common.MemoryGauge, key StorageMapKey) Value { +func (s DomainStorageMap) ReadValue(gauge common.MemoryGauge, key StorageMapKey) Value { storedValue, err := s.orderedMap.Get( key.AtreeValueCompare, key.AtreeValueHashInput, @@ -102,7 +146,7 @@ func (s StorageMap) ReadValue(gauge common.MemoryGauge, key StorageMapKey) Value // If the given value is nil, the key is removed. // If the given value is non-nil, the key is added/updated. // Returns true if a value previously existed at the given key. -func (s StorageMap) WriteValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) { +func (s DomainStorageMap) WriteValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) { if value == nil { return s.RemoveValue(interpreter, key) } else { @@ -112,8 +156,8 @@ func (s StorageMap) WriteValue(interpreter *Interpreter, key StorageMapKey, valu // SetValue sets a value in the storage map. // If the given key already stores a value, it is overwritten. -// Returns true if -func (s StorageMap) SetValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) { +// Returns true if given key already exists and existing value is overwritten. +func (s DomainStorageMap) SetValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) { interpreter.recordStorageMutation() existingStorable, err := s.orderedMap.Set( @@ -126,20 +170,21 @@ func (s StorageMap) SetValue(interpreter *Interpreter, key StorageMapKey, value panic(errors.NewExternalError(err)) } - interpreter.maybeValidateAtreeValue(s.orderedMap) - interpreter.maybeValidateAtreeStorage() - existed = existingStorable != nil if existed { existingValue := StoredValue(interpreter, existingStorable, interpreter.Storage()) existingValue.DeepRemove(interpreter, true) // existingValue is standalone because it was overwritten in parent container. interpreter.RemoveReferencedSlab(existingStorable) } + + interpreter.maybeValidateAtreeValue(s.orderedMap) + interpreter.maybeValidateAtreeStorage() + return } // RemoveValue removes a value in the storage map, if it exists. 
-func (s StorageMap) RemoveValue(interpreter *Interpreter, key StorageMapKey) (existed bool) { +func (s DomainStorageMap) RemoveValue(interpreter *Interpreter, key StorageMapKey) (existed bool) { interpreter.recordStorageMutation() existingKeyStorable, existingValueStorable, err := s.orderedMap.Remove( @@ -155,9 +200,6 @@ func (s StorageMap) RemoveValue(interpreter *Interpreter, key StorageMapKey) (ex panic(errors.NewExternalError(err)) } - interpreter.maybeValidateAtreeValue(s.orderedMap) - interpreter.maybeValidateAtreeStorage() - // Key // NOTE: Key is just an atree.Value, not an interpreter.Value, @@ -172,12 +214,78 @@ func (s StorageMap) RemoveValue(interpreter *Interpreter, key StorageMapKey) (ex existingValue.DeepRemove(interpreter, true) // existingValue is standalone because it was removed from parent container. interpreter.RemoveReferencedSlab(existingValueStorable) } + + interpreter.maybeValidateAtreeValue(s.orderedMap) + interpreter.maybeValidateAtreeStorage() + return } +// DeepRemove removes all elements (and their slabs) of domain storage map. +func (s *DomainStorageMap) DeepRemove(interpreter *Interpreter, hasNoParentContainer bool) { + + config := interpreter.SharedState.Config + + if config.TracingEnabled { + startTime := time.Now() + + typeInfo := "DomainStorageMap" + count := s.Count() + + defer func() { + interpreter.reportDomainStorageMapDeepRemoveTrace( + typeInfo, + int(count), + time.Since(startTime), + ) + }() + } + + // Remove nested values and storables + + // Remove keys and values + + storage := s.orderedMap.Storage + + err := s.orderedMap.PopIterate(func(keyStorable atree.Storable, valueStorable atree.Storable) { + // Key + + // NOTE: Key is just an atree.Value, not an interpreter.Value, + // so do not need (can) convert and not need to deep remove + interpreter.RemoveReferencedSlab(keyStorable) + + // Value + + value := StoredValue(interpreter, valueStorable, storage) + value.DeepRemove(interpreter, false) // value is an element of v.dictionary because it is from PopIterate() callback. + interpreter.RemoveReferencedSlab(valueStorable) + }) + if err != nil { + panic(errors.NewExternalError(err)) + } + + interpreter.maybeValidateAtreeValue(s.orderedMap) + if hasNoParentContainer { + interpreter.maybeValidateAtreeStorage() + } +} + +func (s DomainStorageMap) ValueID() atree.ValueID { + return s.orderedMap.ValueID() +} + +func (s DomainStorageMap) Count() uint64 { + return s.orderedMap.Count() +} + +func (s DomainStorageMap) Inlined() bool { + // This is only used for testing currently. 
+ return s.orderedMap.Inlined() +} + // Iterator returns an iterator (StorageMapIterator), // which allows iterating over the keys and values of the storage map -func (s StorageMap) Iterator(gauge common.MemoryGauge) StorageMapIterator { +func (s DomainStorageMap) Iterator(gauge common.MemoryGauge) DomainStorageMapIterator { mapIterator, err := s.orderedMap.Iterator( StorageMapKeyAtreeValueComparator, StorageMapKeyAtreeValueHashInput, @@ -186,31 +294,23 @@ func (s StorageMap) Iterator(gauge common.MemoryGauge) StorageMapIterator { panic(errors.NewExternalError(err)) } - return StorageMapIterator{ + return DomainStorageMapIterator{ gauge: gauge, mapIterator: mapIterator, storage: s.orderedMap.Storage, } } -func (s StorageMap) SlabID() atree.SlabID { - return s.orderedMap.SlabID() -} - -func (s StorageMap) Count() uint64 { - return s.orderedMap.Count() -} - -// StorageMapIterator is an iterator over StorageMap -type StorageMapIterator struct { +// DomainStorageMapIterator is an iterator over DomainStorageMap +type DomainStorageMapIterator struct { gauge common.MemoryGauge mapIterator atree.MapIterator storage atree.SlabStorage } // Next returns the next key and value of the storage map iterator. -// If there is no further key-value pair, ("", nil) is returned. -func (i StorageMapIterator) Next() (atree.Value, Value) { +// If there is no further key-value pair, (nil, nil) is returned. +func (i DomainStorageMapIterator) Next() (atree.Value, Value) { k, v, err := i.mapIterator.Next() if err != nil { panic(errors.NewExternalError(err)) @@ -230,7 +330,7 @@ func (i StorageMapIterator) Next() (atree.Value, Value) { // NextKey returns the next key of the storage map iterator. // If there is no further key, "" is returned. -func (i StorageMapIterator) NextKey() atree.Value { +func (i DomainStorageMapIterator) NextKey() atree.Value { k, err := i.mapIterator.NextKey() if err != nil { panic(errors.NewExternalError(err)) @@ -240,8 +340,8 @@ func (i StorageMapIterator) NextKey() atree.Value { } // NextValue returns the next value in the storage map iterator. -// If there is nop further value, nil is returned. -func (i StorageMapIterator) NextValue() Value { +// If there is no further value, nil is returned. +func (i DomainStorageMapIterator) NextValue() Value { v, err := i.mapIterator.NextValue() if err != nil { panic(errors.NewExternalError(err)) diff --git a/interpreter/storagemap_test.go b/interpreter/storagemap_test.go new file mode 100644 index 0000000000..9b1985dce1 --- /dev/null +++ b/interpreter/storagemap_test.go @@ -0,0 +1,711 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package interpreter_test + +import ( + "math/rand" + "strconv" + "testing" + + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/runtime" + . "github.com/onflow/cadence/test_utils/interpreter_utils" + . 
"github.com/onflow/cadence/test_utils/runtime_utils" + + "github.com/stretchr/testify/require" +) + +func TestDomainStorageMapValueExists(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + key := interpreter.StringAtreeValue("key") + exist := domainStorageMap.ValueExists(interpreter.StringStorageMapKey(key)) + require.False(t, exist) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match DomainStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + // Check if value exists + for key := range domainValues { + exist := domainStorageMap.ValueExists(key) + require.True(t, exist) + } + + // Check if random value exists + for range 10 { + n := random.Int() + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + _, keyExist := domainValues[key] + + exist := domainStorageMap.ValueExists(key) + require.Equal(t, keyExist, exist) + } + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapReadValue(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + key := interpreter.StringAtreeValue("key") + v := domainStorageMap.ReadValue(nil, interpreter.StringStorageMapKey(key)) + require.Nil(t, v) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match DomainStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + for key, expectedValue := range domainValues { + value := domainStorageMap.ReadValue(nil, key) + require.NotNil(t, value) + + checkCadenceValue(t, inter, value, expectedValue) + } + + // Get non-existent value + for range 10 { + n := random.Int() + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + if _, keyExist := domainValues[key]; keyExist { + continue + } + + value := domainStorageMap.ReadValue(nil, key) + require.Nil(t, value) + } + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapSetAndUpdateValue(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + const count = 10 + domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random) + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + for key := range domainValues { + // Overwrite existing values + n := random.Int() + + value := interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + + domainStorageMap.WriteValue(inter, key, value) + + domainValues[key] = value + } + require.Equal(t, uint64(count), domainStorageMap.Count()) + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapRemoveValue(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + key := interpreter.StringAtreeValue("key") + existed := domainStorageMap.WriteValue(inter, interpreter.StringStorageMapKey(key), nil) + require.False(t, existed) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + for key := range domainValues { + existed := domainStorageMap.WriteValue(inter, key, nil) + require.True(t, existed) + } + + // Remove non-existent value + for range 10 { + n := random.Int() + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + if _, keyExist := domainValues[key]; keyExist { + continue + } + + existed := domainStorageMap.WriteValue(inter, key, nil) + require.False(t, existed) + } + + clear(domainValues) + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapIteratorNext(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + domainValues := make(domainStorageMapValues) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + iterator := domainStorageMap.Iterator(nil) + + // Test calling Next() twice on empty account storage map. + for range 2 { + k, v := iterator.Next() + require.Nil(t, k) + require.Nil(t, v) + } + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + iterator := domainStorageMap.Iterator(nil) + + elementCount := 0 + for { + k, v := iterator.Next() + if k == nil { + break + } + + elementCount++ + + kv := k.(interpreter.StringAtreeValue) + + expectedValue, expectedValueExist := domainValues[interpreter.StringStorageMapKey(kv)] + require.True(t, expectedValueExist) + + checkCadenceValue(t, inter, v, expectedValue) + } + require.Equal(t, uint64(elementCount), domainStorageMap.Count()) + + // Test calling Next() after iterator reaches the end. + for range 2 { + k, v := iterator.Next() + require.Nil(t, k) + require.Nil(t, v) + } + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapIteratorNextKey(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + domainValues := make(domainStorageMapValues) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + iterator := domainStorageMap.Iterator(nil) + + // Test calling NextKey() twice on empty account storage map. + for range 2 { + k := iterator.NextKey() + require.Nil(t, k) + } + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + iterator := domainStorageMap.Iterator(nil) + + elementCount := 0 + for { + k := iterator.NextKey() + if k == nil { + break + } + + elementCount++ + + kv := k.(interpreter.StringAtreeValue) + + _, expectedValueExist := domainValues[interpreter.StringStorageMapKey(kv)] + require.True(t, expectedValueExist) + } + require.Equal(t, uint64(elementCount), domainStorageMap.Count()) + + // Test calling Next() after iterator reaches the end. + for range 2 { + k := iterator.NextKey() + require.Nil(t, k) + } + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapIteratorNextValue(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. + const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + domainValues := make(domainStorageMapValues) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + iterator := domainStorageMap.Iterator(nil) + + // Test calling NextKey() twice on empty account storage map. + for range 2 { + v := iterator.NextValue() + require.Nil(t, v) + } + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) + + t.Run("non-empty", func(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any + // account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig( + t, + storage, + atreeValueValidationEnabled, + atreeStorageValidationEnabled, + ) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + iterator := domainStorageMap.Iterator(nil) + + elementCount := 0 + for { + v := iterator.NextValue() + if v == nil { + break + } + + elementCount++ + + ev, ok := v.(interpreter.EquatableValue) + require.True(t, ok) + + match := false + for _, expectedValue := range domainValues { + if ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue) { + match = true + break + } + } + require.True(t, match) + } + require.Equal(t, uint64(elementCount), domainStorageMap.Count()) + + // Test calling NextValue() after iterator reaches the end. + for range 2 { + v := iterator.NextValue() + require.Nil(t, v) + } + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + valueID := domainStorageMap.ValueID() + CheckAtreeStorageHealth(t, storage, []atree.SlabID{atreeValueIDToSlabID(valueID)}) + }) +} + +func TestDomainStorageMapLoadFromRootSlabID(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + t.Run("empty", func(t *testing.T) { + init := func() (atree.SlabID, domainStorageMapValues, map[string][]byte, map[string]uint64) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + err := storage.Commit(inter, false) + require.NoError(t, err) + + valueID := domainStorageMap.ValueID() + return atreeValueIDToSlabID(valueID), make(domainStorageMapValues), ledger.StoredValues, ledger.StorageIndices + } + + domainStorageMapRootSlabID, domainValues, storedValues, storageIndices := init() + + ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) + storage := runtime.NewStorage(ledger, nil) + + domainStorageMap := interpreter.NewDomainStorageMapWithRootID(storage, domainStorageMapRootSlabID) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + inter := NewTestInterpreterWithStorage(t, storage) + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{domainStorageMapRootSlabID}) + }) + + t.Run("non-empty", func(t *testing.T) { + + init := func() (atree.SlabID, domainStorageMapValues, map[string][]byte, map[string]uint64) { + random := rand.New(rand.NewSource(42)) + + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off automatic AtreeStorageValidationEnabled and explicitly check atree storage health directly. + // This is because AccountStorageMap isn't created through storage, so there isn't any account register to match AccountStorageMap root slab. 
+ const atreeValueValidationEnabled = true + const atreeStorageValidationEnabled = false + inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(t, storage, atreeValueValidationEnabled, atreeStorageValidationEnabled) + + const count = 10 + domainStorageMap, domainValues := createDomainStorageMap(storage, inter, address, count, random) + + err := storage.Commit(inter, false) + require.NoError(t, err) + + valueID := domainStorageMap.ValueID() + return atreeValueIDToSlabID(valueID), domainValues, ledger.StoredValues, ledger.StorageIndices + } + + domainStorageMapRootSlabID, domainValues, storedValues, storageIndices := init() + + ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) + storage := runtime.NewStorage(ledger, nil) + + domainStorageMap := interpreter.NewDomainStorageMapWithRootID(storage, domainStorageMapRootSlabID) + + inter := NewTestInterpreterWithStorage(t, storage) + + checkDomainStorageMapData(t, inter, domainStorageMap, domainValues) + + CheckAtreeStorageHealth(t, storage, []atree.SlabID{domainStorageMapRootSlabID}) + }) +} + +func createDomainStorageMap( + storage atree.SlabStorage, + inter *interpreter.Interpreter, + address common.Address, + count int, + random *rand.Rand, +) (*interpreter.DomainStorageMap, domainStorageMapValues) { + + // Create domain storage map + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + + // Write to new domain storage map + domainValues := writeRandomValuesToDomainStorageMap(inter, domainStorageMap, count, random) + + return domainStorageMap, domainValues +} + +func atreeValueIDToSlabID(vid atree.ValueID) atree.SlabID { + return atree.NewSlabID( + atree.Address(vid[:8]), + atree.SlabIndex(vid[8:]), + ) +} diff --git a/interpreter/stringatreevalue_test.go b/interpreter/stringatreevalue_test.go index f2e622a8a9..366b59b1ee 100644 --- a/interpreter/stringatreevalue_test.go +++ b/interpreter/stringatreevalue_test.go @@ -36,12 +36,6 @@ func TestLargeStringAtreeValueInSeparateSlab(t *testing.T) { storage := NewInMemoryStorage(nil) - storageMap := storage.GetStorageMap( - common.MustBytesToAddress([]byte{0x1}), - common.PathDomainStorage.Identifier(), - true, - ) - inter, err := NewInterpreter( nil, common.StringLocation("test"), @@ -51,6 +45,13 @@ func TestLargeStringAtreeValueInSeparateSlab(t *testing.T) { ) require.NoError(t, err) + storageMap := storage.GetStorageMap( + inter, + common.MustBytesToAddress([]byte{0x1}), + common.PathDomainStorage.Identifier(), + true, + ) + // Generate a large key to force the string to get stored in a separate slab keyValue := NewStringAtreeValue(nil, strings.Repeat("x", 10_000)) diff --git a/interpreter/value_test.go b/interpreter/value_test.go index 295f5a4346..26cfac8621 100644 --- a/interpreter/value_test.go +++ b/interpreter/value_test.go @@ -3806,7 +3806,7 @@ func TestValue_ConformsToStaticType(t *testing.T) { ) require.NoError(t, err) - storageMap := storage.GetStorageMap(testAddress, "storage", true) + storageMap := storage.GetStorageMap(inter, testAddress, "storage", true) storageMap.WriteValue(inter, StringStorageMapKey("test"), TrueValue) value := valueFactory(inter) diff --git a/runtime/capabilitycontrollers_test.go b/runtime/capabilitycontrollers_test.go index fc6f692fe4..d00fb18dae 100644 --- a/runtime/capabilitycontrollers_test.go +++ b/runtime/capabilitycontrollers_test.go @@ -3251,7 +3251,10 @@ func TestRuntimeCapabilityControllers(t *testing.T) { ) require.NoError(t, err) + // Use *interpreter.Interpreter(nil) here because 
createIfNotExists is false. + storageMap := storage.GetStorageMap( + nil, common.MustBytesToAddress([]byte{0x1}), stdlib.PathCapabilityStorageDomain, false, @@ -3841,6 +3844,7 @@ func TestRuntimeCapabilitiesGetBackwardCompatibility(t *testing.T) { require.NoError(t, err) publicStorageMap := storage.GetStorageMap( + inter, testAddress, common.PathDomainPublic.Identifier(), true, @@ -3948,6 +3952,7 @@ func TestRuntimeCapabilitiesPublishBackwardCompatibility(t *testing.T) { require.NoError(t, err) publicStorageMap := storage.GetStorageMap( + inter, testAddress, common.PathDomainStorage.Identifier(), true, @@ -4038,6 +4043,7 @@ func TestRuntimeCapabilitiesUnpublishBackwardCompatibility(t *testing.T) { require.NoError(t, err) publicStorageMap := storage.GetStorageMap( + inter, testAddress, common.PathDomainPublic.Identifier(), true, diff --git a/runtime/contract_test.go b/runtime/contract_test.go index 2bb89a2cb5..a009906aa0 100644 --- a/runtime/contract_test.go +++ b/runtime/contract_test.go @@ -223,7 +223,7 @@ func TestRuntimeContract(t *testing.T) { getContractValueExists := func() bool { storageMap := NewStorage(storage, nil). - GetStorageMap(signerAddress, StorageDomainContract, false) + GetStorageMap(inter, signerAddress, StorageDomainContract, false) if storageMap == nil { return false } diff --git a/runtime/environment.go b/runtime/environment.go index ef3d0130d6..9736a4f2bc 100644 --- a/runtime/environment.go +++ b/runtime/environment.go @@ -1106,6 +1106,7 @@ func (e *interpreterEnvironment) loadContract( location := compositeType.Location if addressLocation, ok := location.(common.AddressLocation); ok { storageMap := e.storage.GetStorageMap( + inter, addressLocation.Address, StorageDomainContract, false, diff --git a/runtime/ft_test.go b/runtime/ft_test.go index b9374d986e..3e37164cf7 100644 --- a/runtime/ft_test.go +++ b/runtime/ft_test.go @@ -1084,6 +1084,7 @@ func TestRuntimeBrokenFungibleTokenRecovery(t *testing.T) { ) contractStorage := storage.GetStorageMap( + inter, contractsAddress, StorageDomainContract, true, @@ -1119,6 +1120,7 @@ func TestRuntimeBrokenFungibleTokenRecovery(t *testing.T) { ) userStorage := storage.GetStorageMap( + inter, userAddress, common.PathDomainStorage.Identifier(), true, diff --git a/runtime/migrate_domain_registers.go b/runtime/migrate_domain_registers.go new file mode 100644 index 0000000000..bba37d1d4f --- /dev/null +++ b/runtime/migrate_domain_registers.go @@ -0,0 +1,187 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package runtime + +import ( + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/common/orderedmap" + "github.com/onflow/cadence/errors" + "github.com/onflow/cadence/interpreter" +) + +type GetDomainStorageMapFunc func( + ledger atree.Ledger, + storage atree.SlabStorage, + address common.Address, + domain string, +) (*interpreter.DomainStorageMap, error) + +type DomainRegisterMigration struct { + ledger atree.Ledger + storage atree.SlabStorage + inter *interpreter.Interpreter + memoryGauge common.MemoryGauge + getDomainStorageMap GetDomainStorageMapFunc +} + +func NewDomainRegisterMigration( + ledger atree.Ledger, + storage atree.SlabStorage, + inter *interpreter.Interpreter, + memoryGauge common.MemoryGauge, +) *DomainRegisterMigration { + return &DomainRegisterMigration{ + ledger: ledger, + storage: storage, + inter: inter, + memoryGauge: memoryGauge, + getDomainStorageMap: getDomainStorageMapFromLegacyDomainRegister, + } +} + +// SetGetDomainStorageMapFunc allows user to provide custom GetDomainStorageMap function. +func (m *DomainRegisterMigration) SetGetDomainStorageMapFunc( + getDomainStorageMapFunc GetDomainStorageMapFunc, +) { + m.getDomainStorageMap = getDomainStorageMapFunc +} + +// MigrateAccounts migrates given accounts. +func (m *DomainRegisterMigration) MigrateAccounts( + accounts *orderedmap.OrderedMap[common.Address, struct{}], + pred func(common.Address) bool, +) ( + *orderedmap.OrderedMap[common.Address, *interpreter.AccountStorageMap], + error, +) { + if accounts == nil || accounts.Len() == 0 { + return nil, nil + } + + var migratedAccounts *orderedmap.OrderedMap[common.Address, *interpreter.AccountStorageMap] + + for pair := accounts.Oldest(); pair != nil; pair = pair.Next() { + address := pair.Key + + if !pred(address) { + continue + } + + migrated, err := isMigrated(m.ledger, address) + if err != nil { + return nil, err + } + if migrated { + continue + } + + accountStorageMap, err := m.migrateAccount(address) + if err != nil { + return nil, err + } + + if migratedAccounts == nil { + migratedAccounts = &orderedmap.OrderedMap[common.Address, *interpreter.AccountStorageMap]{} + } + migratedAccounts.Set(address, accountStorageMap) + } + + return migratedAccounts, nil +} + +func (m *DomainRegisterMigration) migrateAccount( + address common.Address, +) (*interpreter.AccountStorageMap, error) { + // Create account storage map + accountStorageMap := interpreter.NewAccountStorageMap(m.memoryGauge, m.storage, atree.Address(address)) + + // Migrate existing domains + err := m.migrateDomains(address, accountStorageMap) + if err != nil { + return nil, err + } + + accountStorageMapSlabIndex := accountStorageMap.SlabID().Index() + + // Write account register + errors.WrapPanic(func() { + err = m.ledger.SetValue( + address[:], + []byte(AccountStorageKey), + accountStorageMapSlabIndex[:], + ) + }) + if err != nil { + return nil, interpreter.WrappedExternalError(err) + } + + return accountStorageMap, nil +} + +// migrateDomains migrates existing domain storage maps and removes domain registers. 
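+// For each domain in AccountDomains, the legacy domain register is resolved to
+// its domain storage map (if any), that map is written into the given account
+// storage map under the domain name, and the legacy domain register is cleared.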
+func (m *DomainRegisterMigration) migrateDomains( + address common.Address, + accountStorageMap *interpreter.AccountStorageMap, +) error { + for _, domain := range AccountDomains { + + domainStorageMap, err := m.getDomainStorageMap(m.ledger, m.storage, address, domain) + if err != nil { + return err + } + + if domainStorageMap == nil { + // Skip non-existent domain + continue + } + + // Migrate (insert) existing domain storage map to account storage map + existed := accountStorageMap.WriteDomain(m.inter, domain, domainStorageMap) + if existed { + // This shouldn't happen because we are inserting domain storage map into empty account storage map. + return errors.NewUnexpectedError( + "failed to migrate domain %s for account %x: domain already exists in account storage map", domain, address, + ) + } + + // Remove migrated domain registers + errors.WrapPanic(func() { + // NOTE: removing non-existent domain registers is no-op. + err = m.ledger.SetValue( + address[:], + []byte(domain), + nil) + }) + if err != nil { + return interpreter.WrappedExternalError(err) + } + } + + return nil +} + +func isMigrated(ledger atree.Ledger, address common.Address) (bool, error) { + _, registerExists, err := getSlabIndexFromRegisterValue(ledger, address, []byte(AccountStorageKey)) + if err != nil { + return false, err + } + return registerExists, nil +} diff --git a/runtime/migrate_domain_registers_test.go b/runtime/migrate_domain_registers_test.go new file mode 100644 index 0000000000..c21bd31162 --- /dev/null +++ b/runtime/migrate_domain_registers_test.go @@ -0,0 +1,594 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package runtime_test + +import ( + "math" + "math/rand" + goruntime "runtime" + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/common/orderedmap" + "github.com/onflow/cadence/errors" + "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/runtime" + . "github.com/onflow/cadence/test_utils/interpreter_utils" + . 
"github.com/onflow/cadence/test_utils/runtime_utils" +) + +func TestMigrateDomainRegisters(t *testing.T) { + + alwaysMigrate := func(common.Address) bool { + return true + } + + neverMigrate := func(common.Address) bool { + return false + } + + migrateSpecificAccount := func(addressToMigrate common.Address) func(common.Address) bool { + return func(address common.Address) bool { + return address == addressToMigrate + } + } + + isAtreeRegister := func(key string) bool { + return key[0] == '$' && len(key) == 9 + } + + getNonAtreeRegisters := func(values map[string][]byte) map[string][]byte { + nonAtreeRegisters := make(map[string][]byte) + for k, v := range values { + ks := strings.Split(k, "|") + if !isAtreeRegister(ks[1]) && len(v) > 0 { + nonAtreeRegisters[k] = v + } + } + return nonAtreeRegisters + } + + address1 := common.MustBytesToAddress([]byte{0x1}) + address2 := common.MustBytesToAddress([]byte{0x2}) + + t.Run("no accounts", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) + + migratedAccounts, err := migrator.MigrateAccounts(nil, alwaysMigrate) + require.NoError(t, err) + require.True(t, migratedAccounts == nil || migratedAccounts.Len() == 0) + }) + + t.Run("accounts without domain registers", func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) + + accounts := &orderedmap.OrderedMap[common.Address, struct{}]{} + accounts.Set(address2, struct{}{}) + accounts.Set(address1, struct{}{}) + + migratedAccounts, err := migrator.MigrateAccounts(accounts, alwaysMigrate) + require.NoError(t, err) + require.NotNil(t, migratedAccounts) + require.Equal(t, accounts.Len(), migratedAccounts.Len()) + require.Equal(t, address2, migratedAccounts.Oldest().Key) + require.Equal(t, address1, migratedAccounts.Newest().Key) + + err = storage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + // Check non-atree registers + nonAtreeRegisters := getNonAtreeRegisters(ledger.StoredValues) + require.Equal(t, accounts.Len(), len(nonAtreeRegisters)) + require.Contains(t, nonAtreeRegisters, string(address1[:])+"|"+runtime.AccountStorageKey) + require.Contains(t, nonAtreeRegisters, string(address2[:])+"|"+runtime.AccountStorageKey) + + // Check atree storage + expectedRootSlabIDs := make([]atree.SlabID, 0, migratedAccounts.Len()) + for pair := migratedAccounts.Oldest(); pair != nil; pair = pair.Next() { + accountStorageMap := pair.Value + expectedRootSlabIDs = append(expectedRootSlabIDs, accountStorageMap.SlabID()) + } + + CheckAtreeStorageHealth(t, storage, expectedRootSlabIDs) + + // Check account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address1, make(accountStorageMapValues)) + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address2, make(accountStorageMapValues)) + }) + + t.Run("accounts with domain registers", func(t *testing.T) { + + accountsInfo := []accountInfo{ + { + address: address1, + domains: []domainInfo{ + {domain: common.PathDomainStorage.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + {domain: common.PathDomainPrivate.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + { + address: address2, + domains: []domainInfo{ + 
{domain: common.PathDomainPublic.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + } + + ledger, accountsValues := newTestLedgerWithUnmigratedAccounts(t, nil, nil, accountsInfo) + storage := runtime.NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) + + accounts := &orderedmap.OrderedMap[common.Address, struct{}]{} + accounts.Set(address2, struct{}{}) + accounts.Set(address1, struct{}{}) + + migratedAccounts, err := migrator.MigrateAccounts(accounts, alwaysMigrate) + require.NoError(t, err) + require.NotNil(t, migratedAccounts) + require.Equal(t, accounts.Len(), migratedAccounts.Len()) + require.Equal(t, address2, migratedAccounts.Oldest().Key) + require.Equal(t, address1, migratedAccounts.Newest().Key) + + err = storage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + // Check non-atree registers + nonAtreeRegisters := getNonAtreeRegisters(ledger.StoredValues) + require.Equal(t, accounts.Len(), len(nonAtreeRegisters)) + require.Contains(t, nonAtreeRegisters, string(address1[:])+"|"+runtime.AccountStorageKey) + require.Contains(t, nonAtreeRegisters, string(address2[:])+"|"+runtime.AccountStorageKey) + + // Check atree storage + expectedRootSlabIDs := make([]atree.SlabID, 0, migratedAccounts.Len()) + for pair := migratedAccounts.Oldest(); pair != nil; pair = pair.Next() { + accountStorageMap := pair.Value + expectedRootSlabIDs = append(expectedRootSlabIDs, accountStorageMap.SlabID()) + } + + CheckAtreeStorageHealth(t, storage, expectedRootSlabIDs) + + // Check account storage map data + for address, accountValues := range accountsValues { + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + }) + + t.Run("migrated accounts", func(t *testing.T) { + accountsInfo := []accountInfo{ + { + address: address1, + domains: []domainInfo{ + {domain: common.PathDomainStorage.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + {domain: common.PathDomainPrivate.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + { + address: address2, + domains: []domainInfo{ + {domain: common.PathDomainPublic.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + } + + ledger, accountsValues := newTestLedgerWithMigratedAccounts(t, nil, nil, accountsInfo) + storage := runtime.NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) + + accounts := &orderedmap.OrderedMap[common.Address, struct{}]{} + accounts.Set(address2, struct{}{}) + accounts.Set(address1, struct{}{}) + + migratedAccounts, err := migrator.MigrateAccounts(accounts, alwaysMigrate) + require.NoError(t, err) + require.True(t, migratedAccounts == nil || migratedAccounts.Len() == 0) + + // Check account storage map data + for address, accountValues := range accountsValues { + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + }) + + t.Run("never migration predicate", func(t *testing.T) { + + accountsInfo := []accountInfo{ + { + address: address1, + domains: []domainInfo{ + {domain: common.PathDomainStorage.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + { + address: address2, + domains: []domainInfo{ + {domain: common.PathDomainPublic.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + } + + ledger, _ := newTestLedgerWithUnmigratedAccounts(t, nil, nil, 
accountsInfo) + storage := runtime.NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) + + accounts := &orderedmap.OrderedMap[common.Address, struct{}]{} + accounts.Set(address2, struct{}{}) + accounts.Set(address1, struct{}{}) + + migratedAccounts, err := migrator.MigrateAccounts(accounts, neverMigrate) + require.NoError(t, err) + require.True(t, migratedAccounts == nil || migratedAccounts.Len() == 0) + }) + + t.Run("selective migration predicate", func(t *testing.T) { + + accountsInfo := []accountInfo{ + { + address: address1, + domains: []domainInfo{ + {domain: common.PathDomainStorage.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + { + address: address2, + domains: []domainInfo{ + {domain: common.PathDomainPublic.Identifier(), domainStorageMapCount: 10, maxDepth: 3}, + }, + }, + } + + ledger, _ := newTestLedgerWithUnmigratedAccounts(t, nil, nil, accountsInfo) + storage := runtime.NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) + + accounts := &orderedmap.OrderedMap[common.Address, struct{}]{} + accounts.Set(address2, struct{}{}) + accounts.Set(address1, struct{}{}) + + migratedAccounts, err := migrator.MigrateAccounts(accounts, migrateSpecificAccount(address2)) + require.NoError(t, err) + require.NotNil(t, migratedAccounts) + require.Equal(t, 1, migratedAccounts.Len()) + require.Equal(t, address2, migratedAccounts.Oldest().Key) + }) +} + +type domainInfo struct { + domain string + domainStorageMapCount int + maxDepth int +} + +type accountInfo struct { + address common.Address + domains []domainInfo +} + +func newTestLedgerWithUnmigratedAccounts( + tb testing.TB, + onRead LedgerOnRead, + onWrite LedgerOnWrite, + accounts []accountInfo, +) (TestLedger, map[common.Address]accountStorageMapValues) { + ledger := NewTestLedger(nil, nil) + storage := runtime.NewStorage(ledger, nil) + + // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. + // This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any + // domain register to match DomainStorageMap root slab. 
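+	//
+	// The helper reproduces the pre-migration (unmigrated) layout: one register
+	// per domain, each holding the slab index of that domain's storage map.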
+	const atreeValueValidationEnabled = true
+	const atreeStorageValidationEnabled = false
+	inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+		tb,
+		storage,
+		atreeValueValidationEnabled,
+		atreeStorageValidationEnabled,
+	)
+
+	random := rand.New(rand.NewSource(42))
+
+	accountsValues := make(map[common.Address]accountStorageMapValues)
+
+	var expectedDomainRootSlabIDs []atree.SlabID
+
+	for _, account := range accounts {
+
+		address := account.address
+
+		accountValues := make(accountStorageMapValues)
+
+		accountsValues[address] = accountValues
+
+		for _, domainInfo := range account.domains {
+
+			domain := domainInfo.domain
+			domainStorageMapCount := domainInfo.domainStorageMapCount
+			maxDepth := domainInfo.maxDepth
+
+			accountValues[domain] = make(domainStorageMapValues)
+
+			// Create domain storage map
+			domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address))
+
+			// Write domain register
+			domainStorageMapValueID := domainStorageMap.ValueID()
+			err := ledger.SetValue(address[:], []byte(domain), domainStorageMapValueID[8:])
+			require.NoError(tb, err)
+
+			vid := domainStorageMap.ValueID()
+			expectedDomainRootSlabIDs = append(
+				expectedDomainRootSlabIDs,
+				atree.NewSlabID(atree.Address(address), atree.SlabIndex(vid[8:])))
+
+			// Write elements to domain storage map
+			for len(accountValues[domain]) < domainStorageMapCount {
+
+				key := interpreter.StringStorageMapKey(strconv.Itoa(random.Int()))
+
+				depth := random.Intn(maxDepth + 1)
+				value := randomCadenceValues(inter, address, depth, random)
+
+				_ = domainStorageMap.WriteValue(inter, key, value)
+
+				accountValues[domain][key] = value
+			}
+		}
+	}
+
+	// Commit changes
+	const commitContractUpdates = false
+	err := storage.Commit(inter, commitContractUpdates)
+	require.NoError(tb, err)
+
+	CheckAtreeStorageHealth(tb, storage, expectedDomainRootSlabIDs)
+
+	// Create a new ledger with the stored data
+	newLedger := NewTestLedgerWithData(onRead, onWrite, ledger.StoredValues, ledger.StorageIndices)
+
+	return newLedger, accountsValues
+}
+
+func newTestLedgerWithMigratedAccounts(
+	tb testing.TB,
+	onRead LedgerOnRead,
+	onWrite LedgerOnWrite,
+	accounts []accountInfo,
+) (TestLedger, map[common.Address]accountStorageMapValues) {
+	ledger := NewTestLedger(nil, nil)
+	storage := runtime.NewStorage(ledger, nil)
+
+	// Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test.
+	// This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any
+	// domain register to match DomainStorageMap root slab.
+	const atreeValueValidationEnabled = true
+	const atreeStorageValidationEnabled = false
+	inter := NewTestInterpreterWithStorageAndAtreeValidationConfig(
+		tb,
+		storage,
+		atreeValueValidationEnabled,
+		atreeStorageValidationEnabled,
+	)
+
+	random := rand.New(rand.NewSource(42))
+
+	expectedRootSlabIDs := make([]atree.SlabID, 0, len(accounts))
+
+	accountsValues := make(map[common.Address]accountStorageMapValues)
+
+	for _, account := range accounts {
+
+		address := account.address
+
+		accountValues := make(accountStorageMapValues)
+
+		accountsValues[address] = accountValues
+
+		accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address))
+
+		// Write account register
+		accountStorageMapSlabIndex := accountStorageMap.SlabID().Index()
+		err := ledger.SetValue(address[:], []byte(runtime.AccountStorageKey), accountStorageMapSlabIndex[:])
+		require.NoError(tb, err)
+
+		expectedRootSlabIDs = append(expectedRootSlabIDs, accountStorageMap.SlabID())
+
+		for _, domainInfo := range account.domains {
+
+			domain := domainInfo.domain
+			domainStorageMapCount := domainInfo.domainStorageMapCount
+			maxDepth := domainInfo.maxDepth
+
+			accountValues[domain] = make(domainStorageMapValues)
+
+			// Create domain storage map
+			domainStorageMap := accountStorageMap.NewDomain(nil, inter, domain)
+
+			// Write elements to domain storage map
+			for len(accountValues[domain]) < domainStorageMapCount {
+
+				key := interpreter.StringStorageMapKey(strconv.Itoa(random.Int()))
+
+				depth := random.Intn(maxDepth + 1)
+				value := randomCadenceValues(inter, address, depth, random)
+
+				_ = domainStorageMap.WriteValue(inter, key, value)
+
+				accountValues[domain][key] = value
+			}
+		}
+	}
+
+	// Commit changes
+	const commitContractUpdates = false
+	err := storage.Commit(inter, commitContractUpdates)
+	require.NoError(tb, err)
+
+	CheckAtreeStorageHealth(tb, storage, expectedRootSlabIDs)
+
+	newLedger := NewTestLedgerWithData(onRead, onWrite, ledger.StoredValues, ledger.StorageIndices)
+
+	return newLedger, accountsValues
+}
+
+func randomCadenceValues(
+	inter *interpreter.Interpreter,
+	address common.Address,
+	depth int,
+	random *rand.Rand,
+) interpreter.EquatableValue {
+	var typeIndex int
+	if depth == 0 {
+		typeIndex = random.Intn(typeLargeString + 1)
+	} else {
+		typeIndex = random.Intn(maxType)
+	}
+
+	switch typeIndex {
+	case typeUint8:
+		num := random.Intn(math.MaxUint8 + 1)
+		return interpreter.NewUnmeteredUInt8Value(uint8(num))
+
+	case typeUint16:
+		num := random.Intn(math.MaxUint16 + 1)
+		return interpreter.NewUnmeteredUInt16Value(uint16(num))
+
+	case typeUint32:
+		num := random.Uint32()
+		return interpreter.NewUnmeteredUInt32Value(num)
+
+	case typeUint64:
+		num := random.Uint64()
+		return interpreter.NewUnmeteredUInt64Value(num)
+
+	case typeSmallString:
+		const maxSmallStringLength = 32
+
+		size := random.Intn(maxSmallStringLength + 1)
+
+		b := make([]byte, size)
+		random.Read(b)
+		s := strings.ToValidUTF8(string(b), "$")
+		return interpreter.NewUnmeteredStringValue(s)
+
+	case typeLargeString:
+		const minLargeStringLength = 256
+		const maxLargeStringLength = 1024
+
+		size := random.Intn(maxLargeStringLength+1-minLargeStringLength) + minLargeStringLength
+
+		b := make([]byte, size)
+		random.Read(b)
+		s := strings.ToValidUTF8(string(b), "$")
+		return interpreter.NewUnmeteredStringValue(s)
+
+	case typeArray:
+		const minArrayLength = 1
+		const maxArrayLength = 20
+
+		size := random.Intn(maxArrayLength+1-minArrayLength) + minArrayLength
+
+		arrayType := 
interpreter.NewVariableSizedStaticType( + nil, + interpreter.PrimitiveStaticTypeAny, + ) + + depth-- + + values := make([]interpreter.Value, size) + for i := range size { + values[i] = randomCadenceValues(inter, common.ZeroAddress, depth, random) + } + + return interpreter.NewArrayValue( + inter, + interpreter.EmptyLocationRange, + arrayType, + address, + values..., + ) + + case typeDictionary: + const minDictLength = 1 + const maxDictLength = 20 + + size := random.Intn(maxDictLength+1-minDictLength) + minDictLength + + dictType := interpreter.NewDictionaryStaticType( + nil, + interpreter.PrimitiveStaticTypeAny, + interpreter.PrimitiveStaticTypeAny, + ) + + depth-- + + keyAndValues := make([]interpreter.Value, 0, size*2) + for i := range size * 2 { + if i%2 == 0 { + // Key (0 depth for element) + keyAndValues = append(keyAndValues, randomCadenceValues(inter, common.ZeroAddress, 0, random)) + } else { + // Value (decremented depth for element) + keyAndValues = append(keyAndValues, randomCadenceValues(inter, common.ZeroAddress, depth, random)) + } + } + + return interpreter.NewDictionaryValueWithAddress(inter, interpreter.EmptyLocationRange, dictType, address, keyAndValues...) + + default: + panic(errors.NewUnreachableError()) + } +} + +const ( + typeUint8 = iota + typeUint16 + typeUint32 + typeUint64 + typeSmallString + typeLargeString + typeArray + typeDictionary + maxType +) diff --git a/runtime/runtime_memory_metering_test.go b/runtime/runtime_memory_metering_test.go index d6d389ff73..669b6152c1 100644 --- a/runtime/runtime_memory_metering_test.go +++ b/runtime/runtime_memory_metering_test.go @@ -871,7 +871,7 @@ func TestRuntimeStorageCommitsMetering(t *testing.T) { ) require.NoError(t, err) - assert.Equal(t, uint64(4), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) + assert.Equal(t, uint64(5), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) }) t.Run("storage used non empty", func(t *testing.T) { @@ -898,7 +898,7 @@ func TestRuntimeStorageCommitsMetering(t *testing.T) { OnGetStorageUsed: func(_ Address) (uint64, error) { // Before the storageUsed function is invoked, the deltas must have been committed. // So the encoded slabs must have been metered at this point. 
- assert.Equal(t, uint64(4), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) + assert.Equal(t, uint64(5), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) storageUsedInvoked = true return 1, nil }, @@ -918,7 +918,7 @@ func TestRuntimeStorageCommitsMetering(t *testing.T) { require.NoError(t, err) assert.True(t, storageUsedInvoked) - assert.Equal(t, uint64(4), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) + assert.Equal(t, uint64(5), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) }) } @@ -1073,7 +1073,7 @@ func TestRuntimeMeterEncoding(t *testing.T) { ) require.NoError(t, err) - assert.Equal(t, 75, int(meter.getMemory(common.MemoryKindBytes))) + assert.Equal(t, 114, int(meter.getMemory(common.MemoryKindBytes))) }) t.Run("string in loop", func(t *testing.T) { @@ -1122,7 +1122,7 @@ func TestRuntimeMeterEncoding(t *testing.T) { ) require.NoError(t, err) - assert.Equal(t, 61455, int(meter.getMemory(common.MemoryKindBytes))) + assert.Equal(t, 61501, int(meter.getMemory(common.MemoryKindBytes))) }) t.Run("composite", func(t *testing.T) { @@ -1173,6 +1173,6 @@ func TestRuntimeMeterEncoding(t *testing.T) { ) require.NoError(t, err) - assert.Equal(t, 58323, int(meter.getMemory(common.MemoryKindBytes))) + assert.Equal(t, 58369, int(meter.getMemory(common.MemoryKindBytes))) }) } diff --git a/runtime/runtime_test.go b/runtime/runtime_test.go index 91b7bcdc86..4c6cb948fc 100644 --- a/runtime/runtime_test.go +++ b/runtime/runtime_test.go @@ -5780,21 +5780,28 @@ func TestRuntimeContractWriteback(t *testing.T) { assert.Equal(t, []ownerKeyPair{ - // storage index to contract domain storage map + // storage index to account storage map { addressValue[:], - []byte("contract"), + []byte(AccountStorageKey), }, // contract value + // NOTE: contract value is empty because it is inlined in contract domain storage map { addressValue[:], []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, }, // contract domain storage map + // NOTE: contract domain storage map is empty because it is inlined in account storage map { addressValue[:], []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, }, + // account storage map + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + }, }, writes, ) @@ -5829,8 +5836,10 @@ func TestRuntimeContractWriteback(t *testing.T) { assert.Equal(t, []ownerKeyPair{ - // Storage map is modified because contract value is inlined in contract storage map. - // NOTE: contract value slab doesn't exist. + // Account storage map is modified because: + // - contract value is inlined in contract storage map, and + // - contract storage map is inlined in account storage map. + // NOTE: both contract storage map slab and contract value slab don't exist. 
{ addressValue[:], []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, @@ -5927,10 +5936,10 @@ func TestRuntimeStorageWriteback(t *testing.T) { assert.Equal(t, []ownerKeyPair{ - // storage index to contract domain storage map + // storage index to account storage map { addressValue[:], - []byte("contract"), + []byte(AccountStorageKey), }, // contract value // NOTE: contract value slab is empty because it is inlined in contract domain storage map @@ -5939,10 +5948,16 @@ func TestRuntimeStorageWriteback(t *testing.T) { []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, }, // contract domain storage map + // NOTE: contract domain storage map is empty because it is inlined in account storage map { addressValue[:], []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, }, + // account storage map + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + }, }, writes, ) @@ -5971,22 +5986,23 @@ func TestRuntimeStorageWriteback(t *testing.T) { assert.Equal(t, []ownerKeyPair{ - // storage index to storage domain storage map + // account storage map + // NOTE: account storage map is updated with new storage domain storage map (inlined). { addressValue[:], - []byte("storage"), + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, }, // resource value // NOTE: resource value slab is empty because it is inlined in storage domain storage map { addressValue[:], - []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, }, // storage domain storage map - // NOTE: resource value slab is inlined. + // NOTE: storage domain storage map is empty because it is inlined in account storage map. { addressValue[:], - []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5}, }, }, writes, @@ -6045,11 +6061,12 @@ func TestRuntimeStorageWriteback(t *testing.T) { assert.Equal(t, []ownerKeyPair{ - // Storage map is modified because resource value is inlined in storage map + // Account storage map is modified because resource value is inlined in storage map, + // and storage map is inlined in account storage map. // NOTE: resource value slab is empty. { addressValue[:], - []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, }, }, writes, @@ -7562,7 +7579,7 @@ func TestRuntimeComputationMetring(t *testing.T) { `, ok: true, hits: 3, - intensity: 76, + intensity: 115, }, } diff --git a/runtime/sharedstate_test.go b/runtime/sharedstate_test.go index 3008c85fff..f1d0ad703e 100644 --- a/runtime/sharedstate_test.go +++ b/runtime/sharedstate_test.go @@ -28,6 +28,7 @@ import ( "github.com/onflow/cadence/common" "github.com/onflow/cadence/interpreter" . "github.com/onflow/cadence/runtime" + "github.com/onflow/cadence/stdlib" . "github.com/onflow/cadence/test_utils/runtime_utils" ) @@ -211,14 +212,62 @@ func TestRuntimeSharedState(t *testing.T) { require.Equal(t, []ownerKeyPair{ + // Read account domain register to check if it is a migrated account + // Read returns no value. + { + owner: signerAddress[:], + key: []byte(AccountStorageKey), + }, + // Read contract domain register to check if it is a unmigrated account + // Read returns no value. { owner: signerAddress[:], key: []byte(StorageDomainContract), }, + // Read all available domain registers to check if it is a new account + // Read returns no value. 
+ { + owner: signerAddress[:], + key: []byte(common.PathDomainStorage.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.PathDomainPrivate.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.PathDomainPublic.Identifier()), + }, { owner: signerAddress[:], key: []byte(StorageDomainContract), }, + { + owner: signerAddress[:], + key: []byte(stdlib.InboxStorageDomain), + }, + { + owner: signerAddress[:], + key: []byte(stdlib.CapabilityControllerStorageDomain), + }, + { + owner: signerAddress[:], + key: []byte(stdlib.CapabilityControllerTagStorageDomain), + }, + { + owner: signerAddress[:], + key: []byte(stdlib.PathCapabilityStorageDomain), + }, + { + owner: signerAddress[:], + key: []byte(stdlib.AccountCapabilityStorageDomain), + }, + // Read account domain register + { + owner: signerAddress[:], + key: []byte(AccountStorageKey), + }, + // Read account storage map { owner: signerAddress[:], key: []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, diff --git a/runtime/storage.go b/runtime/storage.go index 7b9a567285..d2559818ad 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -30,17 +30,40 @@ import ( "github.com/onflow/cadence/common/orderedmap" "github.com/onflow/cadence/errors" "github.com/onflow/cadence/interpreter" + "github.com/onflow/cadence/stdlib" ) -const StorageDomainContract = "contract" +const ( + StorageDomainContract = "contract" + AccountStorageKey = "stored" +) type Storage struct { *atree.PersistentSlabStorage - NewStorageMaps *orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex] - storageMaps map[interpreter.StorageKey]*interpreter.StorageMap + + // NewAccountStorageMapSlabIndices contains root slab index of new accounts' storage map. + // The indices are saved using Ledger.SetValue() during Commit(). + // Key is StorageKey{address, accountStorageKey} and value is 8-byte slab index. + NewAccountStorageMapSlabIndices *orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex] + + // unmigratedAccounts are accounts that were accessed but not migrated. + unmigratedAccounts *orderedmap.OrderedMap[common.Address, struct{}] + + // cachedAccountStorageMaps is a cache of account storage maps. + // Key is StorageKey{address, accountStorageKey} and value is account storage map. + cachedAccountStorageMaps map[interpreter.StorageKey]*interpreter.AccountStorageMap + + // cachedDomainStorageMaps is a cache of domain storage maps. + // Key is StorageKey{address, domain} and value is domain storage map. + cachedDomainStorageMaps map[interpreter.StorageKey]*interpreter.DomainStorageMap + + // contractUpdates is a cache of contract updates. + // Key is StorageKey{contract_address, contract_name} and value is contract composite value. 
contractUpdates *orderedmap.OrderedMap[interpreter.StorageKey, *interpreter.CompositeValue] - Ledger atree.Ledger - memoryGauge common.MemoryGauge + + Ledger atree.Ledger + + memoryGauge common.MemoryGauge } var _ atree.SlabStorage = &Storage{} @@ -76,88 +99,319 @@ func NewStorage(ledger atree.Ledger, memoryGauge common.MemoryGauge) *Storage { decodeTypeInfo, ) return &Storage{ - Ledger: ledger, - PersistentSlabStorage: persistentSlabStorage, - storageMaps: map[interpreter.StorageKey]*interpreter.StorageMap{}, - memoryGauge: memoryGauge, + Ledger: ledger, + PersistentSlabStorage: persistentSlabStorage, + cachedAccountStorageMaps: map[interpreter.StorageKey]*interpreter.AccountStorageMap{}, + cachedDomainStorageMaps: map[interpreter.StorageKey]*interpreter.DomainStorageMap{}, + memoryGauge: memoryGauge, } } const storageIndexLength = 8 +// GetStorageMap returns existing or new domain storage map for the given account and domain. func (s *Storage) GetStorageMap( + inter *interpreter.Interpreter, address common.Address, domain string, createIfNotExists bool, ) ( - storageMap *interpreter.StorageMap, + storageMap *interpreter.DomainStorageMap, ) { - key := interpreter.NewStorageKey(s.memoryGauge, address, domain) - storageMap = s.storageMaps[key] - if storageMap == nil { + // Account can be migrated account, new account, or unmigrated account. + // + // + // ### Migrated Account + // + // Migrated account is account with AccountStorageKey register. + // Migrated account has account storage map, which contains domain storage maps + // with domain as key. + // + // If domain exists in the account storage map, domain storage map is returned. + // + // If domain doesn't exist and createIfNotExists is true, + // new domain storage map is created, inserted into account storage map, and returned. + // + // If domain doesn't exist and createIfNotExists is false, nil is returned. + // + // + // ### New Account + // + // New account is account without AccountStorageKey register and without any domain registers. + // NOTE: new account's AccountStorageKey register is persisted in Commit(). + // + // If createIfNotExists is true, + // - new account storage map is created + // - new domain storage map is created + // - domain storage map is inserted into account storage map + // - domain storage map is returned + // + // If createIfNotExists is false, nil is returned. + // + // + // ### Unmigrated Account + // + // Unmigrated account is account with at least one domain register. + // Unmigrated account has domain registers and corresponding domain storage maps. + // + // If domain exists (domain register exists), domain storage map is loaded and returned. + // + // If domain doesn't exist and createIfNotExists is true, + // new domain storage map is created and returned. + // NOTE: given account would be migrated in Commit() since this is write op. + // + // If domain doesn't exist and createIfNotExists is false, nil is returned. + // + // + // ### Migration of unmigrated accounts + // + // Migration happens in Commit() if unmigrated account has write ops. + // NOTE: Commit() is not called by this function. + // + // Specifically, + // - unmigrated account is migrated in Commit() if there are write ops. + // For example, inserting values in unmigrated account triggers migration in Commit(). + // - unmigrated account is unchanged if there are only read ops. + // For example, iterating values in unmigrated account doesn't trigger migration, + // and checking if domain exists doesn't trigger migration. 
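[Editor's note] The comment above enumerates the three account states GetStorageMap has to handle. As a minimal usage sketch (adapted from the tests added later in this patch; the function name is illustrative and not part of the change): a read with createIfNotExists=false never schedules a migration, while a write marks an unmigrated account so that the next Commit() migrates it.

package runtime

import (
	"github.com/onflow/cadence/common"
	"github.com/onflow/cadence/interpreter"
)

// readThenWrite is a sketch of typical caller behavior, not code from this patch.
func readThenWrite(
	inter *interpreter.Interpreter,
	storage *Storage,
	address common.Address,
) error {
	domain := common.PathDomainStorage.Identifier()

	// Read-only access: returns nil if the domain does not exist,
	// and never triggers migration of an unmigrated account.
	if m := storage.GetStorageMap(inter, address, domain, false); m != nil {
		_ = m.ReadValue(nil, interpreter.StringStorageMapKey("answer"))
	}

	// Write access: creates the domain storage map if needed.
	// For an unmigrated account this is a write op, so the account
	// is migrated during Commit().
	m := storage.GetStorageMap(inter, address, domain, true)
	m.WriteValue(
		inter,
		interpreter.StringStorageMapKey("answer"),
		interpreter.NewUnmeteredIntValueFromInt64(42),
	)

	const commitContractUpdates = false
	return storage.Commit(inter, commitContractUpdates)
}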
+ + // Get cached domain storage map if it exists. + + domainStorageKey := interpreter.NewStorageKey(s.memoryGauge, address, domain) + + if domainStorageMap := s.cachedDomainStorageMaps[domainStorageKey]; domainStorageMap != nil { + return domainStorageMap + } - // Load data through the runtime interface + // Get (or create) domain storage map from existing account storage map + // if account is migrated account. - var data []byte - var err error - errors.WrapPanic(func() { - data, err = s.Ledger.GetValue(key.Address[:], []byte(key.Key)) - }) - if err != nil { - panic(interpreter.WrappedExternalError(err)) + accountStorageKey := interpreter.NewStorageKey(s.memoryGauge, address, AccountStorageKey) + + accountStorageMap, err := s.getAccountStorageMap(accountStorageKey) + if err != nil { + panic(err) + } + + if accountStorageMap != nil { + // This is migrated account. + + // Get (or create) domain storage map from account storage map. + domainStorageMap := accountStorageMap.GetDomain(s.memoryGauge, inter, domain, createIfNotExists) + + // Cache domain storage map + if domainStorageMap != nil { + s.cachedDomainStorageMaps[domainStorageKey] = domainStorageMap } - dataLength := len(data) - isStorageIndex := dataLength == storageIndexLength - if dataLength > 0 && !isStorageIndex { - // TODO: add dedicated error type? + return domainStorageMap + } + + // At this point, account is either new or unmigrated account. + + domainStorageMap, err := getDomainStorageMapFromLegacyDomainRegister(s.Ledger, s.PersistentSlabStorage, address, domain) + if err != nil { + panic(err) + } + + if domainStorageMap != nil { + // This is a unmigrated account with given domain register. + + // Sanity check that domain is among expected domains. + if _, exist := accountDomainsSet[domain]; !exist { + // TODO: maybe also log unexpected domain. panic(errors.NewUnexpectedError( - "invalid storage index for storage map with domain '%s': expected length %d, got %d", - domain, storageIndexLength, dataLength, + "unexpected domain %s exists for account %s", domain, address.String(), )) } - // Load existing storage or create and store new one + // Cache domain storage map + s.cachedDomainStorageMaps[domainStorageKey] = domainStorageMap - atreeAddress := atree.Address(address) + // Add account to unmigrated account list + s.addUnmigratedAccount(address) - if isStorageIndex { - var slabIndex atree.SlabIndex - copy(slabIndex[:], data[:]) - storageMap = s.loadExistingStorageMap(atreeAddress, slabIndex) - } else if createIfNotExists { - storageMap = s.StoreNewStorageMap(atreeAddress, domain) - } + return domainStorageMap + } - if storageMap != nil { - s.storageMaps[key] = storageMap - } + // At this point, account is either new account or unmigrated account without given domain. + + // Domain doesn't exist. Return early if createIfNotExists is false. 
+ + if !createIfNotExists { + return nil + } + + // Handle unmigrated account + unmigrated, err := s.isUnmigratedAccount(address) + if err != nil { + panic(err) + } + if unmigrated { + // Add account to unmigrated account list + s.addUnmigratedAccount(address) + + // Create new domain storage map + domainStorageMap := interpreter.NewDomainStorageMap(s.memoryGauge, s, atree.Address(address)) + + // Cache new domain storage map + s.cachedDomainStorageMaps[domainStorageKey] = domainStorageMap + + return domainStorageMap + } + + // Handle new account + + // Create account storage map + accountStorageMap = interpreter.NewAccountStorageMap(s.memoryGauge, s, atree.Address(address)) + + // Cache account storage map + s.cachedAccountStorageMaps[accountStorageKey] = accountStorageMap + + // Create new domain storage map as an element in account storage map + domainStorageMap = accountStorageMap.NewDomain(s.memoryGauge, inter, domain) + + // Cache domain storage map + s.cachedDomainStorageMaps[domainStorageKey] = domainStorageMap + + // Save new account and its account storage map root SlabID to new accout list + s.addNewAccount(accountStorageKey, accountStorageMap.SlabID().Index()) + + return domainStorageMap +} + +// getAccountStorageMap returns AccountStorageMap if exists, or nil otherwise. +func (s *Storage) getAccountStorageMap(accountStorageKey interpreter.StorageKey) (*interpreter.AccountStorageMap, error) { + + // Return cached account storage map if available. + + accountStorageMap := s.cachedAccountStorageMaps[accountStorageKey] + if accountStorageMap != nil { + return accountStorageMap, nil + } + + // Load account storage map if account storage register exists. + + accountStorageSlabIndex, accountStorageRegisterExists, err := getSlabIndexFromRegisterValue( + s.Ledger, + accountStorageKey.Address, + []byte(accountStorageKey.Key), + ) + if err != nil { + return nil, err + } + if !accountStorageRegisterExists { + return nil, nil + } + + slabID := atree.NewSlabID( + atree.Address(accountStorageKey.Address), + accountStorageSlabIndex, + ) + + accountStorageMap = interpreter.NewAccountStorageMapWithRootID(s, slabID) + + // Cache account storage map + + s.cachedAccountStorageMaps[accountStorageKey] = accountStorageMap + + return accountStorageMap, nil +} + +// getDomainStorageMapFromLegacyDomainRegister returns domain storage map from legacy domain register. 
+func getDomainStorageMapFromLegacyDomainRegister( + ledger atree.Ledger, + storage atree.SlabStorage, + address common.Address, + domain string, +) (*interpreter.DomainStorageMap, error) { + domainStorageSlabIndex, domainRegisterExists, err := getSlabIndexFromRegisterValue(ledger, address, []byte(domain)) + if err != nil { + return nil, err + } + if !domainRegisterExists { + return nil, nil } - return storageMap + slabID := atree.NewSlabID(atree.Address(address), domainStorageSlabIndex) + return interpreter.NewDomainStorageMapWithRootID(storage, slabID), nil } -func (s *Storage) loadExistingStorageMap(address atree.Address, slabIndex atree.SlabIndex) *interpreter.StorageMap { +func (s *Storage) addUnmigratedAccount(address common.Address) { + if s.unmigratedAccounts == nil { + s.unmigratedAccounts = &orderedmap.OrderedMap[common.Address, struct{}]{} + } + if !s.unmigratedAccounts.Contains(address) { + s.unmigratedAccounts.Set(address, struct{}{}) + } +} - slabID := atree.NewSlabID(address, slabIndex) +func (s *Storage) addNewAccount(accountStorageKey interpreter.StorageKey, slabIndex atree.SlabIndex) { + if s.NewAccountStorageMapSlabIndices == nil { + s.NewAccountStorageMapSlabIndices = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} + } + s.NewAccountStorageMapSlabIndices.Set(accountStorageKey, slabIndex) +} - return interpreter.NewStorageMapWithRootID(s, slabID) +// isUnmigratedAccount returns true if given account has any domain registers. +func (s *Storage) isUnmigratedAccount(address common.Address) (bool, error) { + if s.unmigratedAccounts != nil && + s.unmigratedAccounts.Contains(address) { + return true, nil + } + + // Check most frequently used domains first, such as storage, public, private. + for _, domain := range AccountDomains { + _, domainExists, err := getSlabIndexFromRegisterValue(s.Ledger, address, []byte(domain)) + if err != nil { + return false, err + } + if domainExists { + return true, nil + } + } + + return false, nil } -func (s *Storage) StoreNewStorageMap(address atree.Address, domain string) *interpreter.StorageMap { - storageMap := interpreter.NewStorageMap(s.memoryGauge, s, address) +// getSlabIndexFromRegisterValue returns register value as atree.SlabIndex. +// This function returns error if +// - underlying ledger panics, or +// - underlying ledger returns error when retrieving ledger value, or +// - retrieved ledger value is invalid (for atree.SlabIndex). +func getSlabIndexFromRegisterValue( + ledger atree.Ledger, + address common.Address, + key []byte, +) (atree.SlabIndex, bool, error) { + var data []byte + var err error + errors.WrapPanic(func() { + data, err = ledger.GetValue(address[:], key) + }) + if err != nil { + return atree.SlabIndex{}, false, interpreter.WrappedExternalError(err) + } + + dataLength := len(data) - slabIndex := storageMap.SlabID().Index() + if dataLength == 0 { + return atree.SlabIndex{}, false, nil + } - storageKey := interpreter.NewStorageKey(s.memoryGauge, common.Address(address), domain) + isStorageIndex := dataLength == storageIndexLength + if !isStorageIndex { + // Invalid data in register - if s.NewStorageMaps == nil { - s.NewStorageMaps = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} + // TODO: add dedicated error type? 
+ return atree.SlabIndex{}, false, errors.NewUnexpectedError( + "invalid storage index for storage map of account '%x': expected length %d, got %d", + address[:], storageIndexLength, dataLength, + ) } - s.NewStorageMaps.Set(storageKey, slabIndex) - return storageMap + return atree.SlabIndex(data), true, nil } func (s *Storage) recordContractUpdate( @@ -216,7 +470,7 @@ func (s *Storage) writeContractUpdate( key interpreter.StorageKey, contractValue *interpreter.CompositeValue, ) { - storageMap := s.GetStorageMap(key.Address, StorageDomainContract, true) + storageMap := s.GetStorageMap(inter, key.Address, StorageDomainContract, true) // NOTE: pass nil instead of allocating a Value-typed interface that points to nil storageMapKey := interpreter.StringStorageMapKey(key.Key) if contractValue == nil { @@ -249,6 +503,12 @@ func (s *Storage) commit(inter *interpreter.Interpreter, commitContractUpdates b return err } + // Migrate accounts that have write ops before calling PersistentSlabStorage.FastCommit(). + err = s.migrateAccountsIfNeeded(inter) + if err != nil { + return err + } + // Commit the underlying slab storage's writes size := s.PersistentSlabStorage.DeltasSizeWithoutTempAddresses() @@ -270,11 +530,11 @@ func (s *Storage) commit(inter *interpreter.Interpreter, commitContractUpdates b } func (s *Storage) commitNewStorageMaps() error { - if s.NewStorageMaps == nil { + if s.NewAccountStorageMapSlabIndices == nil { return nil } - for pair := s.NewStorageMaps.Oldest(); pair != nil; pair = pair.Next() { + for pair := s.NewAccountStorageMapSlabIndices.Oldest(); pair != nil; pair = pair.Next() { var err error errors.WrapPanic(func() { err = s.Ledger.SetValue( @@ -291,6 +551,68 @@ func (s *Storage) commitNewStorageMaps() error { return nil } +func (s *Storage) migrateAccountsIfNeeded(inter *interpreter.Interpreter) error { + if s.unmigratedAccounts == nil || s.unmigratedAccounts.Len() == 0 { + return nil + } + return s.migrateAccounts(inter) +} + +func (s *Storage) migrateAccounts(inter *interpreter.Interpreter) error { + // Predicate function allows migration for accounts with write ops. + migrateAccountPred := func(address common.Address) bool { + return s.PersistentSlabStorage.HasUnsavedChanges(atree.Address(address)) + } + + // getDomainStorageMap function returns cached domain storage map if it is available + // before loading domain storage map from storage. + // This is necessary to migrate uncommitted (new) but cached domain storage map. + getDomainStorageMap := func( + ledger atree.Ledger, + storage atree.SlabStorage, + address common.Address, + domain string, + ) (*interpreter.DomainStorageMap, error) { + domainStorageKey := interpreter.NewStorageKey(s.memoryGauge, address, domain) + + // Get cached domain storage map if available. 
+ domainStorageMap := s.cachedDomainStorageMaps[domainStorageKey] + + if domainStorageMap != nil { + return domainStorageMap, nil + } + + return getDomainStorageMapFromLegacyDomainRegister(ledger, storage, address, domain) + } + + migrator := NewDomainRegisterMigration(s.Ledger, s.PersistentSlabStorage, inter, s.memoryGauge) + migrator.SetGetDomainStorageMapFunc(getDomainStorageMap) + + migratedAccounts, err := migrator.MigrateAccounts(s.unmigratedAccounts, migrateAccountPred) + if err != nil { + return err + } + + if migratedAccounts == nil { + return nil + } + + // Update internal state with migrated accounts + for pair := migratedAccounts.Oldest(); pair != nil; pair = pair.Next() { + address := pair.Key + accountStorageMap := pair.Value + + // Cache migrated account storage map + accountStorageKey := interpreter.NewStorageKey(s.memoryGauge, address, AccountStorageKey) + s.cachedAccountStorageMaps[accountStorageKey] = accountStorageMap + + // Remove migrated accounts from unmigratedAccounts + s.unmigratedAccounts.Delete(address) + } + + return nil +} + func (s *Storage) CheckHealth() error { // Check slab storage health rootSlabIDs, err := atree.CheckStorageHealth(s, -1) @@ -311,28 +633,51 @@ func (s *Storage) CheckHealth() error { accountRootSlabIDs[rootSlabID] = struct{}{} } - // Check that each storage map refers to an existing slab. - - found := map[atree.SlabID]struct{}{} + // Check that account storage maps and unmigrated domain storage maps + // match returned root slabs from atree.CheckStorageHealth. var storageMapStorageIDs []atree.SlabID - for _, storageMap := range s.storageMaps { //nolint:maprange + // Get cached account storage map slab IDs. + for _, storageMap := range s.cachedAccountStorageMaps { //nolint:maprange storageMapStorageIDs = append( storageMapStorageIDs, storageMap.SlabID(), ) } + // Get cached unmigrated domain storage map slab IDs + for storageKey, storageMap := range s.cachedDomainStorageMaps { //nolint:maprange + address := storageKey.Address + + if s.unmigratedAccounts != nil && + s.unmigratedAccounts.Contains(address) { + + domainValueID := storageMap.ValueID() + + slabID := atree.NewSlabID( + atree.Address(address), + atree.SlabIndex(domainValueID[8:]), + ) + + storageMapStorageIDs = append( + storageMapStorageIDs, + slabID, + ) + } + } + sort.Slice(storageMapStorageIDs, func(i, j int) bool { a := storageMapStorageIDs[i] b := storageMapStorageIDs[j] return a.Compare(b) < 0 }) + found := map[atree.SlabID]struct{}{} + for _, storageMapStorageID := range storageMapStorageIDs { if _, ok := accountRootSlabIDs[storageMapStorageID]; !ok { - return errors.NewUnexpectedError("account storage map points to non-existing slab %s", storageMapStorageID) + return errors.NewUnexpectedError("account storage map (and unmigrated domain storage map) points to non-root slab %s", storageMapStorageID) } found[storageMapStorageID] = struct{}{} @@ -381,3 +726,23 @@ func (UnreferencedRootSlabsError) IsInternalError() {} func (e UnreferencedRootSlabsError) Error() string { return fmt.Sprintf("slabs not referenced: %s", e.UnreferencedRootSlabIDs) } + +var AccountDomains = []string{ + common.PathDomainStorage.Identifier(), + common.PathDomainPrivate.Identifier(), + common.PathDomainPublic.Identifier(), + StorageDomainContract, + stdlib.InboxStorageDomain, + stdlib.CapabilityControllerStorageDomain, + stdlib.CapabilityControllerTagStorageDomain, + stdlib.PathCapabilityStorageDomain, + stdlib.AccountCapabilityStorageDomain, +} + +var accountDomainsSet = func() map[string]struct{} { + 
m := make(map[string]struct{}) + for _, domain := range AccountDomains { + m[domain] = struct{}{} + } + return m +}() diff --git a/runtime/storage_test.go b/runtime/storage_test.go index 5a7bb60bc9..55090be2be 100644 --- a/runtime/storage_test.go +++ b/runtime/storage_test.go @@ -23,7 +23,10 @@ import ( "encoding/hex" "fmt" "math/rand" + "slices" "sort" + "strconv" + "strings" "testing" "github.com/onflow/atree" @@ -36,6 +39,7 @@ import ( "github.com/onflow/cadence/encoding/json" "github.com/onflow/cadence/interpreter" . "github.com/onflow/cadence/runtime" + "github.com/onflow/cadence/stdlib" . "github.com/onflow/cadence/test_utils/common_utils" . "github.com/onflow/cadence/test_utils/interpreter_utils" . "github.com/onflow/cadence/test_utils/runtime_utils" @@ -67,10 +71,10 @@ func withWritesToStorage( var slabIndex atree.SlabIndex binary.BigEndian.PutUint32(slabIndex[:], randomIndex) - if storage.NewStorageMaps == nil { - storage.NewStorageMaps = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} + if storage.NewAccountStorageMapSlabIndices == nil { + storage.NewAccountStorageMapSlabIndices = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} } - storage.NewStorageMaps.Set(storageKey, slabIndex) + storage.NewAccountStorageMapSlabIndices.Set(storageKey, slabIndex) } handler(storage, inter) @@ -197,16 +201,22 @@ func TestRuntimeStorageWrite(t *testing.T) { assert.Equal(t, []ownerKeyPair{ - // storage index to storage domain storage map + // storage index to account storage map { []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, - []byte("storage"), + []byte(AccountStorageKey), }, // storage domain storage map + // NOTE: storage domain storage map is empty because it is inlined in account storage map { []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, }, + // account storage map + { + []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, + }, }, writes, ) @@ -1766,18 +1776,18 @@ func TestRuntimeResourceOwnerChange(t *testing.T) { assert.Equal(t, []string{ // account 0x1: - // NOTE: with atree inlining, contract is inlined in contract map - // storage map (domain key + map slab) - // + contract map (domain key + map slab) + // NOTE: with account storage map and atree inlining, + // both storage domain storage map (with inlined storage data) + // and contract domain storage map (with inlined contract data) + // are inlined in account storage map. "\x00\x00\x00\x00\x00\x00\x00\x01|$\x00\x00\x00\x00\x00\x00\x00\x02", - "\x00\x00\x00\x00\x00\x00\x00\x01|$\x00\x00\x00\x00\x00\x00\x00\x04", - "\x00\x00\x00\x00\x00\x00\x00\x01|contract", - "\x00\x00\x00\x00\x00\x00\x00\x01|storage", + "\x00\x00\x00\x00\x00\x00\x00\x01|stored", // account 0x2 - // NOTE: with atree inlining, resource is inlined in storage map - // storage map (domain key + map slab) + // NOTE: with account storage map and atree inlining, + // storage domain storage map (with inlined resource) + // is inlined in account storage map. 
"\x00\x00\x00\x00\x00\x00\x00\x02|$\x00\x00\x00\x00\x00\x00\x00\x02", - "\x00\x00\x00\x00\x00\x00\x00\x02|storage", + "\x00\x00\x00\x00\x00\x00\x00\x02|stored", }, nonEmptyKeys, ) @@ -3100,7 +3110,7 @@ func TestRuntimeStorageInternalAccess(t *testing.T) { }) require.NoError(t, err) - storageMap := storage.GetStorageMap(address, common.PathDomainStorage.Identifier(), false) + storageMap := storage.GetStorageMap(inter, address, common.PathDomainStorage.Identifier(), false) require.NotNil(t, storageMap) // Read first @@ -6229,3 +6239,1788 @@ func TestRuntimeStorageReferenceAccess(t *testing.T) { require.ErrorAs(t, err, &interpreter.DereferenceError{}) }) } + +type ( + domainStorageMapValues map[interpreter.StorageMapKey]interpreter.Value + accountStorageMapValues map[string]domainStorageMapValues +) + +func TestRuntimeStorageForNewAccount(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + // This test reads non-existent domain storage map and commit changes. + // pre-condition: empty storage + // post-condition: empty storage + // migration: no migration + t.Run("read non-existent domain storage map", func(t *testing.T) { + + var writeCount int + + // Create empty storage + ledger := NewTestLedger(nil, LedgerOnWriteCounter(&writeCount)) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + domain := common.PathDomainStorage.Identifier() + + // Get non-existent domain storage map + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.Nil(t, domainStorageMap) + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health + err = storage.CheckHealth() + require.NoError(t, err) + + // Check number of writes to underlying storage + require.Equal(t, 0, writeCount) + }) + + // This test creates and writes to new domain storage map and commit changes. + // pre-condition: empty storage + // post-condition: storage containing + // - account register + // - account storage map + // - zero or more non-inlined domain storage map + // migration: no migraiton for new account. 
+ createDomainTestCases := []struct { + name string + newDomains []string + domainStorageMapCount int + inlined bool + }{ + {name: "empty domain storage map", newDomains: []string{common.PathDomainStorage.Identifier()}, domainStorageMapCount: 0, inlined: true}, + {name: "small domain storage map", newDomains: []string{common.PathDomainStorage.Identifier()}, domainStorageMapCount: 10, inlined: true}, + {name: "large domain storage map", newDomains: []string{common.PathDomainStorage.Identifier()}, domainStorageMapCount: 20, inlined: false}, + } + + for _, tc := range createDomainTestCases { + t.Run("create "+tc.name, func(t *testing.T) { + + var writeEntries []OwnerKeyValue + + // Create empty storage + ledger := NewTestLedger(nil, LedgerOnWriteEntries(&writeEntries)) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + accountValues := make(accountStorageMapValues) + + // Create and write to domain storage map (createIfNotExists is true) + for _, domain := range tc.newDomains { + // Create new domain storage map + const createIfNotExists = true + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + // Write to domain storage map + accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, tc.domainStorageMapCount, random) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 2+len(tc.newDomains), len(writeEntries)) + + // writes[0]: account register + require.Equal(t, address[:], writeEntries[0].Owner) + require.Equal(t, []byte(AccountStorageKey), writeEntries[0].Key) + require.Equal(t, []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[0].Value) + + // writes[1]: account storage map + require.Equal(t, address[:], writeEntries[1].Owner) + require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[1].Key) + require.True(t, len(writeEntries[1].Value) > 0) + + for i := range len(tc.newDomains) { + // writes[2+i]: domain storage map + + writeEntryIndex := 2 + i + owner := writeEntries[writeEntryIndex].Owner + key := writeEntries[writeEntryIndex].Key + value := writeEntries[writeEntryIndex].Value + + var slabKey [9]byte + slabKey[0] = '$' + binary.BigEndian.PutUint64(slabKey[1:], uint64(2+i)) + + require.Equal(t, address[:], owner) + require.Equal(t, slabKey[:], key) + + // Domain storage map value is empty if it is inlined in account storage map + if tc.inlined { + require.True(t, len(value) == 0) + } else { + require.True(t, len(value) > 0) + } + } + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + }) + } + + // This test tests storage map operations with intermittent Commit(): + // - create domain storage map and commit + // - write to domain storage map and commit + // - remove all elements from domain storage map and commit + // - read domain storage map and commit + t.Run("create, commit, write, commit, remove, commit", func(t *testing.T) { + // Create empty storage + ledger := NewTestLedger(nil, nil) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + 
random := rand.New(rand.NewSource(42)) + + accountValues := make(accountStorageMapValues) + + domains := []string{ + common.PathDomainStorage.Identifier(), + common.PathDomainPublic.Identifier(), + } + + // Create empty domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = true + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + accountValues[domain] = make(domainStorageMapValues) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Write to existing domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + // Write to domain storage map + const domainStorageMapCount = 2 + accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, domainStorageMapCount, random) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Remove all elements from existing domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + expectedDomainValues := accountValues[domain] + require.Equal(t, uint64(len(expectedDomainValues)), domainStorageMap.Count()) + + // Remove elements from domain storage map + for k := range expectedDomainValues { + existed := domainStorageMap.WriteValue(inter, k, nil) + require.True(t, existed) + + delete(expectedDomainValues, k) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Read domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + }) +} + +func TestRuntimeStorageForMigratedAccount(t *testing.T) { 
+ t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + // newTestLedgerWithMigratedAccount creates a new TestLedger containing + // account storage map with given domains for given address. + newTestLedgerWithMigratedAccount := func( + onRead LedgerOnRead, + onWrite LedgerOnWrite, + address common.Address, + domains []string, + domainStorageMapCount int, + ) (TestLedger, accountStorageMapValues) { + ledger := NewTestLedger(nil, nil) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + accountValues := createAndWriteAccountStorageMap(t, storage, inter, address, domains, domainStorageMapCount, random) + + newLedger := NewTestLedgerWithData(onRead, onWrite, ledger.StoredValues, ledger.StorageIndices) + + return newLedger, accountValues + } + + // This test reads non-existent domain storage map and commit changes. + // pre-condition: storage contains account register and account storage map + // post-condition: no change + // migration: none + t.Run("read non-existent domain storage map", func(t *testing.T) { + existingDomains := []string{ + common.PathDomainStorage.Identifier(), + } + + nonexistentDomain := common.PathDomainPublic.Identifier() + + var writeCount int + + // Create storage with account storage map + const domainStorageMapCount = 5 + ledger, _ := newTestLedgerWithMigratedAccount( + nil, + LedgerOnWriteCounter(&writeCount), + address, + existingDomains, + domainStorageMapCount) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + // Get non-existent domain storage map + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, nonexistentDomain, createIfNotExists) + require.Nil(t, domainStorageMap) + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 0, writeCount) + }) + + // This test reads existing domain storage map and commit changes. 
+ // pre-condition: storage contains account register and account storage map + // post-condition: no change + // migration: none + readExistingDomainTestCases := []struct { + name string + createIfNotExists bool + }{ + {name: "(createIfNotExists is true)", createIfNotExists: true}, + {name: "(createIfNotExists is false)", createIfNotExists: false}, + } + + for _, tc := range readExistingDomainTestCases { + t.Run("read existing domain storage map "+tc.name, func(t *testing.T) { + + existingDomains := []string{common.PathDomainStorage.Identifier()} + + var writeCount int + + // Create storage with account storage map + const domainStorageMapCount = 5 + ledger, accountValues := newTestLedgerWithMigratedAccount( + nil, + LedgerOnWriteCounter(&writeCount), + address, + existingDomains, + domainStorageMapCount, + ) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + // Read existing domain storage map + for domain, domainValues := range accountValues { + domainStorageMap := storage.GetStorageMap(inter, address, domain, tc.createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + for k, expectedV := range domainValues { + v := domainStorageMap.ReadValue(nil, k) + ev, ok := v.(interpreter.EquatableValue) + require.True(t, ok) + require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedV)) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 0, writeCount) + }) + } + + // This test creates and writes to new domain storage map and commit changes. + // pre-condition: storage contains account register and account storage map + // post-condition: storage contains + // - account register + // - account storage map with new domain storage map. 
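[Editor's note] The cases below assert on raw ledger writes. An atree slab register uses a 9-byte key: '$' followed by the big-endian 8-byte slab index, and the account register (AccountStorageKey, "stored") holds just the 8-byte index of the account storage map's root slab. The small helper below mirrors the inline construction used by these test cases; it is illustrative only.

import "encoding/binary"

// expectedSlabKey builds the ledger key of an atree slab register:
// '$' followed by the big-endian 8-byte slab index.
func expectedSlabKey(index uint64) []byte {
	var key [9]byte
	key[0] = '$'
	binary.BigEndian.PutUint64(key[1:], index)
	return key[:]
}

For example, expectedSlabKey(1) yields []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, the key the tests expect for the account storage map's root slab of a migrated account.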
+ createDomainTestCases := []struct { + name string + existingDomains []string + newDomains []string + existingDomainStorageMapCount int + newDomainStorageMapCount int + isNewDomainStorageMapInlined bool + }{ + { + name: "empty domain storage map", + existingDomains: []string{common.PathDomainStorage.Identifier()}, + existingDomainStorageMapCount: 5, + newDomains: []string{common.PathDomainPublic.Identifier()}, + newDomainStorageMapCount: 0, + isNewDomainStorageMapInlined: true, + }, + { + name: "small domain storage map", + existingDomains: []string{common.PathDomainStorage.Identifier()}, + existingDomainStorageMapCount: 5, + newDomains: []string{common.PathDomainPublic.Identifier()}, + newDomainStorageMapCount: 10, + isNewDomainStorageMapInlined: true, + }, + { + name: "large domain storage map", + existingDomains: []string{common.PathDomainStorage.Identifier()}, + existingDomainStorageMapCount: 5, + newDomains: []string{common.PathDomainPublic.Identifier()}, + newDomainStorageMapCount: 20, + isNewDomainStorageMapInlined: false, + }, + } + + for _, tc := range createDomainTestCases { + t.Run("create and write "+tc.name, func(t *testing.T) { + + var writeEntries []OwnerKeyValue + + // Create storage with existing account storage map + ledger, accountValues := newTestLedgerWithMigratedAccount( + nil, + LedgerOnWriteEntries(&writeEntries), + address, + tc.existingDomains, + tc.existingDomainStorageMapCount, + ) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + lastIndex := ledger.StorageIndices[string(address[:])] + + random := rand.New(rand.NewSource(42)) + + // Create and write to domain storage map (createIfNotExists is true) + for _, domain := range tc.newDomains { + const createIfNotExists = true + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + // Write elements to to domain storage map + accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, tc.newDomainStorageMapCount, random) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 1+len(tc.newDomains), len(writeEntries)) + + // writes[0]: account storage map + // account storage map is updated to include new domains. 
+ require.Equal(t, address[:], writeEntries[0].Owner) + require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[0].Key) + require.True(t, len(writeEntries[0].Value) > 0) + + for i := range len(tc.newDomains) { + // writes[1+i]: domain storage map + // domain storage map value is empty if it is inlined in account storage map + + writeEntryIndex := 1 + i + owner := writeEntries[writeEntryIndex].Owner + key := writeEntries[writeEntryIndex].Key + value := writeEntries[writeEntryIndex].Value + + var slabKey [9]byte + slabKey[0] = '$' + binary.BigEndian.PutUint64(slabKey[1:], lastIndex+1+uint64(i)) + + require.Equal(t, address[:], owner) + require.Equal(t, slabKey[:], key) + + if tc.isNewDomainStorageMapInlined { + require.True(t, len(value) == 0) + } else { + require.True(t, len(value) > 0) + } + } + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + }) + } + + // This test reads and writes to existing domain storage map and commit changes. + // pre-condition: storage contains account register and account storage map + // post-condition: storage contains + // - account register + // - account storage map with updated domain storage map. + t.Run("read and write to existing domain storage map", func(t *testing.T) { + + var writeEntries []OwnerKeyValue + + existingDomains := []string{common.PathDomainStorage.Identifier()} + const existingDomainStorageMapCount = 5 + + // Create storage with account storage map + ledger, accountValues := newTestLedgerWithMigratedAccount( + nil, + LedgerOnWriteEntries(&writeEntries), + address, + existingDomains, + existingDomainStorageMapCount, + ) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + // Write to existing domain storage map (createIfNotExists is false) + for _, domain := range existingDomains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + domainValues := accountValues[domain] + + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + domainKeys := make([]interpreter.StorageMapKey, 0, len(domainValues)) + for k := range domainValues { //nolint:maprange + domainKeys = append(domainKeys, k) + } + + // Update or remove existing elements + for i, k := range domainKeys { + if i%2 == 0 { + n := random.Int() + newValue := interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + + // Update existing element + existed := domainStorageMap.WriteValue(inter, k, newValue) + require.True(t, existed) + + domainValues[k] = newValue + } else { + // Remove existing element + existed := domainStorageMap.WriteValue(inter, k, nil) + require.True(t, existed) + + delete(domainValues, k) + } + } + + // Write new elements + const newElementCount = 2 + newDomainValues := writeToDomainStorageMap(inter, domainStorageMap, newElementCount, random) + + for k, v := range newDomainValues { + domainValues[k] = v + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 1, len(writeEntries)) + + // writes[0]: account storage map + // account storage map is updated because inlined domain storage map is updated. 
+ require.Equal(t, address[:], writeEntries[0].Owner) + require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[0].Key) + require.True(t, len(writeEntries[0].Value) > 0) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + }) + + // This test tests storage map operations with intermittent Commit(): + // - read domain storage map and commit + // - write to domain storage map and commit + // - remove all elements from domain storage map and commit + // - read domain storage map and commit + t.Run("read, commit, update, commit, remove, commit", func(t *testing.T) { + + domains := []string{ + common.PathDomainStorage.Identifier(), + common.PathDomainPublic.Identifier(), + } + const domainStorageMapCount = 5 + + // Create storage with existing account storage map + ledger, accountValues := newTestLedgerWithMigratedAccount( + nil, + nil, + address, + domains, + domainStorageMapCount, + ) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + // Read domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + domainValues := accountValues[domain] + + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + for k, expectedValue := range domainValues { + v := domainStorageMap.ReadValue(nil, k) + ev := v.(interpreter.EquatableValue) + require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue)) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Write to existing domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + domainValues := accountValues[domain] + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + // Write to domain storage map + const domainStorageMapCount = 2 + newDomainValues := writeToDomainStorageMap(inter, domainStorageMap, domainStorageMapCount, random) + for k, v := range newDomainValues { + domainValues[k] = v + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Remove all elements from existing domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + expectedDomainValues := accountValues[domain] + require.Equal(t, uint64(len(expectedDomainValues)), domainStorageMap.Count()) + + // Remove elements from domain storage map + for k := range 
expectedDomainValues { + existed := domainStorageMap.WriteValue(inter, k, nil) + require.True(t, existed) + + delete(expectedDomainValues, k) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Read domain storage map + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + }) +} + +func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { + + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + newTestLedgerWithUnmigratedAccount := func( + onRead LedgerOnRead, + onWrite LedgerOnWrite, + address common.Address, + domains []string, + domainStorageMapCount int, + ) (TestLedger, accountStorageMapValues) { + ledger := NewTestLedger(nil, nil) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreter(t) + + accountValues := make(accountStorageMapValues) + + random := rand.New(rand.NewSource(42)) + + for _, domain := range domains { + accountValues[domain] = make(domainStorageMapValues) + + // Create domain storage map + domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) + + // Write domain register + domainStorageMapValueID := domainStorageMap.ValueID() + err := ledger.SetValue(address[:], []byte(domain), domainStorageMapValueID[8:]) + require.NoError(t, err) + + // Write elements to to domain storage map + for len(accountValues[domain]) < domainStorageMapCount { + n := random.Int() + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + value := interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + + _ = domainStorageMap.WriteValue(inter, key, value) + + accountValues[domain][key] = value + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Create a new storage + newLedger := NewTestLedgerWithData(onRead, onWrite, ledger.StoredValues, ledger.StorageIndices) + + return newLedger, accountValues + } + + // This test reads non-existent domain storage map and commit changes. + // pre-condition: storage contains domain register and domain storage map + // post-condition: no change + // migration: none because only read ops. 
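[Editor's note] The helper above lays out an unmigrated account on disk: one register per domain (e.g. "storage") whose 8-byte value is the slab index of that domain's storage map, with no "stored" register. A migrated account instead has a single AccountStorageKey register pointing at the account storage map. The sketch below shows how raw registers can be inspected to tell the two layouts apart, assuming only that layout; the runtime itself makes this decision via Storage.getAccountStorageMap and Storage.isUnmigratedAccount.

import (
	"github.com/onflow/atree"

	"github.com/onflow/cadence/common"
	"github.com/onflow/cadence/runtime"
)

// hasMigratedLayout reports whether the account's registers follow the
// migrated layout (a single account register under AccountStorageKey).
// Sketch only, not part of this patch.
func hasMigratedLayout(ledger atree.Ledger, address common.Address) (bool, error) {
	value, err := ledger.GetValue(address[:], []byte(runtime.AccountStorageKey))
	if err != nil {
		return false, err
	}
	// A migrated account stores the 8-byte slab index of its account storage
	// map root slab under the "stored" register; an unmigrated account has
	// per-domain registers (e.g. "storage", "contract") instead.
	return len(value) == 8, nil
}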
+ t.Run("read non-existent domain storage map", func(t *testing.T) { + existingDomains := []string{ + common.PathDomainStorage.Identifier(), + } + + var writeCount int + + // Create storage with unmigrated accounts + const domainStorageMapCount = 5 + ledger, _ := newTestLedgerWithUnmigratedAccount( + nil, + LedgerOnWriteCounter(&writeCount), + address, + existingDomains, + domainStorageMapCount) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + // Get non-existent domain storage map + const createIfNotExists = false + nonexistingDomain := common.PathDomainPublic.Identifier() + domainStorageMap := storage.GetStorageMap(inter, address, nonexistingDomain, createIfNotExists) + require.Nil(t, domainStorageMap) + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check there are no writes to underlying storage + require.Equal(t, 0, writeCount) + }) + + // This test reads existing domain storage map and commit changes. + // pre-condition: storage contains domain register and domain storage map + // post-condition: no change + // migration: none because only read ops + readExistingDomainTestCases := []struct { + name string + createIfNotExists bool + }{ + {name: "(createIfNotExists is true)", createIfNotExists: true}, + {name: "(createIfNotExists is false)", createIfNotExists: false}, + } + + for _, tc := range readExistingDomainTestCases { + t.Run("read existing domain storage map "+tc.name, func(t *testing.T) { + + var writeCount int + + existingDomains := []string{common.PathDomainStorage.Identifier()} + const existingDomainStorageMapCount = 5 + + // Create storage with existing domain storage map + ledger, accountValues := newTestLedgerWithUnmigratedAccount( + nil, + LedgerOnWriteCounter(&writeCount), + address, + existingDomains, + existingDomainStorageMapCount, + ) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + // Read existing domain storage map + for domain, domainValues := range accountValues { + domainStorageMap := storage.GetStorageMap(inter, address, domain, tc.createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + // Read elements to to domain storage map + for k, expectedV := range domainValues { + v := domainStorageMap.ReadValue(nil, k) + ev, ok := v.(interpreter.EquatableValue) + require.True(t, ok) + require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedV)) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 0, writeCount) + }) + } + + // This test creates and writes to new domain storage map and commit changes. + // pre-condition: storage contains + // - domain register + // - domain storage map + // post-condition: storage contains + // - account register + // - account storage map with existing and new domain storage map. 
+ // migration: yes + createDomainTestCases := []struct { + name string + existingDomains []string + newDomains []string + existingDomainStorageMapCount int + newDomainStorageMapCount int + isNewDomainStorageMapInlined bool + }{ + { + name: "empty domain storage map", + existingDomains: []string{common.PathDomainStorage.Identifier()}, + existingDomainStorageMapCount: 5, + newDomains: []string{common.PathDomainPublic.Identifier()}, + newDomainStorageMapCount: 0, + isNewDomainStorageMapInlined: true, + }, + { + name: "small domain storage map", + existingDomains: []string{common.PathDomainStorage.Identifier()}, + existingDomainStorageMapCount: 5, + newDomains: []string{common.PathDomainPublic.Identifier()}, + newDomainStorageMapCount: 10, + isNewDomainStorageMapInlined: true, + }, + { + name: "large domain storage map", + existingDomains: []string{common.PathDomainStorage.Identifier()}, + existingDomainStorageMapCount: 5, + newDomains: []string{common.PathDomainPublic.Identifier()}, + newDomainStorageMapCount: 20, + isNewDomainStorageMapInlined: false, + }, + } + + for _, tc := range createDomainTestCases { + t.Run("create and write "+tc.name, func(t *testing.T) { + + var writeEntries []OwnerKeyValue + + // Create storage with existing account storage map + ledger, accountValues := newTestLedgerWithUnmigratedAccount( + nil, + LedgerOnWriteEntries(&writeEntries), + address, + tc.existingDomains, + tc.existingDomainStorageMapCount, + ) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + // Create and write to new domain storage map + for _, domain := range tc.newDomains { + const createIfNotExists = true + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + // Write elements to to domain storage map + accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, tc.newDomainStorageMapCount, random) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + // writes include: + // - empty registers for all existing and new domains + // - 1 account register + // - 1 account storage map register + // - other non-inlined domain storage map + require.True(t, len(writeEntries) > 1+len(tc.existingDomains)+len(tc.newDomains)) + + i := 0 + for _, domain := range AccountDomains { + + if slices.Contains(tc.existingDomains, domain) || + slices.Contains(tc.newDomains, domain) { + + // Existing and new domain registers are removed. + // Removing new (non-existent) domain registers is no-op. 
+ require.Equal(t, address[:], writeEntries[i].Owner) + require.Equal(t, []byte(domain), writeEntries[i].Key) + require.True(t, len(writeEntries[i].Value) == 0) + + i++ + } + } + + // Account register is created + require.Equal(t, address[:], writeEntries[i].Owner) + require.Equal(t, []byte(AccountStorageKey), writeEntries[i].Key) + require.True(t, len(writeEntries[i].Value) > 0) + + i++ + + // Remaining writes are atree slabs (either empty for migrated domain storage map or non-empty for account storage map) + for ; i < len(writeEntries); i++ { + require.Equal(t, address[:], writeEntries[i].Owner) + require.Equal(t, byte('$'), writeEntries[i].Key[0]) + } + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + }) + } + + // This test reads and writes to existing domain storage map and commit changes. + // pre-condition: storage contains + // - domain register + // - domain storage map + // post-condition: storage contains + // - account register + // - account storage map with updated domain storage map. + // migration: yes + t.Run("read and write to existing domain storage map", func(t *testing.T) { + + var writeEntries []OwnerKeyValue + + domains := []string{common.PathDomainStorage.Identifier()} + const existingDomainStorageMapCount = 5 + + // Create storage with existing domain storage maps + ledger, accountValues := newTestLedgerWithUnmigratedAccount( + nil, + LedgerOnWriteEntries(&writeEntries), + address, + domains, + existingDomainStorageMapCount, + ) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + random := rand.New(rand.NewSource(42)) + + // write to existing domain storage map (createIfNotExists is false) + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + domainValues := accountValues[domain] + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + domainKeys := make([]interpreter.StorageMapKey, 0, len(domainValues)) + for k := range domainValues { //nolint:maprange + domainKeys = append(domainKeys, k) + } + + // Update or remove elements + for i, k := range domainKeys { + if i%2 == 0 { + n := random.Int() + newValue := interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + + // Update existing element + existed := domainStorageMap.WriteValue(inter, k, newValue) + require.True(t, existed) + + domainValues[k] = newValue + } else { + // Remove existing element + existed := domainStorageMap.WriteValue(inter, k, nil) + require.True(t, existed) + + delete(domainValues, k) + } + } + + // Write new elements + const newElementCount = 2 + newDomainValues := writeToDomainStorageMap(inter, domainStorageMap, newElementCount, random) + + for k, v := range newDomainValues { + domainValues[k] = v + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 4, len(writeEntries)) + + // writes[0]: domain register + // storage domain register is removed + require.Equal(t, address[:], writeEntries[0].Owner) + require.Equal(t, []byte(common.PathDomainStorage.Identifier()), writeEntries[0].Key) + require.True(t, len(writeEntries[0].Value) == 0) + + // writes[1]: 
account register
+		// account register is created
+		require.Equal(t, address[:], writeEntries[1].Owner)
+		require.Equal(t, []byte(AccountStorageKey), writeEntries[1].Key)
+		require.True(t, len(writeEntries[1].Value) > 0)
+
+		// writes[2]: storage domain storage map
+		// storage domain storage map is removed because it is inlined in account storage map.
+		require.Equal(t, address[:], writeEntries[2].Owner)
+		require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[2].Key)
+		require.True(t, len(writeEntries[2].Value) == 0)
+
+		// writes[3]: account storage map
+		// account storage map is created with inlined domain storage map.
+		require.Equal(t, address[:], writeEntries[3].Owner)
+		require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, writeEntries[3].Key)
+		require.True(t, len(writeEntries[3].Value) > 0)
+
+		// Verify account storage map data
+		checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues)
+	})
+
+	// This test exercises storage map operations (including account migration) with intermittent Commit():
+	// - read domain storage map and commit
+	// - write to domain storage map and commit (including account migration)
+	// - remove all elements from domain storage map and commit
+	// - read domain storage map and commit
+	t.Run("read, commit, update, commit, remove, commit", func(t *testing.T) {
+
+		var writeEntries []OwnerKeyValue
+
+		domains := []string{common.PathDomainStorage.Identifier()}
+		const domainStorageMapCount = 5
+
+		// Create storage with existing domain storage maps
+		ledger, accountValues := newTestLedgerWithUnmigratedAccount(
+			nil,
+			LedgerOnWriteEntries(&writeEntries),
+			address,
+			domains,
+			domainStorageMapCount,
+		)
+		storage := NewStorage(ledger, nil)
+
+		inter := NewTestInterpreterWithStorage(t, storage)
+
+		random := rand.New(rand.NewSource(42))
+
+		// Read domain storage map and commit
+		{
+			for _, domain := range domains {
+				const createIfNotExists = false
+				domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+
+				domainValues := accountValues[domain]
+
+				require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count())
+
+				for k, expectedValue := range domainValues {
+					v := domainStorageMap.ReadValue(nil, k)
+					ev := v.(interpreter.EquatableValue)
+					require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue))
+				}
+			}
+
+			// Commit changes
+			const commitContractUpdates = false
+			err := storage.Commit(inter, commitContractUpdates)
+			require.NoError(t, err)
+
+			// Check storage health after commit
+			err = storage.CheckHealth()
+			require.NoError(t, err)
+		}
+
+		// Update domain storage map, and commit changes (account is migrated during commit)
+		{
+			// update existing domain storage map (loaded from storage)
+			for _, domain := range domains {
+				const createIfNotExists = false
+				domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists)
+				require.NotNil(t, domainStorageMap)
+
+				domainValues := accountValues[domain]
+
+				require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count())
+
+				domainKeys := make([]interpreter.StorageMapKey, 0, len(domainValues))
+				for k := range domainValues { //nolint:maprange
+					domainKeys = append(domainKeys, k)
+				}
+
+				// Update elements
+				for _, k := range domainKeys {
+					n := random.Int()
+					newValue := interpreter.NewUnmeteredIntValueFromInt64(int64(n))
+
+					// Update existing element
+					existed := 
domainStorageMap.WriteValue(inter, k, newValue) + require.True(t, existed) + + domainValues[k] = newValue + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 4, len(writeEntries)) + + // writes[0]: storage domain register + // Storage domain register is removed + require.Equal(t, address[:], writeEntries[0].Owner) + require.Equal(t, []byte(common.PathDomainStorage.Identifier()), writeEntries[0].Key) + require.True(t, len(writeEntries[0].Value) == 0) + + // writes[1]: account register + // Account register is created + require.Equal(t, address[:], writeEntries[1].Owner) + require.Equal(t, []byte(AccountStorageKey), writeEntries[1].Key) + require.True(t, len(writeEntries[1].Value) > 0) + + // writes[2]: storage domain storage map + // storage domain storage map is removed because it is inlined in account storage map. + require.Equal(t, address[:], writeEntries[2].Owner) + require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, writeEntries[2].Key) + require.True(t, len(writeEntries[2].Value) == 0) + + // writes[3]: account storage map + // account storage map is created with inlined domain storage map. + require.Equal(t, address[:], writeEntries[3].Owner) + require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, writeEntries[3].Key) + require.True(t, len(writeEntries[3].Value) > 0) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + + writeEntries = nil + } + + // Remove all elements from domain storage map, and commit changes + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + domainValues := accountValues[domain] + + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + domainKeys := make([]interpreter.StorageMapKey, 0, len(domainValues)) + for k := range domainValues { //nolint:maprange + domainKeys = append(domainKeys, k) + } + + // Remove elements + for _, k := range domainKeys { + + // Update existing element + existed := domainStorageMap.WriteValue(inter, k, nil) + require.True(t, existed) + + delete(domainValues, k) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Check writes to underlying storage + require.Equal(t, 1, len(writeEntries)) + + // writes[0]: account storage map + // account storage map is modified because inlined domain storage map is modified. 
+ require.Equal(t, address[:], writeEntries[0].Owner) + require.Equal(t, []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, writeEntries[0].Key) + require.True(t, len(writeEntries[0].Value) > 0) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + + // Read domain storage map and commit + { + for _, domain := range domains { + const createIfNotExists = false + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + domainValues := accountValues[domain] + + require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) + + for k, expectedValue := range domainValues { + v := domainStorageMap.ReadValue(nil, k) + ev := v.(interpreter.EquatableValue) + require.True(t, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue)) + } + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) + } + }) +} + +// TestRuntimeStorageDomainStorageMapInlinedState tests inlined state +// of domain storage map when large number of elements are inserted, +// updated, and removed from domain storage map. +// Initially domain storage map is inlined in account storage map, it +// becomes un-inlined when large number elements are inserted, and then +// inlined again when all elements are removed. +func TestRuntimeStorageDomainStorageMapInlinedState(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + address := common.MustBytesToAddress([]byte{0x1}) + + // Create empty storage + ledger := NewTestLedger(nil, nil) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + domains := []string{ + common.PathDomainStorage.Identifier(), + common.PathDomainPublic.Identifier(), + common.PathDomainPrivate.Identifier(), + } + + const domainStorageMapCount = 500 + + accountValues := make(accountStorageMapValues) + + for _, domain := range domains { + + // Create domain storage map + const createIfNotExists = true + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.True(t, domainStorageMap.Inlined()) + + valueID := domainStorageMap.ValueID() + + accountValues[domain] = make(domainStorageMapValues) + + domainValues := accountValues[domain] + + // Insert new values to domain storage map + for domainStorageMap.Count() < domainStorageMapCount { + n := random.Int() + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + if _, exists := domainValues[key]; exists { + continue + } + value := interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + + existed := domainStorageMap.WriteValue(inter, key, value) + require.False(t, existed) + + domainValues[key] = value + } + + require.Equal(t, uint64(domainStorageMapCount), domainStorageMap.Count()) + require.Equal(t, valueID, domainStorageMap.ValueID()) + require.False(t, domainStorageMap.Inlined()) + + // Check storage health + err := storage.CheckHealth() + require.NoError(t, err) + + // Overwrite values in domain storage map + for key := range domainValues { + n := random.Int() + value := interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + + existed := 
domainStorageMap.WriteValue(inter, key, value) + require.True(t, existed) + + domainValues[key] = value + } + + require.Equal(t, uint64(domainStorageMapCount), domainStorageMap.Count()) + require.Equal(t, valueID, domainStorageMap.ValueID()) + require.False(t, domainStorageMap.Inlined()) + + // Check storage health + err = storage.CheckHealth() + require.NoError(t, err) + + // Remove all values in domain storage map + for key := range domainValues { + existed := domainStorageMap.WriteValue(inter, key, nil) + require.True(t, existed) + + delete(domainValues, key) + } + + require.Equal(t, uint64(0), domainStorageMap.Count()) + require.Equal(t, valueID, domainStorageMap.ValueID()) + require.True(t, domainStorageMap.Inlined()) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health + err = storage.CheckHealth() + require.NoError(t, err) + + // There should be 2 non-empty registers in ledger after commits: + // - account register (key is "stored") + // - account storage map (atree slab) + nonEmptyRegisters := make(map[string][]byte) + for k, v := range ledger.StoredValues { + if len(v) > 0 { + nonEmptyRegisters[k] = v + } + } + require.Equal(t, 2, len(nonEmptyRegisters)) + + accountRegisterValue, accountRegisterExists := nonEmptyRegisters[string(address[:])+"|"+AccountStorageKey] + require.True(t, accountRegisterExists) + require.Equal(t, 8, len(accountRegisterValue)) + + _, accountStorageMapRegisterExists := nonEmptyRegisters[string(address[:])+"|$"+string(accountRegisterValue)] + require.True(t, accountStorageMapRegisterExists) + + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) +} + +// TestRuntimeStorageLargeDomainValues tests large values +// in domain storage map. 
+func TestRuntimeStorageLargeDomainValues(t *testing.T) { + random := rand.New(rand.NewSource(42)) + + address := common.MustBytesToAddress([]byte{0x1}) + + // Create empty storage + ledger := NewTestLedger(nil, nil) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + domains := []string{ + common.PathDomainStorage.Identifier(), + common.PathDomainPublic.Identifier(), + common.PathDomainPrivate.Identifier(), + } + + const domainStorageMapCount = 5 + + accountValues := make(accountStorageMapValues) + + for _, domain := range domains { + + // Create domain storage map + const createIfNotExists = true + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.True(t, domainStorageMap.Inlined()) + + valueID := domainStorageMap.ValueID() + + accountValues[domain] = make(domainStorageMapValues) + + domainValues := accountValues[domain] + + // Insert new values to domain storage map + for domainStorageMap.Count() < domainStorageMapCount { + n := random.Int() + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + if _, exists := domainValues[key]; exists { + continue + } + value := interpreter.NewUnmeteredStringValue(strings.Repeat("a", 1_000)) + + existed := domainStorageMap.WriteValue(inter, key, value) + require.False(t, existed) + + domainValues[key] = value + } + + require.Equal(t, uint64(domainStorageMapCount), domainStorageMap.Count()) + require.Equal(t, valueID, domainStorageMap.ValueID()) + require.True(t, domainStorageMap.Inlined()) + + // Check storage health + err := storage.CheckHealth() + require.NoError(t, err) + + // Overwrite values in domain storage map + for key := range domainValues { + value := interpreter.NewUnmeteredStringValue(strings.Repeat("b", 1_000)) + + existed := domainStorageMap.WriteValue(inter, key, value) + require.True(t, existed) + + domainValues[key] = value + } + + require.Equal(t, uint64(domainStorageMapCount), domainStorageMap.Count()) + require.Equal(t, valueID, domainStorageMap.ValueID()) + require.True(t, domainStorageMap.Inlined()) + + // Check storage health + err = storage.CheckHealth() + require.NoError(t, err) + + // Remove all values in domain storage map + for key := range domainValues { + existed := domainStorageMap.WriteValue(inter, key, nil) + require.True(t, existed) + + delete(domainValues, key) + } + + require.Equal(t, uint64(0), domainStorageMap.Count()) + require.Equal(t, valueID, domainStorageMap.ValueID()) + require.True(t, domainStorageMap.Inlined()) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health + err = storage.CheckHealth() + require.NoError(t, err) + + // There should be 2 non-empty registers in ledger after commits: + // - account register (key is "stored") + // - account storage map (atree slab) + nonEmptyRegisters := make(map[string][]byte) + for k, v := range ledger.StoredValues { + if len(v) > 0 { + nonEmptyRegisters[k] = v + } + } + require.Equal(t, 2, len(nonEmptyRegisters)) + + accountRegisterValue, accountRegisterExists := nonEmptyRegisters[string(address[:])+"|"+AccountStorageKey] + require.True(t, accountRegisterExists) + require.Equal(t, 8, len(accountRegisterValue)) + + _, accountStorageMapRegisterExists := nonEmptyRegisters[string(address[:])+"|$"+string(accountRegisterValue)] + require.True(t, accountStorageMapRegisterExists) + + checkAccountStorageMapData(t, 
ledger.StoredValues, ledger.StorageIndices, address, accountValues) +} + +func TestDomainRegisterMigrationForLargeAccount(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + var writeCount int + + accountsInfo := []accountInfo{ + { + address: address, + domains: []domainInfo{ + {domain: common.PathDomainStorage.Identifier(), domainStorageMapCount: 100, maxDepth: 3}, + {domain: common.PathDomainPublic.Identifier(), domainStorageMapCount: 100, maxDepth: 3}, + {domain: common.PathDomainPrivate.Identifier(), domainStorageMapCount: 100, maxDepth: 3}, + }, + }, + } + + ledger, accountsValues := newTestLedgerWithUnmigratedAccounts( + t, + nil, + LedgerOnWriteCounter(&writeCount), + accountsInfo, + ) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(t, storage) + + accountValues := accountsValues[address] + + // Create new domain storage map + const createIfNotExists = true + domain := stdlib.InboxStorageDomain + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + + accountValues[domain] = make(domainStorageMapValues) + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check there are writes to underlying storage + require.True(t, writeCount > 0) + + // Check there isn't any domain registers + nonAtreeRegisters := make(map[string][]byte) + for k, v := range ledger.StoredValues { + if len(v) == 0 { + continue + } + ks := strings.Split(k, "|") + if ks[1][0] != '$' { + nonAtreeRegisters[k] = v + } + } + + require.Equal(t, 1, len(nonAtreeRegisters)) + for k := range nonAtreeRegisters { + ks := strings.Split(k, "|") + require.Equal(t, address[:], []byte(ks[0])) + require.Equal(t, AccountStorageKey, ks[1]) + } + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + // Verify account storage map data + checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) +} + +// createAndWriteAccountStorageMap creates account storage map with given domains and writes random values to domain storage map. 
+func createAndWriteAccountStorageMap( + t testing.TB, + storage *Storage, + inter *interpreter.Interpreter, + address common.Address, + domains []string, + count int, + random *rand.Rand, +) accountStorageMapValues { + + accountValues := make(accountStorageMapValues) + + // Create domain storage map + for _, domain := range domains { + const createIfNotExists = true + domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) + + // Write to to domain storage map + accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, count, random) + } + + // Commit changes + const commitContractUpdates = false + err := storage.Commit(inter, commitContractUpdates) + require.NoError(t, err) + + // Check storage health after commit + err = storage.CheckHealth() + require.NoError(t, err) + + return accountValues +} + +func writeToDomainStorageMap( + inter *interpreter.Interpreter, + domainStorageMap *interpreter.DomainStorageMap, + count int, + random *rand.Rand, +) domainStorageMapValues { + domainValues := make(domainStorageMapValues) + + for len(domainValues) < count { + n := random.Int() + + key := interpreter.StringStorageMapKey(strconv.Itoa(n)) + + value := interpreter.NewUnmeteredIntValueFromInt64(int64(n)) + + domainStorageMap.WriteValue(inter, key, value) + + domainValues[key] = value + } + + return domainValues +} + +// checkAccountStorageMapData creates new storage with given storedValues, and compares account storage map values with given expectedAccountValues. +func checkAccountStorageMapData( + tb testing.TB, + storedValues map[string][]byte, + storageIndices map[string]uint64, + address common.Address, + expectedAccountValues accountStorageMapValues, +) { + // Create storage with given storedValues and storageIndices + ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) + storage := NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(tb, storage) + + // Get account register + accountStorageMapSlabIndex, err := ledger.GetValue(address[:], []byte(AccountStorageKey)) + require.NoError(tb, err) + require.Equal(tb, 8, len(accountStorageMapSlabIndex)) + + // Load account storage map + accountSlabID := atree.NewSlabID( + atree.Address(address[:]), + atree.SlabIndex(accountStorageMapSlabIndex[:]), + ) + accountStorageMap := interpreter.NewAccountStorageMapWithRootID(storage, accountSlabID) + require.NotNil(tb, accountStorageMap) + require.Equal(tb, uint64(len(expectedAccountValues)), accountStorageMap.Count()) + + domainCount := 0 + iter := accountStorageMap.Iterator() + for { + domain, domainStorageMap := iter.Next() + if domain == "" { + break + } + + domainCount++ + + expectedDomainValues, exist := expectedAccountValues[domain] + require.True(tb, exist) + require.Equal(tb, uint64(len(expectedDomainValues)), domainStorageMap.Count()) + + // Check values stored in domain storage map + for key, expectedValue := range expectedDomainValues { + value := domainStorageMap.ReadValue(nil, key) + + ev, ok := value.(interpreter.EquatableValue) + require.True(tb, ok) + require.True(tb, ev.Equal(inter, interpreter.EmptyLocationRange, expectedValue)) + } + } + + require.Equal(tb, len(expectedAccountValues), domainCount) + + // Check atree storage health + rootSlabIDs, err := atree.CheckStorageHealth(storage.PersistentSlabStorage, 1) + require.NoError(tb, err) + require.Equal(tb, 1, len(rootSlabIDs)) + require.Contains(tb, rootSlabIDs, 
accountSlabID) +} diff --git a/stdlib/account.go b/stdlib/account.go index 66907a6f93..34adbe9f71 100644 --- a/stdlib/account.go +++ b/stdlib/account.go @@ -3256,6 +3256,7 @@ func recordStorageCapabilityController( storageMapKey := interpreter.StringStorageMapKey(identifier) storageMap := inter.Storage().GetStorageMap( + inter, address, PathCapabilityStorageDomain, true, @@ -3298,6 +3299,7 @@ func getPathCapabilityIDSet( storageMapKey := interpreter.StringStorageMapKey(identifier) storageMap := inter.Storage().GetStorageMap( + inter, address, PathCapabilityStorageDomain, false, @@ -3348,6 +3350,7 @@ func unrecordStorageCapabilityController( if capabilityIDSet.Count() == 0 { storageMap := inter.Storage().GetStorageMap( + inter, address, PathCapabilityStorageDomain, true, @@ -3420,6 +3423,7 @@ func recordAccountCapabilityController( storageMapKey := interpreter.Uint64StorageMapKey(capabilityIDValue) storageMap := inter.Storage().GetStorageMap( + inter, address, AccountCapabilityStorageDomain, true, @@ -3447,6 +3451,7 @@ func unrecordAccountCapabilityController( storageMapKey := interpreter.Uint64StorageMapKey(capabilityIDValue) storageMap := inter.Storage().GetStorageMap( + inter, address, AccountCapabilityStorageDomain, true, @@ -3466,6 +3471,7 @@ func getAccountCapabilityControllerIDsIterator( count uint64, ) { storageMap := inter.Storage().GetStorageMap( + inter, address, AccountCapabilityStorageDomain, false, diff --git a/test_utils/interpreter_utils/interpreter.go b/test_utils/interpreter_utils/interpreter.go index 46a8182023..e92a5264dc 100644 --- a/test_utils/interpreter_utils/interpreter.go +++ b/test_utils/interpreter_utils/interpreter.go @@ -29,14 +29,26 @@ import ( func NewTestInterpreter(tb testing.TB) *interpreter.Interpreter { storage := NewUnmeteredInMemoryStorage() + return NewTestInterpreterWithStorage(tb, storage) +} + +func NewTestInterpreterWithStorage(tb testing.TB, storage interpreter.Storage) *interpreter.Interpreter { + return NewTestInterpreterWithStorageAndAtreeValidationConfig(tb, storage, true, true) +} +func NewTestInterpreterWithStorageAndAtreeValidationConfig( + tb testing.TB, + storage interpreter.Storage, + atreeValueValidationEnabled bool, + atreeStorageValidationEnabled bool, +) *interpreter.Interpreter { inter, err := interpreter.NewInterpreter( nil, TestLocation, &interpreter.Config{ Storage: storage, - AtreeValueValidationEnabled: true, - AtreeStorageValidationEnabled: true, + AtreeValueValidationEnabled: atreeValueValidationEnabled, + AtreeStorageValidationEnabled: atreeStorageValidationEnabled, }, ) require.NoError(tb, err) diff --git a/test_utils/runtime_utils/storage.go b/test_utils/runtime_utils/storage.go new file mode 100644 index 0000000000..cb4ed93b40 --- /dev/null +++ b/test_utils/runtime_utils/storage.go @@ -0,0 +1,42 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package runtime_utils + +import ( + "testing" + + "github.com/onflow/atree" + "github.com/stretchr/testify/require" +) + +func CheckAtreeStorageHealth(tb testing.TB, storage atree.SlabStorage, expectedRootSlabIDs []atree.SlabID) { + rootSlabIDs, err := atree.CheckStorageHealth(storage, -1) + require.NoError(tb, err) + + nonTempRootSlabIDs := make([]atree.SlabID, 0, len(rootSlabIDs)) + + for rootSlabID := range rootSlabIDs { + if rootSlabID.HasTempAddress() { + continue + } + nonTempRootSlabIDs = append(nonTempRootSlabIDs, rootSlabID) + } + + require.ElementsMatch(tb, nonTempRootSlabIDs, expectedRootSlabIDs) +} diff --git a/test_utils/runtime_utils/testledger.go b/test_utils/runtime_utils/testledger.go index 4d4c846172..ef77d134f8 100644 --- a/test_utils/runtime_utils/testledger.go +++ b/test_utils/runtime_utils/testledger.go @@ -31,6 +31,7 @@ import ( type TestLedger struct { StoredValues map[string][]byte + StorageIndices map[string]uint64 OnValueExists func(owner, key []byte) (exists bool, err error) OnGetValue func(owner, key []byte) (value []byte, err error) OnSetValue func(owner, key, value []byte) (err error) @@ -92,9 +93,30 @@ func (s TestLedger) Dump() { } } +type LedgerOnRead func(owner, key, value []byte) +type LedgerOnWrite func(owner, key, value []byte) + +type OwnerKeyValue struct { + Owner, Key, Value []byte +} + +var LedgerOnWriteCounter = func(counter *int) LedgerOnWrite { + return func(_, _, _ []byte) { + (*counter)++ + } +} + +var LedgerOnWriteEntries = func(entries *[]OwnerKeyValue) LedgerOnWrite { + return func(owner, key, value []byte) { + *entries = append( + *entries, + OwnerKeyValue{Owner: owner, Key: key, Value: value}) + } +} + func NewTestLedger( - onRead func(owner, key, value []byte), - onWrite func(owner, key, value []byte), + onRead LedgerOnRead, + onWrite LedgerOnWrite, ) TestLedger { storedValues := map[string][]byte{} @@ -102,7 +124,8 @@ func NewTestLedger( storageIndices := map[string]uint64{} return TestLedger{ - StoredValues: storedValues, + StoredValues: storedValues, + StorageIndices: storageIndices, OnValueExists: func(owner, key []byte) (bool, error) { value := storedValues[TestStorageKey(string(owner), string(key))] return len(value) > 0, nil @@ -142,7 +165,8 @@ func NewTestLedgerWithData( } return TestLedger{ - StoredValues: storedValues, + StoredValues: storedValues, + StorageIndices: storageIndices, OnValueExists: func(owner, key []byte) (bool, error) { value := storedValues[storageKey(string(owner), string(key))] return len(value) > 0, nil From 7429be045c4a0bc1ac1868f7bf565513d7a64a6f Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 31 Oct 2024 11:47:50 -0500 Subject: [PATCH 02/52] Lint --- test_utils/runtime_utils/storage.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test_utils/runtime_utils/storage.go b/test_utils/runtime_utils/storage.go index cb4ed93b40..0c971d8c5d 100644 --- a/test_utils/runtime_utils/storage.go +++ b/test_utils/runtime_utils/storage.go @@ -31,7 +31,7 @@ func CheckAtreeStorageHealth(tb testing.TB, storage atree.SlabStorage, expectedR nonTempRootSlabIDs := make([]atree.SlabID, 0, len(rootSlabIDs)) - for rootSlabID := range rootSlabIDs { + for rootSlabID := range rootSlabIDs { //nolint:maprange if rootSlabID.HasTempAddress() { continue } From e6179087d683f06e98f8255da97f953a923db352 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 31 Oct 2024 12:07:07 -0500 Subject: [PATCH 03/52] Update 
go.mod --- tools/compatibility-check/go.mod | 4 ++-- tools/compatibility-check/go.sum | 10 ++++------ 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/tools/compatibility-check/go.mod b/tools/compatibility-check/go.mod index 628bfdc184..0a4813d95b 100644 --- a/tools/compatibility-check/go.mod +++ b/tools/compatibility-check/go.mod @@ -43,7 +43,7 @@ require ( github.com/multiformats/go-multibase v0.2.0 // indirect github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-varint v0.0.7 // indirect - github.com/onflow/atree v0.8.0 // indirect + github.com/onflow/atree v0.8.1-0.20241028213850-07c884e4abcf // indirect github.com/onflow/crypto v0.25.2 // indirect github.com/onflow/flow-core-contracts/lib/go/templates v1.3.3-0.20241017220455-79fdc6c8ba53 // indirect github.com/onflow/flow-ft/lib/go/contracts v1.0.1 // indirect @@ -73,7 +73,7 @@ require ( github.com/vmihailenco/msgpack/v4 v4.3.11 // indirect github.com/vmihailenco/tagparser v0.1.1 // indirect github.com/x448/float16 v0.8.4 // indirect - github.com/zeebo/blake3 v0.2.3 // indirect + github.com/zeebo/blake3 v0.2.4 // indirect go.opentelemetry.io/otel v1.24.0 // indirect golang.org/x/crypto v0.28.0 // indirect golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect diff --git a/tools/compatibility-check/go.sum b/tools/compatibility-check/go.sum index 3c453fa707..4ab363961b 100644 --- a/tools/compatibility-check/go.sum +++ b/tools/compatibility-check/go.sum @@ -278,7 +278,6 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -343,8 +342,8 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onflow/atree v0.8.0 h1:qg5c6J1gVDNObughpEeWm8oxqhPGdEyGrda121GM4u0= -github.com/onflow/atree v0.8.0/go.mod h1:yccR+LR7xc1Jdic0mrjocbHvUD7lnVvg8/Ct1AA5zBo= +github.com/onflow/atree v0.8.1-0.20241028213850-07c884e4abcf h1:MDB/hdwr5GMsZHIIrAw3gBnmWBy5XjsZ4/6kftv9d5c= +github.com/onflow/atree v0.8.1-0.20241028213850-07c884e4abcf/go.mod h1:U8PGG42VrSJqjdfJ9NGQ2fenkyFRYlgtfHsZM61H4zY= github.com/onflow/crypto v0.25.2 h1:GjHunqVt+vPcdqhxxhAXiMIF3YiLX7gTuTR5O+VG2ns= github.com/onflow/crypto v0.25.2/go.mod h1:fY7eLqUdMKV8EGOw301unP8h7PvLVy8/6gVR++/g0BY= github.com/onflow/flow-core-contracts/lib/go/contracts v1.4.0 h1:R86HaOuk6vpuECZnriEUE7bw9inC2AtdSn8lL/iwQLQ= @@ -480,11 +479,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= -github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= -github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ= +github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI= +github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= From 5843373e8348ea8f72d87510d51b08fb240a4564 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 4 Nov 2024 12:10:23 -0600 Subject: [PATCH 04/52] Export DomainRegisterMigration.MigrateAccount() --- runtime/migrate_domain_registers.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/runtime/migrate_domain_registers.go b/runtime/migrate_domain_registers.go index bba37d1d4f..a5357f1da6 100644 --- a/runtime/migrate_domain_registers.go +++ b/runtime/migrate_domain_registers.go @@ -93,7 +93,7 @@ func (m *DomainRegisterMigration) MigrateAccounts( continue } - accountStorageMap, err := m.migrateAccount(address) + accountStorageMap, err := m.MigrateAccount(address) if err != nil { return nil, err } @@ -107,7 +107,7 @@ func (m *DomainRegisterMigration) MigrateAccounts( return migratedAccounts, nil } -func (m *DomainRegisterMigration) migrateAccount( +func (m *DomainRegisterMigration) MigrateAccount( address common.Address, ) (*interpreter.AccountStorageMap, error) { // Create account storage map From 658f6c1ee589e7320351b03f27942b5e9b21df54 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 4 Nov 2024 12:30:54 -0600 Subject: [PATCH 05/52] Skip domain register migration for no domain accts This is for completeness. 
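In practice this means MigrateAccount now returns a nil account storage map when an account has no domain registers, and MigrateAccounts leaves such accounts out of its result. The following is a minimal, illustrative usage sketch only (not part of this patch); it assumes a DomainRegisterMigration value constructed elsewhere and relies only on the MigrateAccount signature introduced in the previous commit.

package example

import (
	"github.com/onflow/cadence/common"
	"github.com/onflow/cadence/interpreter"
	"github.com/onflow/cadence/runtime"
)

// migrateAll is an illustrative driver: it migrates each account and skips
// accounts for which MigrateAccount reports nothing to migrate (nil map).
// Construction of the migrator is assumed to happen elsewhere.
func migrateAll(
	migrator *runtime.DomainRegisterMigration,
	addresses []common.Address,
) (map[common.Address]*interpreter.AccountStorageMap, error) {
	migrated := make(map[common.Address]*interpreter.AccountStorageMap)
	for _, address := range addresses {
		accountStorageMap, err := migrator.MigrateAccount(address)
		if err != nil {
			return nil, err
		}
		if accountStorageMap == nil {
			// Account has no domain registers, so there is nothing to migrate.
			continue
		}
		migrated[address] = accountStorageMap
	}
	return migrated, nil
}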
--- runtime/migrate_domain_registers.go | 31 +++++++++++++++++------- runtime/migrate_domain_registers_test.go | 29 ++++++---------------- 2 files changed, 29 insertions(+), 31 deletions(-) diff --git a/runtime/migrate_domain_registers.go b/runtime/migrate_domain_registers.go index a5357f1da6..462f588278 100644 --- a/runtime/migrate_domain_registers.go +++ b/runtime/migrate_domain_registers.go @@ -98,6 +98,10 @@ func (m *DomainRegisterMigration) MigrateAccounts( return nil, err } + if accountStorageMap == nil { + continue + } + if migratedAccounts == nil { migratedAccounts = &orderedmap.OrderedMap[common.Address, *interpreter.AccountStorageMap]{} } @@ -110,15 +114,18 @@ func (m *DomainRegisterMigration) MigrateAccounts( func (m *DomainRegisterMigration) MigrateAccount( address common.Address, ) (*interpreter.AccountStorageMap, error) { - // Create account storage map - accountStorageMap := interpreter.NewAccountStorageMap(m.memoryGauge, m.storage, atree.Address(address)) // Migrate existing domains - err := m.migrateDomains(address, accountStorageMap) + accountStorageMap, err := m.migrateDomains(address) if err != nil { return nil, err } + if accountStorageMap == nil { + // Nothing migrated + return nil, nil + } + accountStorageMapSlabIndex := accountStorageMap.SlabID().Index() // Write account register @@ -139,13 +146,15 @@ func (m *DomainRegisterMigration) MigrateAccount( // migrateDomains migrates existing domain storage maps and removes domain registers. func (m *DomainRegisterMigration) migrateDomains( address common.Address, - accountStorageMap *interpreter.AccountStorageMap, -) error { +) (*interpreter.AccountStorageMap, error) { + + var accountStorageMap *interpreter.AccountStorageMap + for _, domain := range AccountDomains { domainStorageMap, err := m.getDomainStorageMap(m.ledger, m.storage, address, domain) if err != nil { - return err + return nil, err } if domainStorageMap == nil { @@ -153,11 +162,15 @@ func (m *DomainRegisterMigration) migrateDomains( continue } + if accountStorageMap == nil { + accountStorageMap = interpreter.NewAccountStorageMap(m.memoryGauge, m.storage, atree.Address(address)) + } + // Migrate (insert) existing domain storage map to account storage map existed := accountStorageMap.WriteDomain(m.inter, domain, domainStorageMap) if existed { // This shouldn't happen because we are inserting domain storage map into empty account storage map. 
- return errors.NewUnexpectedError( + return nil, errors.NewUnexpectedError( "failed to migrate domain %s for account %x: domain already exists in account storage map", domain, address, ) } @@ -171,11 +184,11 @@ func (m *DomainRegisterMigration) migrateDomains( nil) }) if err != nil { - return interpreter.WrappedExternalError(err) + return nil, interpreter.WrappedExternalError(err) } } - return nil + return accountStorageMap, nil } func isMigrated(ledger atree.Ledger, address common.Address) (bool, error) { diff --git a/runtime/migrate_domain_registers_test.go b/runtime/migrate_domain_registers_test.go index c21bd31162..28769c9e14 100644 --- a/runtime/migrate_domain_registers_test.go +++ b/runtime/migrate_domain_registers_test.go @@ -84,6 +84,11 @@ func TestMigrateDomainRegisters(t *testing.T) { migratedAccounts, err := migrator.MigrateAccounts(nil, alwaysMigrate) require.NoError(t, err) require.True(t, migratedAccounts == nil || migratedAccounts.Len() == 0) + + err = storage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + require.Equal(t, 0, len(ledger.StoredValues)) }) t.Run("accounts without domain registers", func(t *testing.T) { @@ -100,32 +105,12 @@ func TestMigrateDomainRegisters(t *testing.T) { migratedAccounts, err := migrator.MigrateAccounts(accounts, alwaysMigrate) require.NoError(t, err) - require.NotNil(t, migratedAccounts) - require.Equal(t, accounts.Len(), migratedAccounts.Len()) - require.Equal(t, address2, migratedAccounts.Oldest().Key) - require.Equal(t, address1, migratedAccounts.Newest().Key) + require.True(t, migratedAccounts == nil || migratedAccounts.Len() == 0) err = storage.FastCommit(goruntime.NumCPU()) require.NoError(t, err) - // Check non-atree registers - nonAtreeRegisters := getNonAtreeRegisters(ledger.StoredValues) - require.Equal(t, accounts.Len(), len(nonAtreeRegisters)) - require.Contains(t, nonAtreeRegisters, string(address1[:])+"|"+runtime.AccountStorageKey) - require.Contains(t, nonAtreeRegisters, string(address2[:])+"|"+runtime.AccountStorageKey) - - // Check atree storage - expectedRootSlabIDs := make([]atree.SlabID, 0, migratedAccounts.Len()) - for pair := migratedAccounts.Oldest(); pair != nil; pair = pair.Next() { - accountStorageMap := pair.Value - expectedRootSlabIDs = append(expectedRootSlabIDs, accountStorageMap.SlabID()) - } - - CheckAtreeStorageHealth(t, storage, expectedRootSlabIDs) - - // Check account storage map data - checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address1, make(accountStorageMapValues)) - checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address2, make(accountStorageMapValues)) + require.Equal(t, 0, len(ledger.StoredValues)) }) t.Run("accounts with domain registers", func(t *testing.T) { From 3f8513ebfa92579366bf35de929d8aaea07643a0 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 5 Nov 2024 16:13:48 -0600 Subject: [PATCH 06/52] Rename storagemap.go to domain_storagemap.go --- interpreter/{storagemap.go => domain_storagemap.go} | 0 interpreter/{storagemap_test.go => domain_storagemap_test.go} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename interpreter/{storagemap.go => domain_storagemap.go} (100%) rename interpreter/{storagemap_test.go => domain_storagemap_test.go} (100%) diff --git a/interpreter/storagemap.go b/interpreter/domain_storagemap.go similarity index 100% rename from interpreter/storagemap.go rename to interpreter/domain_storagemap.go diff --git a/interpreter/storagemap_test.go 
b/interpreter/domain_storagemap_test.go similarity index 100% rename from interpreter/storagemap_test.go rename to interpreter/domain_storagemap_test.go From ec7fc828d54ebea829267b40348aa01f5aca3729 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Tue, 5 Nov 2024 15:53:50 -0800 Subject: [PATCH 07/52] refactor current account storage format (payload per domain storage map) into separate type --- cmd/decode-state-values/main.go | 4 +- interpreter/account_storagemap.go | 14 +- interpreter/account_test.go | 2 +- interpreter/interpreter.go | 12 +- interpreter/misc_test.go | 2 +- interpreter/storage.go | 24 +- interpreter/storage_test.go | 2 +- interpreter/storagemap.go | 22 +- interpreter/stringatreevalue_test.go | 2 +- interpreter/value_test.go | 2 +- runtime/account_storage_v1.go | 128 ++++++ runtime/capabilitycontrollers_test.go | 8 +- runtime/contract_test.go | 2 +- runtime/environment.go | 2 +- runtime/ft_test.go | 4 +- runtime/storage.go | 596 +++++++++----------------- runtime/storage_test.go | 80 ++-- stdlib/account.go | 12 +- tools/storage-explorer/main.go | 4 +- 19 files changed, 442 insertions(+), 480 deletions(-) create mode 100644 runtime/account_storage_v1.go diff --git a/cmd/decode-state-values/main.go b/cmd/decode-state-values/main.go index b6f781258f..322aa112e2 100644 --- a/cmd/decode-state-values/main.go +++ b/cmd/decode-state-values/main.go @@ -234,8 +234,8 @@ type interpreterStorage struct { var _ interpreter.Storage = &interpreterStorage{} -func (i interpreterStorage) GetStorageMap(_ *interpreter.Interpreter, _ common.Address, _ string, _ bool) *interpreter.DomainStorageMap { - panic("unexpected GetStorageMap call") +func (i interpreterStorage) GetDomainStorageMap(_ *interpreter.Interpreter, _ common.Address, _ string, _ bool) *interpreter.DomainStorageMap { + panic("unexpected GetDomainStorageMap call") } func (i interpreterStorage) CheckHealth() error { diff --git a/interpreter/account_storagemap.go b/interpreter/account_storagemap.go index 67a51c1c35..bc954dd9ab 100644 --- a/interpreter/account_storagemap.go +++ b/interpreter/account_storagemap.go @@ -163,12 +163,12 @@ func (s *AccountStorageMap) NewDomain( func (s *AccountStorageMap) WriteDomain( interpreter *Interpreter, domain string, - storageMap *DomainStorageMap, + domainStorageMap *DomainStorageMap, ) (existed bool) { - if storageMap == nil { + if domainStorageMap == nil { return s.removeDomain(interpreter, domain) } - return s.setDomain(interpreter, domain, storageMap) + return s.setDomain(interpreter, domain, domainStorageMap) } // setDomain sets domain storage map in the account storage map and returns true if domain previously existed. 
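The hunks above keep WriteDomain's contract unchanged while renaming its parameters: a non-nil domain storage map inserts or replaces the domain, nil removes it, and the boolean result reports whether the domain previously existed. A small illustrative sketch follows (not part of the patch; the interpreter and maps are assumed to be set up elsewhere).

package example

import (
	"github.com/onflow/cadence/interpreter"
)

// writeThenRemoveDomain illustrates the WriteDomain contract:
// a non-nil map sets (or replaces) the domain, nil removes it,
// and the return value reports whether the domain already existed.
func writeThenRemoveDomain(
	inter *interpreter.Interpreter,
	accountStorageMap *interpreter.AccountStorageMap,
	domain string,
	newDomainStorageMap *interpreter.DomainStorageMap,
) (replaced bool) {
	// Insert or replace the domain storage map for the given domain.
	// If a previous map existed, it is deep-removed and its slab is freed.
	replaced = accountStorageMap.WriteDomain(inter, domain, newDomainStorageMap)

	// Passing nil removes the domain; the result again reports prior existence.
	accountStorageMap.WriteDomain(inter, domain, nil)

	return replaced
}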
@@ -176,7 +176,7 @@ func (s *AccountStorageMap) WriteDomain( func (s *AccountStorageMap) setDomain( interpreter *Interpreter, domain string, - storageMap *DomainStorageMap, + newDomainStorageMap *DomainStorageMap, ) (existed bool) { interpreter.recordStorageMutation() @@ -186,7 +186,7 @@ func (s *AccountStorageMap) setDomain( key.AtreeValueCompare, key.AtreeValueHashInput, key.AtreeValue(), - storageMap.orderedMap, + newDomainStorageMap.orderedMap, ) if err != nil { panic(errors.NewExternalError(err)) @@ -195,10 +195,10 @@ func (s *AccountStorageMap) setDomain( existed = existingValueStorable != nil if existed { // Create domain storage map from overwritten storable - domainStorageMap := newDomainStorageMapWithAtreeStorable(s.orderedMap.Storage, existingValueStorable) + existingDomainStorageMap := newDomainStorageMapWithAtreeStorable(s.orderedMap.Storage, existingValueStorable) // Deep remove elements in domain storage map - domainStorageMap.DeepRemove(interpreter, true) + existingDomainStorageMap.DeepRemove(interpreter, true) // Remove domain storage map slab interpreter.RemoveReferencedSlab(existingValueStorable) diff --git a/interpreter/account_test.go b/interpreter/account_test.go index a003477a56..aebc993af6 100644 --- a/interpreter/account_test.go +++ b/interpreter/account_test.go @@ -483,7 +483,7 @@ func testAccountWithErrorHandler( getAccountValues := func() map[storageKey]interpreter.Value { accountValues := make(map[storageKey]interpreter.Value) - for storageMapKey, accountStorage := range inter.Storage().(interpreter.InMemoryStorage).StorageMaps { + for storageMapKey, accountStorage := range inter.Storage().(interpreter.InMemoryStorage).DomainStorageMaps { iterator := accountStorage.Iterator(inter) for { key, value := iterator.Next() diff --git a/interpreter/interpreter.go b/interpreter/interpreter.go index 9fdbf1d135..b082617984 100644 --- a/interpreter/interpreter.go +++ b/interpreter/interpreter.go @@ -237,7 +237,7 @@ func (c TypeCodes) Merge(codes TypeCodes) { type Storage interface { atree.SlabStorage - GetStorageMap(inter *Interpreter, address common.Address, domain string, createIfNotExists bool) *DomainStorageMap + GetDomainStorageMap(inter *Interpreter, address common.Address, domain string, createIfNotExists bool) *DomainStorageMap CheckHealth() error } @@ -2681,7 +2681,7 @@ func (interpreter *Interpreter) StoredValueExists( domain string, identifier StorageMapKey, ) bool { - accountStorage := interpreter.Storage().GetStorageMap(interpreter, storageAddress, domain, false) + accountStorage := interpreter.Storage().GetDomainStorageMap(interpreter, storageAddress, domain, false) if accountStorage == nil { return false } @@ -2693,7 +2693,7 @@ func (interpreter *Interpreter) ReadStored( domain string, identifier StorageMapKey, ) Value { - accountStorage := interpreter.Storage().GetStorageMap(interpreter, storageAddress, domain, false) + accountStorage := interpreter.Storage().GetDomainStorageMap(interpreter, storageAddress, domain, false) if accountStorage == nil { return nil } @@ -2706,7 +2706,7 @@ func (interpreter *Interpreter) WriteStored( key StorageMapKey, value Value, ) (existed bool) { - accountStorage := interpreter.Storage().GetStorageMap(interpreter, storageAddress, domain, true) + accountStorage := interpreter.Storage().GetDomainStorageMap(interpreter, storageAddress, domain, true) return accountStorage.WriteValue(interpreter, key, value) } @@ -4069,7 +4069,7 @@ func (interpreter *Interpreter) IsSubTypeOfSemaType(staticSubType StaticType, su } func (interpreter 
*Interpreter) domainPaths(address common.Address, domain common.PathDomain) []Value { - storageMap := interpreter.Storage().GetStorageMap(interpreter, address, domain.Identifier(), false) + storageMap := interpreter.Storage().GetDomainStorageMap(interpreter, address, domain.Identifier(), false) if storageMap == nil { return []Value{} } @@ -4164,7 +4164,7 @@ func (interpreter *Interpreter) newStorageIterationFunction( parameterTypes := fnType.ParameterTypes() returnType := fnType.ReturnTypeAnnotation.Type - storageMap := config.Storage.GetStorageMap(interpreter, address, domain.Identifier(), false) + storageMap := config.Storage.GetDomainStorageMap(interpreter, address, domain.Identifier(), false) if storageMap == nil { // if nothing is stored, no iteration is required return Void diff --git a/interpreter/misc_test.go b/interpreter/misc_test.go index e00d00f525..eaf47954eb 100644 --- a/interpreter/misc_test.go +++ b/interpreter/misc_test.go @@ -5350,7 +5350,7 @@ func TestInterpretReferenceFailableDowncasting(t *testing.T) { ) domain := storagePath.Domain.Identifier() - storageMap := storage.GetStorageMap(inter, storageAddress, domain, true) + storageMap := storage.GetDomainStorageMap(inter, storageAddress, domain, true) storageMapKey := interpreter.StringStorageMapKey(storagePath.Identifier) storageMap.WriteValue(inter, storageMapKey, r) diff --git a/interpreter/storage.go b/interpreter/storage.go index 992a039e69..734009111a 100644 --- a/interpreter/storage.go +++ b/interpreter/storage.go @@ -130,8 +130,8 @@ func (k StorageKey) IsLess(o StorageKey) bool { // InMemoryStorage type InMemoryStorage struct { *atree.BasicSlabStorage - StorageMaps map[StorageKey]*DomainStorageMap - memoryGauge common.MemoryGauge + DomainStorageMaps map[StorageKey]*DomainStorageMap + memoryGauge common.MemoryGauge } var _ Storage = InMemoryStorage{} @@ -157,27 +157,27 @@ func NewInMemoryStorage(memoryGauge common.MemoryGauge) InMemoryStorage { ) return InMemoryStorage{ - BasicSlabStorage: slabStorage, - StorageMaps: make(map[StorageKey]*DomainStorageMap), - memoryGauge: memoryGauge, + BasicSlabStorage: slabStorage, + DomainStorageMaps: make(map[StorageKey]*DomainStorageMap), + memoryGauge: memoryGauge, } } -func (i InMemoryStorage) GetStorageMap( +func (i InMemoryStorage) GetDomainStorageMap( _ *Interpreter, address common.Address, domain string, createIfNotExists bool, ) ( - storageMap *DomainStorageMap, + domainStorageMap *DomainStorageMap, ) { key := NewStorageKey(i.memoryGauge, address, domain) - storageMap = i.StorageMaps[key] - if storageMap == nil && createIfNotExists { - storageMap = NewDomainStorageMap(i.memoryGauge, i, atree.Address(address)) - i.StorageMaps[key] = storageMap + domainStorageMap = i.DomainStorageMaps[key] + if domainStorageMap == nil && createIfNotExists { + domainStorageMap = NewDomainStorageMap(i.memoryGauge, i, atree.Address(address)) + i.DomainStorageMaps[key] = domainStorageMap } - return storageMap + return domainStorageMap } func (i InMemoryStorage) CheckHealth() error { diff --git a/interpreter/storage_test.go b/interpreter/storage_test.go index 16fd679a19..86d3234995 100644 --- a/interpreter/storage_test.go +++ b/interpreter/storage_test.go @@ -524,7 +524,7 @@ func TestStorageOverwriteAndRemove(t *testing.T) { const storageMapKey = StringStorageMapKey("test") - storageMap := storage.GetStorageMap(inter, address, "storage", true) + storageMap := storage.GetDomainStorageMap(inter, address, "storage", true) storageMap.WriteValue(inter, storageMapKey, array1) // Overwriting delete 
any existing child slabs diff --git a/interpreter/storagemap.go b/interpreter/storagemap.go index 336828ce64..e8f79bca35 100644 --- a/interpreter/storagemap.go +++ b/interpreter/storagemap.go @@ -110,7 +110,7 @@ func NewDomainStorageMapWithAtreeValue(value atree.Value) *DomainStorageMap { } // ValueExists returns true if the given key exists in the storage map. -func (s DomainStorageMap) ValueExists(key StorageMapKey) bool { +func (s *DomainStorageMap) ValueExists(key StorageMapKey) bool { exists, err := s.orderedMap.Has( key.AtreeValueCompare, key.AtreeValueHashInput, @@ -125,7 +125,7 @@ func (s DomainStorageMap) ValueExists(key StorageMapKey) bool { // ReadValue returns the value for the given key. // Returns nil if the key does not exist. -func (s DomainStorageMap) ReadValue(gauge common.MemoryGauge, key StorageMapKey) Value { +func (s *DomainStorageMap) ReadValue(gauge common.MemoryGauge, key StorageMapKey) Value { storedValue, err := s.orderedMap.Get( key.AtreeValueCompare, key.AtreeValueHashInput, @@ -146,7 +146,7 @@ func (s DomainStorageMap) ReadValue(gauge common.MemoryGauge, key StorageMapKey) // If the given value is nil, the key is removed. // If the given value is non-nil, the key is added/updated. // Returns true if a value previously existed at the given key. -func (s DomainStorageMap) WriteValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) { +func (s *DomainStorageMap) WriteValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) { if value == nil { return s.RemoveValue(interpreter, key) } else { @@ -157,7 +157,7 @@ func (s DomainStorageMap) WriteValue(interpreter *Interpreter, key StorageMapKey // SetValue sets a value in the storage map. // If the given key already stores a value, it is overwritten. // Returns true if given key already exists and existing value is overwritten. -func (s DomainStorageMap) SetValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) { +func (s *DomainStorageMap) SetValue(interpreter *Interpreter, key StorageMapKey, value atree.Value) (existed bool) { interpreter.recordStorageMutation() existingStorable, err := s.orderedMap.Set( @@ -184,7 +184,7 @@ func (s DomainStorageMap) SetValue(interpreter *Interpreter, key StorageMapKey, } // RemoveValue removes a value in the storage map, if it exists. -func (s DomainStorageMap) RemoveValue(interpreter *Interpreter, key StorageMapKey) (existed bool) { +func (s *DomainStorageMap) RemoveValue(interpreter *Interpreter, key StorageMapKey) (existed bool) { interpreter.recordStorageMutation() existingKeyStorable, existingValueStorable, err := s.orderedMap.Remove( @@ -270,22 +270,26 @@ func (s *DomainStorageMap) DeepRemove(interpreter *Interpreter, hasNoParentConta } } -func (s DomainStorageMap) ValueID() atree.ValueID { +func (s *DomainStorageMap) SlabID() atree.SlabID { + return s.orderedMap.SlabID() +} + +func (s *DomainStorageMap) ValueID() atree.ValueID { return s.orderedMap.ValueID() } -func (s DomainStorageMap) Count() uint64 { +func (s *DomainStorageMap) Count() uint64 { return s.orderedMap.Count() } -func (s DomainStorageMap) Inlined() bool { +func (s *DomainStorageMap) Inlined() bool { // This is only used for testing currently. 
return s.orderedMap.Inlined() } // Iterator returns an iterator (StorageMapIterator), // which allows iterating over the keys and values of the storage map -func (s DomainStorageMap) Iterator(gauge common.MemoryGauge) DomainStorageMapIterator { +func (s *DomainStorageMap) Iterator(gauge common.MemoryGauge) DomainStorageMapIterator { mapIterator, err := s.orderedMap.Iterator( StorageMapKeyAtreeValueComparator, StorageMapKeyAtreeValueHashInput, diff --git a/interpreter/stringatreevalue_test.go b/interpreter/stringatreevalue_test.go index 366b59b1ee..f9dca34b7f 100644 --- a/interpreter/stringatreevalue_test.go +++ b/interpreter/stringatreevalue_test.go @@ -45,7 +45,7 @@ func TestLargeStringAtreeValueInSeparateSlab(t *testing.T) { ) require.NoError(t, err) - storageMap := storage.GetStorageMap( + storageMap := storage.GetDomainStorageMap( inter, common.MustBytesToAddress([]byte{0x1}), common.PathDomainStorage.Identifier(), diff --git a/interpreter/value_test.go b/interpreter/value_test.go index 26cfac8621..77b22a248f 100644 --- a/interpreter/value_test.go +++ b/interpreter/value_test.go @@ -3806,7 +3806,7 @@ func TestValue_ConformsToStaticType(t *testing.T) { ) require.NoError(t, err) - storageMap := storage.GetStorageMap(inter, testAddress, "storage", true) + storageMap := storage.GetDomainStorageMap(inter, testAddress, "storage", true) storageMap.WriteValue(inter, StringStorageMapKey("test"), TrueValue) value := valueFactory(inter) diff --git a/runtime/account_storage_v1.go b/runtime/account_storage_v1.go new file mode 100644 index 0000000000..7c022a9093 --- /dev/null +++ b/runtime/account_storage_v1.go @@ -0,0 +1,128 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package runtime + +import ( + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/common/orderedmap" + "github.com/onflow/cadence/interpreter" +) + +type AccountStorageV1 struct { + ledger atree.Ledger + slabStorage atree.SlabStorage + memoryGauge common.MemoryGauge + + // newDomainStorageMapSlabIndices contains root slab index of new domain storage maps. + // The indices are saved using Ledger.SetValue() during Commit(). + // Key is StorageKey{address, accountStorageKey} and value is 8-byte slab index. 
+ newDomainStorageMapSlabIndices *orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex] +} + +func NewAccountStorageV1( + ledger atree.Ledger, + slabStorage atree.SlabStorage, + memoryGauge common.MemoryGauge, +) *AccountStorageV1 { + return &AccountStorageV1{ + ledger: ledger, + slabStorage: slabStorage, + memoryGauge: memoryGauge, + } +} + +func (s *AccountStorageV1) GetDomainStorageMap( + address common.Address, + domain string, + createIfNotExists bool, +) ( + domainStorageMap *interpreter.DomainStorageMap, +) { + var err error + domainStorageMap, err = getDomainStorageMapFromLegacyDomainRegister( + s.ledger, + s.slabStorage, + address, + domain, + ) + if err != nil { + panic(err) + } + + if domainStorageMap == nil && createIfNotExists { + domainStorageMap = s.storeNewDomainStorageMap(address, domain) + } + + return domainStorageMap +} + +func (s *AccountStorageV1) storeNewDomainStorageMap( + address common.Address, + domain string, +) *interpreter.DomainStorageMap { + + domainStorageMap := interpreter.NewDomainStorageMap( + s.memoryGauge, + s.slabStorage, + atree.Address(address), + ) + + slabIndex := domainStorageMap.SlabID().Index() + + storageKey := interpreter.NewStorageKey(s.memoryGauge, address, domain) + + if s.newDomainStorageMapSlabIndices == nil { + s.newDomainStorageMapSlabIndices = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} + } + s.newDomainStorageMapSlabIndices.Set(storageKey, slabIndex) + + return domainStorageMap +} + +func (s *AccountStorageV1) commit() error { + if s.newDomainStorageMapSlabIndices == nil { + return nil + } + + return commitSlabIndices( + s.newDomainStorageMapSlabIndices, + s.ledger, + ) +} + +// getDomainStorageMapFromLegacyDomainRegister returns domain storage map from legacy domain register. +func getDomainStorageMapFromLegacyDomainRegister( + ledger atree.Ledger, + storage atree.SlabStorage, + address common.Address, + domain string, +) (*interpreter.DomainStorageMap, error) { + domainStorageSlabIndex, domainRegisterExists, err := getSlabIndexFromRegisterValue(ledger, address, []byte(domain)) + if err != nil { + return nil, err + } + if !domainRegisterExists { + return nil, nil + } + + slabID := atree.NewSlabID(atree.Address(address), domainStorageSlabIndex) + return interpreter.NewDomainStorageMapWithRootID(storage, slabID), nil +} diff --git a/runtime/capabilitycontrollers_test.go b/runtime/capabilitycontrollers_test.go index d00fb18dae..1c0a79db62 100644 --- a/runtime/capabilitycontrollers_test.go +++ b/runtime/capabilitycontrollers_test.go @@ -3253,7 +3253,7 @@ func TestRuntimeCapabilityControllers(t *testing.T) { // Use *interpreter.Interpreter(nil) here because createIfNotExists is false. 
- storageMap := storage.GetStorageMap( + storageMap := storage.GetDomainStorageMap( nil, common.MustBytesToAddress([]byte{0x1}), stdlib.PathCapabilityStorageDomain, @@ -3843,7 +3843,7 @@ func TestRuntimeCapabilitiesGetBackwardCompatibility(t *testing.T) { }) require.NoError(t, err) - publicStorageMap := storage.GetStorageMap( + publicStorageMap := storage.GetDomainStorageMap( inter, testAddress, common.PathDomainPublic.Identifier(), @@ -3951,7 +3951,7 @@ func TestRuntimeCapabilitiesPublishBackwardCompatibility(t *testing.T) { }) require.NoError(t, err) - publicStorageMap := storage.GetStorageMap( + publicStorageMap := storage.GetDomainStorageMap( inter, testAddress, common.PathDomainStorage.Identifier(), @@ -4042,7 +4042,7 @@ func TestRuntimeCapabilitiesUnpublishBackwardCompatibility(t *testing.T) { }) require.NoError(t, err) - publicStorageMap := storage.GetStorageMap( + publicStorageMap := storage.GetDomainStorageMap( inter, testAddress, common.PathDomainPublic.Identifier(), diff --git a/runtime/contract_test.go b/runtime/contract_test.go index a009906aa0..ec20627064 100644 --- a/runtime/contract_test.go +++ b/runtime/contract_test.go @@ -223,7 +223,7 @@ func TestRuntimeContract(t *testing.T) { getContractValueExists := func() bool { storageMap := NewStorage(storage, nil). - GetStorageMap(inter, signerAddress, StorageDomainContract, false) + GetDomainStorageMap(inter, signerAddress, StorageDomainContract, false) if storageMap == nil { return false } diff --git a/runtime/environment.go b/runtime/environment.go index 1806f18698..a0d8e661e9 100644 --- a/runtime/environment.go +++ b/runtime/environment.go @@ -1106,7 +1106,7 @@ func (e *interpreterEnvironment) loadContract( location := compositeType.Location if addressLocation, ok := location.(common.AddressLocation); ok { - storageMap := e.storage.GetStorageMap( + storageMap := e.storage.GetDomainStorageMap( inter, addressLocation.Address, StorageDomainContract, diff --git a/runtime/ft_test.go b/runtime/ft_test.go index 3e37164cf7..32e81e6df5 100644 --- a/runtime/ft_test.go +++ b/runtime/ft_test.go @@ -1083,7 +1083,7 @@ func TestRuntimeBrokenFungibleTokenRecovery(t *testing.T) { contractsAddress, ) - contractStorage := storage.GetStorageMap( + contractStorage := storage.GetDomainStorageMap( inter, contractsAddress, StorageDomainContract, @@ -1119,7 +1119,7 @@ func TestRuntimeBrokenFungibleTokenRecovery(t *testing.T) { userAddress, ) - userStorage := storage.GetStorageMap( + userStorage := storage.GetDomainStorageMap( inter, userAddress, common.PathDomainStorage.Identifier(), diff --git a/runtime/storage.go b/runtime/storage.go index d2559818ad..e5ce8ead30 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -41,18 +41,6 @@ const ( type Storage struct { *atree.PersistentSlabStorage - // NewAccountStorageMapSlabIndices contains root slab index of new accounts' storage map. - // The indices are saved using Ledger.SetValue() during Commit(). - // Key is StorageKey{address, accountStorageKey} and value is 8-byte slab index. - NewAccountStorageMapSlabIndices *orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex] - - // unmigratedAccounts are accounts that were accessed but not migrated. - unmigratedAccounts *orderedmap.OrderedMap[common.Address, struct{}] - - // cachedAccountStorageMaps is a cache of account storage maps. - // Key is StorageKey{address, accountStorageKey} and value is account storage map. 
- cachedAccountStorageMaps map[interpreter.StorageKey]*interpreter.AccountStorageMap - // cachedDomainStorageMaps is a cache of domain storage maps. // Key is StorageKey{address, domain} and value is domain storage map. cachedDomainStorageMaps map[interpreter.StorageKey]*interpreter.DomainStorageMap @@ -64,6 +52,10 @@ type Storage struct { Ledger atree.Ledger memoryGauge common.MemoryGauge + + accountStorageV1 *AccountStorageV1 + // TODO: + //accountStorageV2 *AccountStorageV2 } var _ atree.SlabStorage = &Storage{} @@ -98,26 +90,52 @@ func NewStorage(ledger atree.Ledger, memoryGauge common.MemoryGauge) *Storage { decodeStorable, decodeTypeInfo, ) + + accountStorageV1 := NewAccountStorageV1( + ledger, + persistentSlabStorage, + memoryGauge, + ) + // TODO: + //accountStorageV2 := NewAccountStorageV2(ledger, memoryGauge) + return &Storage{ - Ledger: ledger, - PersistentSlabStorage: persistentSlabStorage, - cachedAccountStorageMaps: map[interpreter.StorageKey]*interpreter.AccountStorageMap{}, - cachedDomainStorageMaps: map[interpreter.StorageKey]*interpreter.DomainStorageMap{}, - memoryGauge: memoryGauge, + PersistentSlabStorage: persistentSlabStorage, + cachedDomainStorageMaps: map[interpreter.StorageKey]*interpreter.DomainStorageMap{}, + Ledger: ledger, + memoryGauge: memoryGauge, + accountStorageV1: accountStorageV1, + // TODO: + //accountStorageV2: accountStorageV2, } } const storageIndexLength = 8 -// GetStorageMap returns existing or new domain storage map for the given account and domain. -func (s *Storage) GetStorageMap( +// GetDomainStorageMap returns existing or new domain storage map for the given account and domain. +func (s *Storage) GetDomainStorageMap( inter *interpreter.Interpreter, address common.Address, domain string, createIfNotExists bool, ) ( - storageMap *interpreter.DomainStorageMap, + domainStorageMap *interpreter.DomainStorageMap, ) { + // Get cached domain storage map if it exists. + + domainStorageKey := interpreter.NewStorageKey(s.memoryGauge, address, domain) + + domainStorageMap = s.cachedDomainStorageMaps[domainStorageKey] + if domainStorageMap != nil { + return domainStorageMap + } + + defer func() { + // Cache domain storage map + if domainStorageMap != nil { + s.cachedDomainStorageMaps[domainStorageKey] = domainStorageMap + } + }() // Account can be migrated account, new account, or unmigrated account. // @@ -176,203 +194,18 @@ func (s *Storage) GetStorageMap( // For example, iterating values in unmigrated account doesn't trigger migration, // and checking if domain exists doesn't trigger migration. - // Get cached domain storage map if it exists. - - domainStorageKey := interpreter.NewStorageKey(s.memoryGauge, address, domain) - - if domainStorageMap := s.cachedDomainStorageMaps[domainStorageKey]; domainStorageMap != nil { - return domainStorageMap - } - // Get (or create) domain storage map from existing account storage map // if account is migrated account. - accountStorageKey := interpreter.NewStorageKey(s.memoryGauge, address, AccountStorageKey) - - accountStorageMap, err := s.getAccountStorageMap(accountStorageKey) - if err != nil { - panic(err) - } - - if accountStorageMap != nil { - // This is migrated account. - - // Get (or create) domain storage map from account storage map. 
- domainStorageMap := accountStorageMap.GetDomain(s.memoryGauge, inter, domain, createIfNotExists) - - // Cache domain storage map - if domainStorageMap != nil { - s.cachedDomainStorageMaps[domainStorageKey] = domainStorageMap - } - - return domainStorageMap - } - - // At this point, account is either new or unmigrated account. - - domainStorageMap, err := getDomainStorageMapFromLegacyDomainRegister(s.Ledger, s.PersistentSlabStorage, address, domain) - if err != nil { - panic(err) - } - - if domainStorageMap != nil { - // This is a unmigrated account with given domain register. - - // Sanity check that domain is among expected domains. - if _, exist := accountDomainsSet[domain]; !exist { - // TODO: maybe also log unexpected domain. - panic(errors.NewUnexpectedError( - "unexpected domain %s exists for account %s", domain, address.String(), - )) - } - - // Cache domain storage map - s.cachedDomainStorageMaps[domainStorageKey] = domainStorageMap - - // Add account to unmigrated account list - s.addUnmigratedAccount(address) - - return domainStorageMap - } - - // At this point, account is either new account or unmigrated account without given domain. - - // Domain doesn't exist. Return early if createIfNotExists is false. - - if !createIfNotExists { - return nil - } - - // Handle unmigrated account - unmigrated, err := s.isUnmigratedAccount(address) - if err != nil { - panic(err) - } - if unmigrated { - // Add account to unmigrated account list - s.addUnmigratedAccount(address) - - // Create new domain storage map - domainStorageMap := interpreter.NewDomainStorageMap(s.memoryGauge, s, atree.Address(address)) - - // Cache new domain storage map - s.cachedDomainStorageMaps[domainStorageKey] = domainStorageMap - - return domainStorageMap - } - - // Handle new account - - // Create account storage map - accountStorageMap = interpreter.NewAccountStorageMap(s.memoryGauge, s, atree.Address(address)) - - // Cache account storage map - s.cachedAccountStorageMaps[accountStorageKey] = accountStorageMap - - // Create new domain storage map as an element in account storage map - domainStorageMap = accountStorageMap.NewDomain(s.memoryGauge, inter, domain) - - // Cache domain storage map - s.cachedDomainStorageMaps[domainStorageKey] = domainStorageMap - - // Save new account and its account storage map root SlabID to new accout list - s.addNewAccount(accountStorageKey, accountStorageMap.SlabID().Index()) - - return domainStorageMap -} - -// getAccountStorageMap returns AccountStorageMap if exists, or nil otherwise. -func (s *Storage) getAccountStorageMap(accountStorageKey interpreter.StorageKey) (*interpreter.AccountStorageMap, error) { - - // Return cached account storage map if available. - - accountStorageMap := s.cachedAccountStorageMaps[accountStorageKey] - if accountStorageMap != nil { - return accountStorageMap, nil - } - - // Load account storage map if account storage register exists. 
- - accountStorageSlabIndex, accountStorageRegisterExists, err := getSlabIndexFromRegisterValue( - s.Ledger, - accountStorageKey.Address, - []byte(accountStorageKey.Key), - ) - if err != nil { - return nil, err - } - if !accountStorageRegisterExists { - return nil, nil - } - - slabID := atree.NewSlabID( - atree.Address(accountStorageKey.Address), - accountStorageSlabIndex, - ) - - accountStorageMap = interpreter.NewAccountStorageMapWithRootID(s, slabID) - - // Cache account storage map - - s.cachedAccountStorageMaps[accountStorageKey] = accountStorageMap - - return accountStorageMap, nil -} - -// getDomainStorageMapFromLegacyDomainRegister returns domain storage map from legacy domain register. -func getDomainStorageMapFromLegacyDomainRegister( - ledger atree.Ledger, - storage atree.SlabStorage, - address common.Address, - domain string, -) (*interpreter.DomainStorageMap, error) { - domainStorageSlabIndex, domainRegisterExists, err := getSlabIndexFromRegisterValue(ledger, address, []byte(domain)) - if err != nil { - return nil, err - } - if !domainRegisterExists { - return nil, nil - } - - slabID := atree.NewSlabID(atree.Address(address), domainStorageSlabIndex) - return interpreter.NewDomainStorageMapWithRootID(storage, slabID), nil -} - -func (s *Storage) addUnmigratedAccount(address common.Address) { - if s.unmigratedAccounts == nil { - s.unmigratedAccounts = &orderedmap.OrderedMap[common.Address, struct{}]{} - } - if !s.unmigratedAccounts.Contains(address) { - s.unmigratedAccounts.Set(address, struct{}{}) - } -} - -func (s *Storage) addNewAccount(accountStorageKey interpreter.StorageKey, slabIndex atree.SlabIndex) { - if s.NewAccountStorageMapSlabIndices == nil { - s.NewAccountStorageMapSlabIndices = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} - } - s.NewAccountStorageMapSlabIndices.Set(accountStorageKey, slabIndex) -} - -// isUnmigratedAccount returns true if given account has any domain registers. -func (s *Storage) isUnmigratedAccount(address common.Address) (bool, error) { - if s.unmigratedAccounts != nil && - s.unmigratedAccounts.Contains(address) { - return true, nil - } - - // Check most frequently used domains first, such as storage, public, private. - for _, domain := range AccountDomains { - _, domainExists, err := getSlabIndexFromRegisterValue(s.Ledger, address, []byte(domain)) - if err != nil { - return false, err - } - if domainExists { - return true, nil - } - } + // TODO: + //domainStorageMap = s.accountStorageV2.GetDomainStorageMap(address, domain, createIfNotExists) + //if domainStorageMap != nil { + // return domainStorageMap + //} + // + //// At this point, account is either new or unmigrated account. - return false, nil + return s.accountStorageV1.GetDomainStorageMap(address, domain, createIfNotExists) } // getSlabIndexFromRegisterValue returns register value as atree.SlabIndex. 
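The body of getSlabIndexFromRegisterValue is not part of this hunk. A minimal sketch of the decoding it implies, assuming the register value is exactly storageIndexLength (8) bytes and holds the raw root slab index; the error handling shown here is illustrative, not the actual implementation:

	// Illustrative sketch only — assumes atree.Ledger.GetValue and storageIndexLength as above.
	func getSlabIndexFromRegisterValueSketch(
		ledger atree.Ledger,
		address common.Address,
		key []byte,
	) (atree.SlabIndex, bool, error) {
		var slabIndex atree.SlabIndex

		value, err := ledger.GetValue(address[:], key)
		if err != nil {
			return slabIndex, false, err
		}

		// A missing register reads back as an empty value.
		if len(value) == 0 {
			return slabIndex, false, nil
		}

		// The register value is the raw 8-byte root slab index.
		if len(value) != storageIndexLength {
			return slabIndex, false, fmt.Errorf("invalid storage index size: %d", len(value))
		}

		copy(slabIndex[:], value)
		return slabIndex, true, nil
	}
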
@@ -470,7 +303,7 @@ func (s *Storage) writeContractUpdate( key interpreter.StorageKey, contractValue *interpreter.CompositeValue, ) { - storageMap := s.GetStorageMap(inter, key.Address, StorageDomainContract, true) + storageMap := s.GetDomainStorageMap(inter, key.Address, StorageDomainContract, true) // NOTE: pass nil instead of allocating a Value-typed interface that points to nil storageMapKey := interpreter.StringStorageMapKey(key.Key) if contractValue == nil { @@ -498,16 +331,16 @@ func (s *Storage) commit(inter *interpreter.Interpreter, commitContractUpdates b s.commitContractUpdates(inter) } - err := s.commitNewStorageMaps() + err := s.accountStorageV1.commit() if err != nil { return err } - // Migrate accounts that have write ops before calling PersistentSlabStorage.FastCommit(). - err = s.migrateAccountsIfNeeded(inter) - if err != nil { - return err - } + // TODO: + //err = s.accountStorageV2.commit() + //if err != nil { + // return err + //} // Commit the underlying slab storage's writes @@ -529,15 +362,14 @@ func (s *Storage) commit(inter *interpreter.Interpreter, commitContractUpdates b } } -func (s *Storage) commitNewStorageMaps() error { - if s.NewAccountStorageMapSlabIndices == nil { - return nil - } - - for pair := s.NewAccountStorageMapSlabIndices.Oldest(); pair != nil; pair = pair.Next() { +func commitSlabIndices( + slabIndices *orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex], + ledger atree.Ledger, +) error { + for pair := slabIndices.Oldest(); pair != nil; pair = pair.Next() { var err error errors.WrapPanic(func() { - err = s.Ledger.SetValue( + err = ledger.SetValue( pair.Key.Address[:], []byte(pair.Key.Key), pair.Value[:], @@ -551,166 +383,162 @@ func (s *Storage) commitNewStorageMaps() error { return nil } -func (s *Storage) migrateAccountsIfNeeded(inter *interpreter.Interpreter) error { - if s.unmigratedAccounts == nil || s.unmigratedAccounts.Len() == 0 { - return nil - } - return s.migrateAccounts(inter) -} - -func (s *Storage) migrateAccounts(inter *interpreter.Interpreter) error { - // Predicate function allows migration for accounts with write ops. - migrateAccountPred := func(address common.Address) bool { - return s.PersistentSlabStorage.HasUnsavedChanges(atree.Address(address)) - } - - // getDomainStorageMap function returns cached domain storage map if it is available - // before loading domain storage map from storage. - // This is necessary to migrate uncommitted (new) but cached domain storage map. - getDomainStorageMap := func( - ledger atree.Ledger, - storage atree.SlabStorage, - address common.Address, - domain string, - ) (*interpreter.DomainStorageMap, error) { - domainStorageKey := interpreter.NewStorageKey(s.memoryGauge, address, domain) - - // Get cached domain storage map if available. 
- domainStorageMap := s.cachedDomainStorageMaps[domainStorageKey] - - if domainStorageMap != nil { - return domainStorageMap, nil - } - - return getDomainStorageMapFromLegacyDomainRegister(ledger, storage, address, domain) - } - - migrator := NewDomainRegisterMigration(s.Ledger, s.PersistentSlabStorage, inter, s.memoryGauge) - migrator.SetGetDomainStorageMapFunc(getDomainStorageMap) - - migratedAccounts, err := migrator.MigrateAccounts(s.unmigratedAccounts, migrateAccountPred) - if err != nil { - return err - } - - if migratedAccounts == nil { - return nil - } - - // Update internal state with migrated accounts - for pair := migratedAccounts.Oldest(); pair != nil; pair = pair.Next() { - address := pair.Key - accountStorageMap := pair.Value - - // Cache migrated account storage map - accountStorageKey := interpreter.NewStorageKey(s.memoryGauge, address, AccountStorageKey) - s.cachedAccountStorageMaps[accountStorageKey] = accountStorageMap - - // Remove migrated accounts from unmigratedAccounts - s.unmigratedAccounts.Delete(address) - } - - return nil -} +// TODO: +//func (s *Storage) migrateAccounts(inter *interpreter.Interpreter) error { +// // Predicate function allows migration for accounts with write ops. +// migrateAccountPred := func(address common.Address) bool { +// return s.PersistentSlabStorage.HasUnsavedChanges(atree.Address(address)) +// } +// +// // getDomainStorageMap function returns cached domain storage map if it is available +// // before loading domain storage map from storage. +// // This is necessary to migrate uncommitted (new) but cached domain storage map. +// getDomainStorageMap := func( +// ledger atree.Ledger, +// storage atree.SlabStorage, +// address common.Address, +// domain string, +// ) (*interpreter.DomainStorageMap, error) { +// domainStorageKey := interpreter.NewStorageKey(s.memoryGauge, address, domain) +// +// // Get cached domain storage map if available. 
+// domainStorageMap := s.cachedDomainStorageMaps[domainStorageKey] +// +// if domainStorageMap != nil { +// return domainStorageMap, nil +// } +// +// return getDomainStorageMapFromLegacyDomainRegister(ledger, storage, address, domain) +// } +// +// migrator := NewDomainRegisterMigration(s.Ledger, s.PersistentSlabStorage, inter, s.memoryGauge) +// migrator.SetGetDomainStorageMapFunc(getDomainStorageMap) +// +// migratedAccounts, err := migrator.MigrateAccounts(s.unmigratedAccounts, migrateAccountPred) +// if err != nil { +// return err +// } +// +// if migratedAccounts == nil { +// return nil +// } +// +// // Update internal state with migrated accounts +// for pair := migratedAccounts.Oldest(); pair != nil; pair = pair.Next() { +// address := pair.Key +// accountStorageMap := pair.Value +// +// // Cache migrated account storage map +// accountStorageKey := interpreter.NewStorageKey(s.memoryGauge, address, AccountStorageKey) +// s.cachedAccountStorageMaps[accountStorageKey] = accountStorageMap +// +// // Remove migrated accounts from unmigratedAccounts +// s.unmigratedAccounts.Delete(address) +// } +// +// return nil +//} func (s *Storage) CheckHealth() error { - // Check slab storage health - rootSlabIDs, err := atree.CheckStorageHealth(s, -1) - if err != nil { - return err - } - - // Find account / non-temporary root slab IDs - - accountRootSlabIDs := make(map[atree.SlabID]struct{}, len(rootSlabIDs)) - - // NOTE: map range is safe, as it creates a subset - for rootSlabID := range rootSlabIDs { //nolint:maprange - if rootSlabID.HasTempAddress() { - continue - } - - accountRootSlabIDs[rootSlabID] = struct{}{} - } - - // Check that account storage maps and unmigrated domain storage maps - // match returned root slabs from atree.CheckStorageHealth. - - var storageMapStorageIDs []atree.SlabID + // TODO: - // Get cached account storage map slab IDs. - for _, storageMap := range s.cachedAccountStorageMaps { //nolint:maprange - storageMapStorageIDs = append( - storageMapStorageIDs, - storageMap.SlabID(), - ) - } - - // Get cached unmigrated domain storage map slab IDs - for storageKey, storageMap := range s.cachedDomainStorageMaps { //nolint:maprange - address := storageKey.Address - - if s.unmigratedAccounts != nil && - s.unmigratedAccounts.Contains(address) { - - domainValueID := storageMap.ValueID() - - slabID := atree.NewSlabID( - atree.Address(address), - atree.SlabIndex(domainValueID[8:]), - ) - - storageMapStorageIDs = append( - storageMapStorageIDs, - slabID, - ) - } - } - - sort.Slice(storageMapStorageIDs, func(i, j int) bool { - a := storageMapStorageIDs[i] - b := storageMapStorageIDs[j] - return a.Compare(b) < 0 - }) - - found := map[atree.SlabID]struct{}{} - - for _, storageMapStorageID := range storageMapStorageIDs { - if _, ok := accountRootSlabIDs[storageMapStorageID]; !ok { - return errors.NewUnexpectedError("account storage map (and unmigrated domain storage map) points to non-root slab %s", storageMapStorageID) - } - - found[storageMapStorageID] = struct{}{} - } - - // Check that all slabs in slab storage - // are referenced by storables in account storage. - // If a slab is not referenced, it is garbage. 
- - if len(accountRootSlabIDs) > len(found) { - var unreferencedRootSlabIDs []atree.SlabID - - for accountRootSlabID := range accountRootSlabIDs { //nolint:maprange - if _, ok := found[accountRootSlabID]; ok { - continue - } - - unreferencedRootSlabIDs = append( - unreferencedRootSlabIDs, - accountRootSlabID, - ) - } - - sort.Slice(unreferencedRootSlabIDs, func(i, j int) bool { - a := unreferencedRootSlabIDs[i] - b := unreferencedRootSlabIDs[j] - return a.Compare(b) < 0 - }) - - return UnreferencedRootSlabsError{ - UnreferencedRootSlabIDs: unreferencedRootSlabIDs, - } - } + //// Check slab storage health + //rootSlabIDs, err := atree.CheckStorageHealth(s, -1) + //if err != nil { + // return err + //} + // + //// Find account / non-temporary root slab IDs + // + //accountRootSlabIDs := make(map[atree.SlabID]struct{}, len(rootSlabIDs)) + // + //// NOTE: map range is safe, as it creates a subset + //for rootSlabID := range rootSlabIDs { //nolint:maprange + // if rootSlabID.HasTempAddress() { + // continue + // } + // + // accountRootSlabIDs[rootSlabID] = struct{}{} + //} + // + //// Check that account storage maps and unmigrated domain storage maps + //// match returned root slabs from atree.CheckStorageHealth. + // + //var storageMapStorageIDs []atree.SlabID + // + //// Get cached account storage map slab IDs. + //for _, storageMap := range s.cachedAccountStorageMaps { //nolint:maprange + // storageMapStorageIDs = append( + // storageMapStorageIDs, + // storageMap.SlabID(), + // ) + //} + // + //// Get cached unmigrated domain storage map slab IDs + //for storageKey, storageMap := range s.cachedDomainStorageMaps { //nolint:maprange + // address := storageKey.Address + // + // if s.unmigratedAccounts != nil && + // s.unmigratedAccounts.Contains(address) { + // + // domainValueID := storageMap.ValueID() + // + // slabID := atree.NewSlabID( + // atree.Address(address), + // atree.SlabIndex(domainValueID[8:]), + // ) + // + // storageMapStorageIDs = append( + // storageMapStorageIDs, + // slabID, + // ) + // } + //} + // + //sort.Slice(storageMapStorageIDs, func(i, j int) bool { + // a := storageMapStorageIDs[i] + // b := storageMapStorageIDs[j] + // return a.Compare(b) < 0 + //}) + // + //found := map[atree.SlabID]struct{}{} + // + //for _, storageMapStorageID := range storageMapStorageIDs { + // if _, ok := accountRootSlabIDs[storageMapStorageID]; !ok { + // return errors.NewUnexpectedError("account storage map (and unmigrated domain storage map) points to non-root slab %s", storageMapStorageID) + // } + // + // found[storageMapStorageID] = struct{}{} + //} + // + //// Check that all slabs in slab storage + //// are referenced by storables in account storage. + //// If a slab is not referenced, it is garbage. 
+ // + //if len(accountRootSlabIDs) > len(found) { + // var unreferencedRootSlabIDs []atree.SlabID + // + // for accountRootSlabID := range accountRootSlabIDs { //nolint:maprange + // if _, ok := found[accountRootSlabID]; ok { + // continue + // } + // + // unreferencedRootSlabIDs = append( + // unreferencedRootSlabIDs, + // accountRootSlabID, + // ) + // } + // + // sort.Slice(unreferencedRootSlabIDs, func(i, j int) bool { + // a := unreferencedRootSlabIDs[i] + // b := unreferencedRootSlabIDs[j] + // return a.Compare(b) < 0 + // }) + // + // return UnreferencedRootSlabsError{ + // UnreferencedRootSlabIDs: unreferencedRootSlabIDs, + // } + //} return nil } diff --git a/runtime/storage_test.go b/runtime/storage_test.go index 55090be2be..7d970b6364 100644 --- a/runtime/storage_test.go +++ b/runtime/storage_test.go @@ -35,7 +35,6 @@ import ( "github.com/onflow/cadence" "github.com/onflow/cadence/common" - "github.com/onflow/cadence/common/orderedmap" "github.com/onflow/cadence/encoding/json" "github.com/onflow/cadence/interpreter" . "github.com/onflow/cadence/runtime" @@ -57,24 +56,27 @@ func withWritesToStorage( inter := NewTestInterpreter(tb) - address := common.MustBytesToAddress([]byte{0x1}) + // TODO: + //address := common.MustBytesToAddress([]byte{0x1}) for i := 0; i < count; i++ { randomIndex := random.Uint32() - storageKey := interpreter.StorageKey{ - Address: address, - Key: fmt.Sprintf("%d", randomIndex), - } + // TODO: + //storageKey := interpreter.StorageKey{ + // Address: address, + // Key: fmt.Sprintf("%d", randomIndex), + //} var slabIndex atree.SlabIndex binary.BigEndian.PutUint32(slabIndex[:], randomIndex) - if storage.NewAccountStorageMapSlabIndices == nil { - storage.NewAccountStorageMapSlabIndices = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} - } - storage.NewAccountStorageMapSlabIndices.Set(storageKey, slabIndex) + // TODO: + //if storage.NewAccountStorageMapSlabIndices == nil { + // storage.NewAccountStorageMapSlabIndices = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} + //} + //storage.NewAccountStorageMapSlabIndices.Set(storageKey, slabIndex) } handler(storage, inter) @@ -3110,7 +3112,7 @@ func TestRuntimeStorageInternalAccess(t *testing.T) { }) require.NoError(t, err) - storageMap := storage.GetStorageMap(inter, address, common.PathDomainStorage.Identifier(), false) + storageMap := storage.GetDomainStorageMap(inter, address, common.PathDomainStorage.Identifier(), false) require.NotNil(t, storageMap) // Read first @@ -6268,7 +6270,7 @@ func TestRuntimeStorageForNewAccount(t *testing.T) { // Get non-existent domain storage map const createIfNotExists = false - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.Nil(t, domainStorageMap) // Commit changes @@ -6321,7 +6323,7 @@ func TestRuntimeStorageForNewAccount(t *testing.T) { for _, domain := range tc.newDomains { // Create new domain storage map const createIfNotExists = true - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) require.Equal(t, uint64(0), domainStorageMap.Count()) @@ -6404,7 +6406,7 @@ func TestRuntimeStorageForNewAccount(t *testing.T) { { for _, domain := range domains { const createIfNotExists = true - domainStorageMap := storage.GetStorageMap(inter, address, domain, 
createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) require.Equal(t, uint64(0), domainStorageMap.Count()) @@ -6428,7 +6430,7 @@ func TestRuntimeStorageForNewAccount(t *testing.T) { { for _, domain := range domains { const createIfNotExists = false - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) require.Equal(t, uint64(0), domainStorageMap.Count()) @@ -6454,7 +6456,7 @@ func TestRuntimeStorageForNewAccount(t *testing.T) { { for _, domain := range domains { const createIfNotExists = false - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) expectedDomainValues := accountValues[domain] @@ -6486,7 +6488,7 @@ func TestRuntimeStorageForNewAccount(t *testing.T) { { for _, domain := range domains { const createIfNotExists = false - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) require.Equal(t, uint64(0), domainStorageMap.Count()) } @@ -6561,7 +6563,7 @@ func TestRuntimeStorageForMigratedAccount(t *testing.T) { // Get non-existent domain storage map const createIfNotExists = false - domainStorageMap := storage.GetStorageMap(inter, address, nonexistentDomain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, nonexistentDomain, createIfNotExists) require.Nil(t, domainStorageMap) // Commit changes @@ -6607,7 +6609,7 @@ func TestRuntimeStorageForMigratedAccount(t *testing.T) { // Read existing domain storage map for domain, domainValues := range accountValues { - domainStorageMap := storage.GetStorageMap(inter, address, domain, tc.createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, tc.createIfNotExists) require.NotNil(t, domainStorageMap) require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) @@ -6696,7 +6698,7 @@ func TestRuntimeStorageForMigratedAccount(t *testing.T) { // Create and write to domain storage map (createIfNotExists is true) for _, domain := range tc.newDomains { const createIfNotExists = true - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) require.Equal(t, uint64(0), domainStorageMap.Count()) @@ -6779,7 +6781,7 @@ func TestRuntimeStorageForMigratedAccount(t *testing.T) { // Write to existing domain storage map (createIfNotExists is false) for _, domain := range existingDomains { const createIfNotExists = false - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) domainValues := accountValues[domain] @@ -6873,7 +6875,7 @@ func TestRuntimeStorageForMigratedAccount(t *testing.T) { { for _, domain := range domains { const createIfNotExists = false - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, 
address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) domainValues := accountValues[domain] @@ -6904,7 +6906,7 @@ func TestRuntimeStorageForMigratedAccount(t *testing.T) { { for _, domain := range domains { const createIfNotExists = false - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) domainValues := accountValues[domain] @@ -6935,7 +6937,7 @@ func TestRuntimeStorageForMigratedAccount(t *testing.T) { { for _, domain := range domains { const createIfNotExists = false - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) expectedDomainValues := accountValues[domain] @@ -6967,7 +6969,7 @@ func TestRuntimeStorageForMigratedAccount(t *testing.T) { { for _, domain := range domains { const createIfNotExists = false - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) require.Equal(t, uint64(0), domainStorageMap.Count()) } @@ -7069,7 +7071,7 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { // Get non-existent domain storage map const createIfNotExists = false nonexistingDomain := common.PathDomainPublic.Identifier() - domainStorageMap := storage.GetStorageMap(inter, address, nonexistingDomain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, nonexistingDomain, createIfNotExists) require.Nil(t, domainStorageMap) // Commit changes @@ -7115,11 +7117,11 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { // Read existing domain storage map for domain, domainValues := range accountValues { - domainStorageMap := storage.GetStorageMap(inter, address, domain, tc.createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, tc.createIfNotExists) require.NotNil(t, domainStorageMap) require.Equal(t, uint64(len(domainValues)), domainStorageMap.Count()) - // Read elements to to domain storage map + // Read elements to domain storage map for k, expectedV := range domainValues { v := domainStorageMap.ReadValue(nil, k) ev, ok := v.(interpreter.EquatableValue) @@ -7206,7 +7208,7 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { // Create and write to new domain storage map for _, domain := range tc.newDomains { const createIfNotExists = true - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) require.Equal(t, uint64(0), domainStorageMap.Count()) @@ -7297,7 +7299,7 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { // write to existing domain storage map (createIfNotExists is false) for _, domain := range domains { const createIfNotExists = false - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) domainValues := accountValues[domain] @@ -7377,7 +7379,7 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { checkAccountStorageMapData(t, ledger.StoredValues, 
ledger.StorageIndices, address, accountValues) }) - // This test test storage map operations (including account migration) with intermittent Commit() + // This test storage map operations (including account migration) with intermittent Commit() // - read domain storage map and commit // - write to domain storage map and commit (including account migration) // - remove all elements from domain storage map and commit @@ -7407,7 +7409,7 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { { for _, domain := range domains { const createIfNotExists = false - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) domainValues := accountValues[domain] @@ -7436,7 +7438,7 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { // update existing domain storage map (loaded from storage) for _, domain := range domains { const createIfNotExists = false - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) domainValues := accountValues[domain] @@ -7507,7 +7509,7 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { { for _, domain := range domains { const createIfNotExists = false - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) domainValues := accountValues[domain] @@ -7556,7 +7558,7 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { { for _, domain := range domains { const createIfNotExists = false - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) domainValues := accountValues[domain] @@ -7616,7 +7618,7 @@ func TestRuntimeStorageDomainStorageMapInlinedState(t *testing.T) { // Create domain storage map const createIfNotExists = true - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) require.True(t, domainStorageMap.Inlined()) @@ -7738,7 +7740,7 @@ func TestRuntimeStorageLargeDomainValues(t *testing.T) { // Create domain storage map const createIfNotExists = true - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) require.True(t, domainStorageMap.Inlined()) @@ -7865,7 +7867,7 @@ func TestDomainRegisterMigrationForLargeAccount(t *testing.T) { // Create new domain storage map const createIfNotExists = true domain := stdlib.InboxStorageDomain - domainStorageMap := storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) accountValues[domain] = make(domainStorageMapValues) @@ -7921,7 +7923,7 @@ func createAndWriteAccountStorageMap( // Create domain storage map for _, domain := range domains { const createIfNotExists = true - domainStorageMap := 
storage.GetStorageMap(inter, address, domain, createIfNotExists) + domainStorageMap := storage.GetDomainStorageMap(inter, address, domain, createIfNotExists) require.NotNil(t, domainStorageMap) require.Equal(t, uint64(0), domainStorageMap.Count()) diff --git a/stdlib/account.go b/stdlib/account.go index 4ce0f6d65c..7126789104 100644 --- a/stdlib/account.go +++ b/stdlib/account.go @@ -3252,7 +3252,7 @@ func recordStorageCapabilityController( storageMapKey := interpreter.StringStorageMapKey(identifier) - storageMap := inter.Storage().GetStorageMap( + storageMap := inter.Storage().GetDomainStorageMap( inter, address, PathCapabilityStorageDomain, @@ -3295,7 +3295,7 @@ func getPathCapabilityIDSet( storageMapKey := interpreter.StringStorageMapKey(identifier) - storageMap := inter.Storage().GetStorageMap( + storageMap := inter.Storage().GetDomainStorageMap( inter, address, PathCapabilityStorageDomain, @@ -3346,7 +3346,7 @@ func unrecordStorageCapabilityController( // Remove capability set if empty if capabilityIDSet.Count() == 0 { - storageMap := inter.Storage().GetStorageMap( + storageMap := inter.Storage().GetDomainStorageMap( inter, address, PathCapabilityStorageDomain, @@ -3419,7 +3419,7 @@ func recordAccountCapabilityController( storageMapKey := interpreter.Uint64StorageMapKey(capabilityIDValue) - storageMap := inter.Storage().GetStorageMap( + storageMap := inter.Storage().GetDomainStorageMap( inter, address, AccountCapabilityStorageDomain, @@ -3447,7 +3447,7 @@ func unrecordAccountCapabilityController( storageMapKey := interpreter.Uint64StorageMapKey(capabilityIDValue) - storageMap := inter.Storage().GetStorageMap( + storageMap := inter.Storage().GetDomainStorageMap( inter, address, AccountCapabilityStorageDomain, @@ -3467,7 +3467,7 @@ func getAccountCapabilityControllerIDsIterator( nextCapabilityID func() (uint64, bool), count uint64, ) { - storageMap := inter.Storage().GetStorageMap( + storageMap := inter.Storage().GetDomainStorageMap( inter, address, AccountCapabilityStorageDomain, diff --git a/tools/storage-explorer/main.go b/tools/storage-explorer/main.go index fe4f80504f..614143a5f7 100644 --- a/tools/storage-explorer/main.go +++ b/tools/storage-explorer/main.go @@ -184,7 +184,7 @@ func NewAccountStorageMapKeysHandler( } var keys []string - storageMap := storage.GetStorageMap(address, storageMapDomain, false) + storageMap := storage.GetDomainStorageMap(address, storageMapDomain, false) if storageMap == nil { keys = make([]string, 0) } else { @@ -225,7 +225,7 @@ func NewAccountStorageMapValueHandler( return } - storageMap := storage.GetStorageMap(address, storageMapDomain, false) + storageMap := storage.GetDomainStorageMap(address, storageMapDomain, false) if storageMap == nil { http.Error(w, "storage map does not exist", http.StatusNotFound) return From 6099a06650003925223693ab15fa62776cf60d47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Tue, 5 Nov 2024 16:34:17 -0800 Subject: [PATCH 08/52] try to add storage V2 --- runtime/account_storage_v2.go | 179 ++++++++++++++++++++++++++++++++++ runtime/storage.go | 138 ++++++++++---------------- 2 files changed, 230 insertions(+), 87 deletions(-) create mode 100644 runtime/account_storage_v2.go diff --git a/runtime/account_storage_v2.go b/runtime/account_storage_v2.go new file mode 100644 index 0000000000..870df59ee3 --- /dev/null +++ b/runtime/account_storage_v2.go @@ -0,0 +1,179 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the 
Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package runtime + +import ( + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/common/orderedmap" + "github.com/onflow/cadence/interpreter" +) + +type AccountStorageV2 struct { + ledger atree.Ledger + slabStorage atree.SlabStorage + memoryGauge common.MemoryGauge + + // cachedAccountStorageMaps is a cache of account storage maps. + // Key is StorageKey{address, accountStorageKey} and value is account storage map. + cachedAccountStorageMaps map[interpreter.StorageKey]*interpreter.AccountStorageMap + + // newAccountStorageMapSlabIndices contains root slab index of new account storage maps. + // The indices are saved using Ledger.SetValue() during Commit(). + // Key is StorageKey{address, accountStorageKey} and value is 8-byte slab index. + newAccountStorageMapSlabIndices *orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex] +} + +func NewAccountStorageV2( + ledger atree.Ledger, + slabStorage atree.SlabStorage, + memoryGauge common.MemoryGauge, +) *AccountStorageV2 { + return &AccountStorageV2{ + ledger: ledger, + slabStorage: slabStorage, + memoryGauge: memoryGauge, + } +} + +func (s *AccountStorageV2) accountStorageKey(address common.Address) interpreter.StorageKey { + return interpreter.NewStorageKey( + s.memoryGauge, + address, + AccountStorageKey, + ) +} + +func (s *AccountStorageV2) GetDomainStorageMap( + inter *interpreter.Interpreter, + address common.Address, + domain string, + createIfNotExists bool, +) ( + domainStorageMap *interpreter.DomainStorageMap, +) { + accountStorageMap := s.getAccountStorageMap(address) + + if accountStorageMap == nil && createIfNotExists { + accountStorageMap = s.storeNewAccountStorageMap(address) + } + + if accountStorageMap != nil { + domainStorageMap = accountStorageMap.GetDomain( + s.memoryGauge, + inter, + domain, + createIfNotExists, + ) + } + + return +} + +// getAccountStorageMap returns AccountStorageMap if exists, or nil otherwise. +func (s *AccountStorageV2) getAccountStorageMap( + address common.Address, +) ( + accountStorageMap *interpreter.AccountStorageMap, +) { + accountStorageKey := s.accountStorageKey(address) + + // Return cached account storage map if it exists. + + if s.cachedAccountStorageMaps != nil { + accountStorageMap = s.cachedAccountStorageMaps[accountStorageKey] + if accountStorageMap != nil { + return accountStorageMap + } + } + + defer func() { + if accountStorageMap != nil { + s.cacheAccountStorageMap( + accountStorageKey, + accountStorageMap, + ) + } + }() + + // Load account storage map if account storage register exists. 
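	// (For illustration — the v2 register layout assumed by the loading code below:
	//  the account storage register is keyed by AccountStorageKey, and its 8-byte
	//  value is the root slab index of the account storage map. Loading amounts to:
	//
	//      index, exists, err := getSlabIndexFromRegisterValue(ledger, address, []byte(AccountStorageKey))
	//      // if err != nil or !exists, there is no account storage map
	//      slabID := atree.NewSlabID(atree.Address(address), index)
	//      accountStorageMap := interpreter.NewAccountStorageMapWithRootID(slabStorage, slabID)
	//
	//  which is what the following code does, reusing the same helper as the v1 domain registers.)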
+ + accountStorageSlabIndex, accountStorageRegisterExists, err := getSlabIndexFromRegisterValue( + s.ledger, + address, + []byte(accountStorageKey.Key), + ) + if err != nil { + panic(err) + } + if !accountStorageRegisterExists { + return nil + } + + slabID := atree.NewSlabID( + atree.Address(address), + accountStorageSlabIndex, + ) + + return interpreter.NewAccountStorageMapWithRootID(s.slabStorage, slabID) +} + +func (s *AccountStorageV2) cacheAccountStorageMap( + accountStorageKey interpreter.StorageKey, + accountStorageMap *interpreter.AccountStorageMap, +) { + if s.cachedAccountStorageMaps == nil { + s.cachedAccountStorageMaps = map[interpreter.StorageKey]*interpreter.AccountStorageMap{} + } + s.cachedAccountStorageMaps[accountStorageKey] = accountStorageMap +} + +func (s *AccountStorageV2) storeNewAccountStorageMap( + address common.Address, +) *interpreter.AccountStorageMap { + + accountStorageMap := interpreter.NewAccountStorageMap( + s.memoryGauge, + s.slabStorage, + atree.Address(address), + ) + + slabIndex := accountStorageMap.SlabID().Index() + + accountStorageKey := s.accountStorageKey(address) + + if s.newAccountStorageMapSlabIndices == nil { + s.newAccountStorageMapSlabIndices = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} + } + s.newAccountStorageMapSlabIndices.Set(accountStorageKey, slabIndex) + + return accountStorageMap +} + +func (s *AccountStorageV2) commit() error { + if s.newAccountStorageMapSlabIndices == nil { + return nil + } + + return commitSlabIndices( + s.newAccountStorageMapSlabIndices, + s.ledger, + ) +} diff --git a/runtime/storage.go b/runtime/storage.go index e5ce8ead30..287ed02461 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -54,8 +54,7 @@ type Storage struct { memoryGauge common.MemoryGauge accountStorageV1 *AccountStorageV1 - // TODO: - //accountStorageV2 *AccountStorageV2 + accountStorageV2 *AccountStorageV2 } var _ atree.SlabStorage = &Storage{} @@ -96,17 +95,18 @@ func NewStorage(ledger atree.Ledger, memoryGauge common.MemoryGauge) *Storage { persistentSlabStorage, memoryGauge, ) - // TODO: - //accountStorageV2 := NewAccountStorageV2(ledger, memoryGauge) + accountStorageV2 := NewAccountStorageV2( + ledger, + persistentSlabStorage, + memoryGauge, + ) return &Storage{ - PersistentSlabStorage: persistentSlabStorage, - cachedDomainStorageMaps: map[interpreter.StorageKey]*interpreter.DomainStorageMap{}, - Ledger: ledger, - memoryGauge: memoryGauge, - accountStorageV1: accountStorageV1, - // TODO: - //accountStorageV2: accountStorageV2, + Ledger: ledger, + PersistentSlabStorage: persistentSlabStorage, + memoryGauge: memoryGauge, + accountStorageV1: accountStorageV1, + accountStorageV2: accountStorageV2, } } @@ -125,87 +125,52 @@ func (s *Storage) GetDomainStorageMap( domainStorageKey := interpreter.NewStorageKey(s.memoryGauge, address, domain) - domainStorageMap = s.cachedDomainStorageMaps[domainStorageKey] - if domainStorageMap != nil { - return domainStorageMap + if s.cachedDomainStorageMaps != nil { + domainStorageMap = s.cachedDomainStorageMaps[domainStorageKey] + if domainStorageMap != nil { + return domainStorageMap + } } defer func() { // Cache domain storage map if domainStorageMap != nil { - s.cachedDomainStorageMaps[domainStorageKey] = domainStorageMap + s.cacheDomainStorageMap( + domainStorageKey, + domainStorageMap, + ) } }() - // Account can be migrated account, new account, or unmigrated account. - // - // - // ### Migrated Account - // - // Migrated account is account with AccountStorageKey register. 
- // Migrated account has account storage map, which contains domain storage maps - // with domain as key. - // - // If domain exists in the account storage map, domain storage map is returned. - // - // If domain doesn't exist and createIfNotExists is true, - // new domain storage map is created, inserted into account storage map, and returned. - // - // If domain doesn't exist and createIfNotExists is false, nil is returned. - // - // - // ### New Account - // - // New account is account without AccountStorageKey register and without any domain registers. - // NOTE: new account's AccountStorageKey register is persisted in Commit(). - // - // If createIfNotExists is true, - // - new account storage map is created - // - new domain storage map is created - // - domain storage map is inserted into account storage map - // - domain storage map is returned - // - // If createIfNotExists is false, nil is returned. - // - // - // ### Unmigrated Account - // - // Unmigrated account is account with at least one domain register. - // Unmigrated account has domain registers and corresponding domain storage maps. - // - // If domain exists (domain register exists), domain storage map is loaded and returned. - // - // If domain doesn't exist and createIfNotExists is true, - // new domain storage map is created and returned. - // NOTE: given account would be migrated in Commit() since this is write op. - // - // If domain doesn't exist and createIfNotExists is false, nil is returned. - // - // - // ### Migration of unmigrated accounts - // - // Migration happens in Commit() if unmigrated account has write ops. - // NOTE: Commit() is not called by this function. - // - // Specifically, - // - unmigrated account is migrated in Commit() if there are write ops. - // For example, inserting values in unmigrated account triggers migration in Commit(). - // - unmigrated account is unchanged if there are only read ops. - // For example, iterating values in unmigrated account doesn't trigger migration, - // and checking if domain exists doesn't trigger migration. - - // Get (or create) domain storage map from existing account storage map - // if account is migrated account. - // TODO: - //domainStorageMap = s.accountStorageV2.GetDomainStorageMap(address, domain, createIfNotExists) - //if domainStorageMap != nil { - // return domainStorageMap - //} - // - //// At this point, account is either new or unmigrated account. + const useV2 = false + + if useV2 { + domainStorageMap = s.accountStorageV2.GetDomainStorageMap( + inter, + address, + domain, + createIfNotExists, + ) + } else { + domainStorageMap = s.accountStorageV1.GetDomainStorageMap( + address, + domain, + createIfNotExists, + ) + } + return domainStorageMap +} + +func (s *Storage) cacheDomainStorageMap( + domainStorageKey interpreter.StorageKey, + domainStorageMap *interpreter.DomainStorageMap, +) { + if s.cachedDomainStorageMaps == nil { + s.cachedDomainStorageMaps = map[interpreter.StorageKey]*interpreter.DomainStorageMap{} + } - return s.accountStorageV1.GetDomainStorageMap(address, domain, createIfNotExists) + s.cachedDomainStorageMaps[domainStorageKey] = domainStorageMap } // getSlabIndexFromRegisterValue returns register value as atree.SlabIndex. 
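Taken together, the new GetDomainStorageMap above caches per-domain maps and dispatches between the v1 and v2 account layouts, so callers are unaffected by the migration. A test-style usage sketch, assuming ledger, inter, and address are set up as in the surrounding tests:

	storage := NewStorage(ledger, nil)

	const createIfNotExists = true
	domainStorageMap := storage.GetDomainStorageMap(
		inter,
		address,
		common.PathDomainStorage.Identifier(),
		createIfNotExists,
	)

	// Reads and writes go through the same map regardless of which
	// account storage format backs it.
	key := interpreter.StringStorageMapKey("test")
	domainStorageMap.WriteValue(inter, key, interpreter.TrueValue)
	_ = domainStorageMap.ReadValue(nil, key)
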
@@ -336,11 +301,10 @@ func (s *Storage) commit(inter *interpreter.Interpreter, commitContractUpdates b return err } - // TODO: - //err = s.accountStorageV2.commit() - //if err != nil { - // return err - //} + err = s.accountStorageV2.commit() + if err != nil { + return err + } // Commit the underlying slab storage's writes From 3b83c83ebccb9df950c05c0cf09fb2659c26e867 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Tue, 5 Nov 2024 16:56:51 -0800 Subject: [PATCH 09/52] align V1 and V2 further --- runtime/account_storage_v1.go | 13 +++++++++-- runtime/account_storage_v2.go | 43 ++++++++++++++++++++++++++--------- 2 files changed, 43 insertions(+), 13 deletions(-) diff --git a/runtime/account_storage_v1.go b/runtime/account_storage_v1.go index 7c022a9093..50076cff64 100644 --- a/runtime/account_storage_v1.go +++ b/runtime/account_storage_v1.go @@ -115,7 +115,12 @@ func getDomainStorageMapFromLegacyDomainRegister( address common.Address, domain string, ) (*interpreter.DomainStorageMap, error) { - domainStorageSlabIndex, domainRegisterExists, err := getSlabIndexFromRegisterValue(ledger, address, []byte(domain)) + + domainStorageSlabIndex, domainRegisterExists, err := getSlabIndexFromRegisterValue( + ledger, + address, + []byte(domain), + ) if err != nil { return nil, err } @@ -123,6 +128,10 @@ func getDomainStorageMapFromLegacyDomainRegister( return nil, nil } - slabID := atree.NewSlabID(atree.Address(address), domainStorageSlabIndex) + slabID := atree.NewSlabID( + atree.Address(address), + domainStorageSlabIndex, + ) + return interpreter.NewDomainStorageMapWithRootID(storage, slabID), nil } diff --git a/runtime/account_storage_v2.go b/runtime/account_storage_v2.go index 870df59ee3..32714baf2a 100644 --- a/runtime/account_storage_v2.go +++ b/runtime/account_storage_v2.go @@ -115,24 +115,17 @@ func (s *AccountStorageV2) getAccountStorageMap( // Load account storage map if account storage register exists. 
- accountStorageSlabIndex, accountStorageRegisterExists, err := getSlabIndexFromRegisterValue( + var err error + accountStorageMap, err = getAccountStorageMapFromRegister( s.ledger, + s.slabStorage, address, - []byte(accountStorageKey.Key), ) if err != nil { panic(err) } - if !accountStorageRegisterExists { - return nil - } - - slabID := atree.NewSlabID( - atree.Address(address), - accountStorageSlabIndex, - ) - return interpreter.NewAccountStorageMapWithRootID(s.slabStorage, slabID) + return } func (s *AccountStorageV2) cacheAccountStorageMap( @@ -177,3 +170,31 @@ func (s *AccountStorageV2) commit() error { s.ledger, ) } + +func getAccountStorageMapFromRegister( + ledger atree.Ledger, + slabStorage atree.SlabStorage, + address common.Address, +) ( + *interpreter.AccountStorageMap, + error, +) { + accountStorageSlabIndex, accountStorageRegisterExists, err := getSlabIndexFromRegisterValue( + ledger, + address, + []byte(AccountStorageKey), + ) + if err != nil { + return nil, err + } + if !accountStorageRegisterExists { + return nil, nil + } + + slabID := atree.NewSlabID( + atree.Address(address), + accountStorageSlabIndex, + ) + + return interpreter.NewAccountStorageMapWithRootID(slabStorage, slabID), nil +} From e82727532b57b230289cdc75219ba5b875fcc158 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Tue, 5 Nov 2024 17:51:38 -0800 Subject: [PATCH 10/52] fix and use account storage V2 --- runtime/account_storage_v2.go | 5 +++++ runtime/storage.go | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/runtime/account_storage_v2.go b/runtime/account_storage_v2.go index 32714baf2a..d050c359fa 100644 --- a/runtime/account_storage_v2.go +++ b/runtime/account_storage_v2.go @@ -157,6 +157,11 @@ func (s *AccountStorageV2) storeNewAccountStorageMap( } s.newAccountStorageMapSlabIndices.Set(accountStorageKey, slabIndex) + s.cacheAccountStorageMap( + accountStorageKey, + accountStorageMap, + ) + return accountStorageMap } diff --git a/runtime/storage.go b/runtime/storage.go index 287ed02461..77c4b27782 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -143,7 +143,7 @@ func (s *Storage) GetDomainStorageMap( }() // TODO: - const useV2 = false + const useV2 = true if useV2 { domainStorageMap = s.accountStorageV2.GetDomainStorageMap( From 1e1e0af369ad07eff5ac4ea76b2d39be623e41d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Tue, 5 Nov 2024 17:54:26 -0800 Subject: [PATCH 11/52] re-enable test --- runtime/account_storage_v2.go | 15 +++++++++++---- runtime/storage.go | 16 ++++++++-------- runtime/storage_test.go | 18 ++++++------------ 3 files changed, 25 insertions(+), 24 deletions(-) diff --git a/runtime/account_storage_v2.go b/runtime/account_storage_v2.go index d050c359fa..d37bdf4f18 100644 --- a/runtime/account_storage_v2.go +++ b/runtime/account_storage_v2.go @@ -152,10 +152,7 @@ func (s *AccountStorageV2) storeNewAccountStorageMap( accountStorageKey := s.accountStorageKey(address) - if s.newAccountStorageMapSlabIndices == nil { - s.newAccountStorageMapSlabIndices = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} - } - s.newAccountStorageMapSlabIndices.Set(accountStorageKey, slabIndex) + s.SetNewAccountStorageMapSlabIndex(accountStorageKey, slabIndex) s.cacheAccountStorageMap( accountStorageKey, @@ -165,6 +162,16 @@ func (s *AccountStorageV2) storeNewAccountStorageMap( return accountStorageMap } +func (s *AccountStorageV2) SetNewAccountStorageMapSlabIndex( + accountStorageKey interpreter.StorageKey, + slabIndex 
atree.SlabIndex, +) { + if s.newAccountStorageMapSlabIndices == nil { + s.newAccountStorageMapSlabIndices = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} + } + s.newAccountStorageMapSlabIndices.Set(accountStorageKey, slabIndex) +} + func (s *AccountStorageV2) commit() error { if s.newAccountStorageMapSlabIndices == nil { return nil diff --git a/runtime/storage.go b/runtime/storage.go index 77c4b27782..9ccb5d680b 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -53,8 +53,8 @@ type Storage struct { memoryGauge common.MemoryGauge - accountStorageV1 *AccountStorageV1 - accountStorageV2 *AccountStorageV2 + AccountStorageV1 *AccountStorageV1 + AccountStorageV2 *AccountStorageV2 } var _ atree.SlabStorage = &Storage{} @@ -105,8 +105,8 @@ func NewStorage(ledger atree.Ledger, memoryGauge common.MemoryGauge) *Storage { Ledger: ledger, PersistentSlabStorage: persistentSlabStorage, memoryGauge: memoryGauge, - accountStorageV1: accountStorageV1, - accountStorageV2: accountStorageV2, + AccountStorageV1: accountStorageV1, + AccountStorageV2: accountStorageV2, } } @@ -146,14 +146,14 @@ func (s *Storage) GetDomainStorageMap( const useV2 = true if useV2 { - domainStorageMap = s.accountStorageV2.GetDomainStorageMap( + domainStorageMap = s.AccountStorageV2.GetDomainStorageMap( inter, address, domain, createIfNotExists, ) } else { - domainStorageMap = s.accountStorageV1.GetDomainStorageMap( + domainStorageMap = s.AccountStorageV1.GetDomainStorageMap( address, domain, createIfNotExists, @@ -296,12 +296,12 @@ func (s *Storage) commit(inter *interpreter.Interpreter, commitContractUpdates b s.commitContractUpdates(inter) } - err := s.accountStorageV1.commit() + err := s.AccountStorageV1.commit() if err != nil { return err } - err = s.accountStorageV2.commit() + err = s.AccountStorageV2.commit() if err != nil { return err } diff --git a/runtime/storage_test.go b/runtime/storage_test.go index 7d970b6364..6d3574b309 100644 --- a/runtime/storage_test.go +++ b/runtime/storage_test.go @@ -56,27 +56,21 @@ func withWritesToStorage( inter := NewTestInterpreter(tb) - // TODO: - //address := common.MustBytesToAddress([]byte{0x1}) + address := common.MustBytesToAddress([]byte{0x1}) for i := 0; i < count; i++ { randomIndex := random.Uint32() - // TODO: - //storageKey := interpreter.StorageKey{ - // Address: address, - // Key: fmt.Sprintf("%d", randomIndex), - //} + storageKey := interpreter.StorageKey{ + Address: address, + Key: fmt.Sprintf("%d", randomIndex), + } var slabIndex atree.SlabIndex binary.BigEndian.PutUint32(slabIndex[:], randomIndex) - // TODO: - //if storage.NewAccountStorageMapSlabIndices == nil { - // storage.NewAccountStorageMapSlabIndices = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} - //} - //storage.NewAccountStorageMapSlabIndices.Set(storageKey, slabIndex) + storage.AccountStorageV2.SetNewAccountStorageMapSlabIndex(storageKey, slabIndex) } handler(storage, inter) From dae042f909afaa8b539a7a2562cde5325ed19d9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Wed, 6 Nov 2024 10:37:17 -0800 Subject: [PATCH 12/52] bring back health check --- runtime/account_storage_v2.go | 15 +++ runtime/storage.go | 202 +++++++++++++++++----------------- 2 files changed, 119 insertions(+), 98 deletions(-) diff --git a/runtime/account_storage_v2.go b/runtime/account_storage_v2.go index d37bdf4f18..178443b868 100644 --- a/runtime/account_storage_v2.go +++ b/runtime/account_storage_v2.go @@ -210,3 +210,18 @@ func getAccountStorageMapFromRegister( return 
interpreter.NewAccountStorageMapWithRootID(slabStorage, slabID), nil } + +func (s *AccountStorageV2) cachedRootSlabIDs() []atree.SlabID { + + var slabIDs []atree.SlabID + + // Get cached account storage map slab IDs. + for _, storageMap := range s.cachedAccountStorageMaps { //nolint:maprange + slabIDs = append( + slabIDs, + storageMap.SlabID(), + ) + } + + return slabIDs +} diff --git a/runtime/storage.go b/runtime/storage.go index 9ccb5d680b..8a5d9cea3f 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -55,6 +55,9 @@ type Storage struct { AccountStorageV1 *AccountStorageV1 AccountStorageV2 *AccountStorageV2 + + // v1Accounts are accounts that are in account storage format v1 + v1Accounts *orderedmap.OrderedMap[common.Address, struct{}] } var _ atree.SlabStorage = &Storage{} @@ -158,6 +161,13 @@ func (s *Storage) GetDomainStorageMap( domain, createIfNotExists, ) + + if domainStorageMap != nil { + if s.v1Accounts == nil { + s.v1Accounts = &orderedmap.OrderedMap[common.Address, struct{}]{} + } + s.v1Accounts.Set(address, struct{}{}) + } } return domainStorageMap } @@ -404,105 +414,101 @@ func commitSlabIndices( //} func (s *Storage) CheckHealth() error { - // TODO: - //// Check slab storage health - //rootSlabIDs, err := atree.CheckStorageHealth(s, -1) - //if err != nil { - // return err - //} - // - //// Find account / non-temporary root slab IDs - // - //accountRootSlabIDs := make(map[atree.SlabID]struct{}, len(rootSlabIDs)) - // - //// NOTE: map range is safe, as it creates a subset - //for rootSlabID := range rootSlabIDs { //nolint:maprange - // if rootSlabID.HasTempAddress() { - // continue - // } - // - // accountRootSlabIDs[rootSlabID] = struct{}{} - //} - // - //// Check that account storage maps and unmigrated domain storage maps - //// match returned root slabs from atree.CheckStorageHealth. - // - //var storageMapStorageIDs []atree.SlabID - // - //// Get cached account storage map slab IDs. - //for _, storageMap := range s.cachedAccountStorageMaps { //nolint:maprange - // storageMapStorageIDs = append( - // storageMapStorageIDs, - // storageMap.SlabID(), - // ) - //} - // - //// Get cached unmigrated domain storage map slab IDs - //for storageKey, storageMap := range s.cachedDomainStorageMaps { //nolint:maprange - // address := storageKey.Address - // - // if s.unmigratedAccounts != nil && - // s.unmigratedAccounts.Contains(address) { - // - // domainValueID := storageMap.ValueID() - // - // slabID := atree.NewSlabID( - // atree.Address(address), - // atree.SlabIndex(domainValueID[8:]), - // ) - // - // storageMapStorageIDs = append( - // storageMapStorageIDs, - // slabID, - // ) - // } - //} - // - //sort.Slice(storageMapStorageIDs, func(i, j int) bool { - // a := storageMapStorageIDs[i] - // b := storageMapStorageIDs[j] - // return a.Compare(b) < 0 - //}) - // - //found := map[atree.SlabID]struct{}{} - // - //for _, storageMapStorageID := range storageMapStorageIDs { - // if _, ok := accountRootSlabIDs[storageMapStorageID]; !ok { - // return errors.NewUnexpectedError("account storage map (and unmigrated domain storage map) points to non-root slab %s", storageMapStorageID) - // } - // - // found[storageMapStorageID] = struct{}{} - //} - // - //// Check that all slabs in slab storage - //// are referenced by storables in account storage. - //// If a slab is not referenced, it is garbage. 
- // - //if len(accountRootSlabIDs) > len(found) { - // var unreferencedRootSlabIDs []atree.SlabID - // - // for accountRootSlabID := range accountRootSlabIDs { //nolint:maprange - // if _, ok := found[accountRootSlabID]; ok { - // continue - // } - // - // unreferencedRootSlabIDs = append( - // unreferencedRootSlabIDs, - // accountRootSlabID, - // ) - // } - // - // sort.Slice(unreferencedRootSlabIDs, func(i, j int) bool { - // a := unreferencedRootSlabIDs[i] - // b := unreferencedRootSlabIDs[j] - // return a.Compare(b) < 0 - // }) - // - // return UnreferencedRootSlabsError{ - // UnreferencedRootSlabIDs: unreferencedRootSlabIDs, - // } - //} + // Check slab storage health + rootSlabIDs, err := atree.CheckStorageHealth(s, -1) + if err != nil { + return err + } + + // Find account / non-temporary root slab IDs + + accountRootSlabIDs := make(map[atree.SlabID]struct{}, len(rootSlabIDs)) + + // NOTE: map range is safe, as it creates a subset + for rootSlabID := range rootSlabIDs { //nolint:maprange + if rootSlabID.HasTempAddress() { + continue + } + + accountRootSlabIDs[rootSlabID] = struct{}{} + } + + // Check that account storage maps and unmigrated domain storage maps + // match returned root slabs from atree.CheckStorageHealth. + + var storageMapStorageIDs []atree.SlabID + + // Get cached account storage map slab IDs. + storageMapStorageIDs = append( + storageMapStorageIDs, + s.AccountStorageV2.cachedRootSlabIDs()..., + ) + + // Get slab IDs of cached domain storage maps that are in account storage format v1. + for storageKey, storageMap := range s.cachedDomainStorageMaps { //nolint:maprange + address := storageKey.Address + + if s.v1Accounts != nil && + s.v1Accounts.Contains(address) { + + storageMapStorageIDs = append( + storageMapStorageIDs, + storageMap.SlabID(), + ) + } + } + + sort.Slice( + storageMapStorageIDs, + func(i, j int) bool { + a := storageMapStorageIDs[i] + b := storageMapStorageIDs[j] + return a.Compare(b) < 0 + }, + ) + + found := map[atree.SlabID]struct{}{} + + for _, storageMapStorageID := range storageMapStorageIDs { + if _, ok := accountRootSlabIDs[storageMapStorageID]; !ok { + return errors.NewUnexpectedError( + "account storage map (and unmigrated domain storage map) points to non-root slab %s", + storageMapStorageID, + ) + } + + found[storageMapStorageID] = struct{}{} + } + + // Check that all slabs in slab storage + // are referenced by storables in account storage. + // If a slab is not referenced, it is garbage. 
+ + if len(accountRootSlabIDs) > len(found) { + var unreferencedRootSlabIDs []atree.SlabID + + for accountRootSlabID := range accountRootSlabIDs { //nolint:maprange + if _, ok := found[accountRootSlabID]; ok { + continue + } + + unreferencedRootSlabIDs = append( + unreferencedRootSlabIDs, + accountRootSlabID, + ) + } + + sort.Slice(unreferencedRootSlabIDs, func(i, j int) bool { + a := unreferencedRootSlabIDs[i] + b := unreferencedRootSlabIDs[j] + return a.Compare(b) < 0 + }) + + return UnreferencedRootSlabsError{ + UnreferencedRootSlabIDs: unreferencedRootSlabIDs, + } + } return nil } From b5e97e00e630e9cf3007caf88bf5112ca145f11a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Wed, 6 Nov 2024 13:46:24 -0800 Subject: [PATCH 13/52] put new storage format behind feature flag --- interpreter/account_storagemap_test.go | 200 +++++++++++++++++++---- interpreter/domain_storagemap_test.go | 139 ++++++++++++++-- runtime/config.go | 2 + runtime/contract_function_executor.go | 8 +- runtime/contract_test.go | 45 +++-- runtime/migrate_domain_registers_test.go | 74 ++++++++- runtime/runtime.go | 12 +- runtime/runtime_memory_metering_test.go | 11 +- runtime/runtime_test.go | 12 +- runtime/script_executor.go | 8 +- runtime/sharedstate_test.go | 6 +- runtime/storage.go | 102 +++++++++--- runtime/storage_test.go | 168 ++++++++++++++++--- runtime/transaction_executor.go | 8 +- 14 files changed, 672 insertions(+), 123 deletions(-) diff --git a/interpreter/account_storagemap_test.go b/interpreter/account_storagemap_test.go index cdc6fc868c..1b7ec4e675 100644 --- a/interpreter/account_storagemap_test.go +++ b/interpreter/account_storagemap_test.go @@ -42,8 +42,16 @@ func TestAccountStorageMapDomainExists(t *testing.T) { address := common.MustBytesToAddress([]byte{0x1}) t.Run("empty", func(t *testing.T) { + t.Parallel() + ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) require.NotNil(t, accountStorageMap) @@ -58,10 +66,18 @@ func TestAccountStorageMapDomainExists(t *testing.T) { }) t.Run("non-empty", func(t *testing.T) { + t.Parallel() + random := rand.New(rand.NewSource(42)) ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -96,8 +112,16 @@ func TestAccountStorageMapGetDomain(t *testing.T) { address := common.MustBytesToAddress([]byte{0x1}) t.Run("empty", func(t *testing.T) { + t.Parallel() + ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. 
// This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -117,18 +141,26 @@ func TestAccountStorageMapGetDomain(t *testing.T) { for _, domain := range runtime.AccountDomains { const createIfNotExists = false - storagemap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) - require.Nil(t, storagemap) + domainStorageMap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) + require.Nil(t, domainStorageMap) } CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) }) t.Run("non-empty", func(t *testing.T) { + t.Parallel() + random := rand.New(rand.NewSource(42)) ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -149,11 +181,11 @@ func TestAccountStorageMapGetDomain(t *testing.T) { for _, domain := range runtime.AccountDomains { const createIfNotExists = false - domainStoragemap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) - require.Equal(t, slices.Contains(existingDomains, domain), domainStoragemap != nil) + domainStorageMap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) + require.Equal(t, slices.Contains(existingDomains, domain), domainStorageMap != nil) - if domainStoragemap != nil { - checkDomainStorageMapData(t, inter, domainStoragemap, accountValues[domain]) + if domainStorageMap != nil { + checkDomainStorageMapData(t, inter, domainStorageMap, accountValues[domain]) } } @@ -167,8 +199,16 @@ func TestAccountStorageMapCreateDomain(t *testing.T) { address := common.MustBytesToAddress([]byte{0x1}) t.Run("empty", func(t *testing.T) { + t.Parallel() + ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -190,9 +230,9 @@ func TestAccountStorageMapCreateDomain(t *testing.T) { for _, domain := range runtime.AccountDomains { const createIfNotExists = true - domainStoragemap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) - require.NotNil(t, domainStoragemap) - require.Equal(t, uint64(0), domainStoragemap.Count()) + domainStorageMap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(0), domainStorageMap.Count()) accountValues[domain] = make(domainStorageMapValues) } @@ -203,10 +243,18 @@ func TestAccountStorageMapCreateDomain(t *testing.T) { }) t.Run("non-empty", func(t *testing.T) { + t.Parallel() + random := rand.New(rand.NewSource(42)) ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. 
// This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -227,9 +275,9 @@ func TestAccountStorageMapCreateDomain(t *testing.T) { for _, domain := range runtime.AccountDomains { const createIfNotExists = true - domainStoragemap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) - require.NotNil(t, domainStoragemap) - require.Equal(t, uint64(len(accountValues[domain])), domainStoragemap.Count()) + domainStorageMap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) + require.NotNil(t, domainStorageMap) + require.Equal(t, uint64(len(accountValues[domain])), domainStorageMap.Count()) if !slices.Contains(existingDomains, domain) { accountValues[domain] = make(domainStorageMapValues) @@ -248,10 +296,18 @@ func TestAccountStorageMapSetAndUpdateDomain(t *testing.T) { address := common.MustBytesToAddress([]byte{0x1}) t.Run("empty", func(t *testing.T) { + t.Parallel() + random := rand.New(rand.NewSource(42)) ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -289,10 +345,18 @@ func TestAccountStorageMapSetAndUpdateDomain(t *testing.T) { }) t.Run("non-empty", func(t *testing.T) { + t.Parallel() + random := rand.New(rand.NewSource(42)) ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -334,8 +398,16 @@ func TestAccountStorageMapRemoveDomain(t *testing.T) { address := common.MustBytesToAddress([]byte{0x1}) t.Run("empty", func(t *testing.T) { + t.Parallel() + ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -366,10 +438,18 @@ func TestAccountStorageMapRemoveDomain(t *testing.T) { }) t.Run("non-empty", func(t *testing.T) { + t.Parallel() + random := rand.New(rand.NewSource(42)) ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. 
// This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -408,8 +488,16 @@ func TestAccountStorageMapIterator(t *testing.T) { address := common.MustBytesToAddress([]byte{0x1}) t.Run("empty", func(t *testing.T) { + t.Parallel() + ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -444,10 +532,18 @@ func TestAccountStorageMapIterator(t *testing.T) { }) t.Run("non-empty", func(t *testing.T) { + t.Parallel() + random := rand.New(rand.NewSource(42)) ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -505,8 +601,16 @@ func TestAccountStorageMapDomains(t *testing.T) { address := common.MustBytesToAddress([]byte{0x1}) t.Run("empty", func(t *testing.T) { + t.Parallel() + ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address)) require.NotNil(t, accountStorageMap) @@ -519,10 +623,18 @@ func TestAccountStorageMapDomains(t *testing.T) { }) t.Run("non-empty", func(t *testing.T) { + t.Parallel() + random := rand.New(rand.NewSource(42)) ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) // Turn off automatic AtreeStorageValidationEnabled and explicitly check atree storage health directly. // This is because AccountStorageMap isn't created through storage, so there isn't any account register to match AccountStorageMap root slab. 
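
[Editor's note: the hunks in this test file all repeat one mechanical change: runtime.NewStorage now takes a third StorageConfig argument that gates the new account storage format. A minimal sketch of the two call shapes follows; the import paths and the test-ledger helper are assumptions based on the packages used elsewhere in this patch series, not part of the diff itself.]

```go
package main

import (
	"github.com/onflow/cadence/runtime"
	runtime_utils "github.com/onflow/cadence/test_utils/runtime_utils" // assumed import path
)

func main() {
	ledger := runtime_utils.NewTestLedger(nil, nil)

	// Storage format V2 enabled: all domains of an account live in
	// a single account storage map, keyed by domain.
	_ = runtime.NewStorage(
		ledger,
		nil, // no memory gauge
		runtime.StorageConfig{
			StorageFormatV2Enabled: true,
		},
	)

	// Zero-value config: accounts keep the V1 layout with one
	// domain storage map register per domain.
	_ = runtime.NewStorage(
		ledger,
		nil,
		runtime.StorageConfig{},
	)
}
```
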
@@ -557,9 +669,17 @@ func TestAccountStorageMapLoadFromRootSlabID(t *testing.T) { address := common.MustBytesToAddress([]byte{0x1}) t.Run("empty", func(t *testing.T) { + t.Parallel() + init := func() (atree.SlabID, accountStorageMapValues, map[string][]byte, map[string]uint64) { ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -576,7 +696,13 @@ func TestAccountStorageMapLoadFromRootSlabID(t *testing.T) { accountStorageMapRootSlabID, accountValues, storedValues, storageIndices := init() ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) accountStorageMap := interpreter.NewAccountStorageMapWithRootID(storage, accountStorageMapRootSlabID) require.Equal(t, uint64(0), accountStorageMap.Count()) @@ -590,6 +716,8 @@ func TestAccountStorageMapLoadFromRootSlabID(t *testing.T) { }) t.Run("non-empty", func(t *testing.T) { + t.Parallel() + existingDomains := []string{ common.PathDomainStorage.Identifier(), common.PathDomainPublic.Identifier(), @@ -600,7 +728,13 @@ func TestAccountStorageMapLoadFromRootSlabID(t *testing.T) { random := rand.New(rand.NewSource(42)) ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) // Turn off automatic AtreeStorageValidationEnabled and explicitly check atree storage health directly. // This is because AccountStorageMap isn't created through storage, so there isn't any account register to match AccountStorageMap root slab. 
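
[Editor's note: TestAccountStorageMapLoadFromRootSlabID exercises a persist-and-reload round trip: an account storage map is created, committed, and re-opened from its root slab ID against the committed registers. Below is a condensed sketch of that pattern using only calls visible in these diffs; the two-storage setup, variable names, and import paths are illustrative assumptions.]

```go
package main

import (
	"fmt"
	goruntime "runtime"

	"github.com/onflow/atree"

	"github.com/onflow/cadence/common"
	"github.com/onflow/cadence/interpreter"
	"github.com/onflow/cadence/runtime"
	runtime_utils "github.com/onflow/cadence/test_utils/runtime_utils" // assumed import path
)

func main() {
	address := common.MustBytesToAddress([]byte{0x1})

	// Create an account storage map and commit it to the ledger.
	ledger := runtime_utils.NewTestLedger(nil, nil)
	storage := runtime.NewStorage(
		ledger,
		nil,
		runtime.StorageConfig{StorageFormatV2Enabled: true},
	)

	accountStorageMap := interpreter.NewAccountStorageMap(nil, storage, atree.Address(address))
	rootSlabID := accountStorageMap.SlabID()

	if err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU()); err != nil {
		panic(err)
	}

	// Re-open the committed registers in a fresh storage and reload
	// the same account storage map by its root slab ID.
	ledger2 := runtime_utils.NewTestLedgerWithData(nil, nil, ledger.StoredValues, ledger.StorageIndices)
	storage2 := runtime.NewStorage(
		ledger2,
		nil,
		runtime.StorageConfig{StorageFormatV2Enabled: true},
	)

	reloaded := interpreter.NewAccountStorageMapWithRootID(storage2, rootSlabID)
	fmt.Println(reloaded.Count()) // 0: no domains were written before the commit
}
```
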
@@ -620,7 +754,13 @@ func TestAccountStorageMapLoadFromRootSlabID(t *testing.T) { accountStorageMapRootSlabID, accountValues, storedValues, storageIndices := init() ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) accountStorageMap := interpreter.NewAccountStorageMapWithRootID(storage, accountStorageMapRootSlabID) require.Equal(t, uint64(len(existingDomains)), accountStorageMap.Count()) diff --git a/interpreter/domain_storagemap_test.go b/interpreter/domain_storagemap_test.go index 9b1985dce1..5eb2db5c66 100644 --- a/interpreter/domain_storagemap_test.go +++ b/interpreter/domain_storagemap_test.go @@ -40,8 +40,14 @@ func TestDomainStorageMapValueExists(t *testing.T) { address := common.MustBytesToAddress([]byte{0x1}) t.Run("empty", func(t *testing.T) { + t.Parallel() + ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) require.NotNil(t, domainStorageMap) @@ -56,10 +62,16 @@ func TestDomainStorageMapValueExists(t *testing.T) { }) t.Run("non-empty", func(t *testing.T) { + t.Parallel() + random := rand.New(rand.NewSource(42)) ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any @@ -103,8 +115,14 @@ func TestDomainStorageMapReadValue(t *testing.T) { address := common.MustBytesToAddress([]byte{0x1}) t.Run("empty", func(t *testing.T) { + t.Parallel() + ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) domainStorageMap := interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) require.NotNil(t, domainStorageMap) @@ -119,10 +137,16 @@ func TestDomainStorageMapReadValue(t *testing.T) { }) t.Run("non-empty", func(t *testing.T) { + t.Parallel() + random := rand.New(rand.NewSource(42)) ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any @@ -169,10 +193,16 @@ func TestDomainStorageMapSetAndUpdateValue(t *testing.T) { address := common.MustBytesToAddress([]byte{0x1}) t.Run("empty", func(t *testing.T) { + t.Parallel() + random := rand.New(rand.NewSource(42)) ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. 
// This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -200,10 +230,16 @@ func TestDomainStorageMapSetAndUpdateValue(t *testing.T) { }) t.Run("non-empty", func(t *testing.T) { + t.Parallel() + random := rand.New(rand.NewSource(42)) ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -245,8 +281,14 @@ func TestDomainStorageMapRemoveValue(t *testing.T) { address := common.MustBytesToAddress([]byte{0x1}) t.Run("empty", func(t *testing.T) { + t.Parallel() + ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -273,10 +315,16 @@ func TestDomainStorageMapRemoveValue(t *testing.T) { }) t.Run("non-empty", func(t *testing.T) { + t.Parallel() + random := rand.New(rand.NewSource(42)) ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -325,8 +373,14 @@ func TestDomainStorageMapIteratorNext(t *testing.T) { address := common.MustBytesToAddress([]byte{0x1}) t.Run("empty", func(t *testing.T) { + t.Parallel() + ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -362,10 +416,16 @@ func TestDomainStorageMapIteratorNext(t *testing.T) { }) t.Run("non-empty", func(t *testing.T) { + t.Parallel() + random := rand.New(rand.NewSource(42)) ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -422,8 +482,14 @@ func TestDomainStorageMapIteratorNextKey(t *testing.T) { address := common.MustBytesToAddress([]byte{0x1}) t.Run("empty", func(t *testing.T) { + t.Parallel() + ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. 
// This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -458,10 +524,16 @@ func TestDomainStorageMapIteratorNextKey(t *testing.T) { }) t.Run("non-empty", func(t *testing.T) { + t.Parallel() + random := rand.New(rand.NewSource(42)) ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -515,8 +587,14 @@ func TestDomainStorageMapIteratorNextValue(t *testing.T) { address := common.MustBytesToAddress([]byte{0x1}) t.Run("empty", func(t *testing.T) { + t.Parallel() + ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -551,10 +629,16 @@ func TestDomainStorageMapIteratorNextValue(t *testing.T) { }) t.Run("non-empty", func(t *testing.T) { + t.Parallel() + random := rand.New(rand.NewSource(42)) ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because AccountStorageMap isn't created through runtime.Storage, so there isn't any @@ -615,9 +699,15 @@ func TestDomainStorageMapLoadFromRootSlabID(t *testing.T) { address := common.MustBytesToAddress([]byte{0x1}) t.Run("empty", func(t *testing.T) { + t.Parallel() + init := func() (atree.SlabID, domainStorageMapValues, map[string][]byte, map[string]uint64) { ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -635,7 +725,11 @@ func TestDomainStorageMapLoadFromRootSlabID(t *testing.T) { domainStorageMapRootSlabID, domainValues, storedValues, storageIndices := init() ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) domainStorageMap := interpreter.NewDomainStorageMapWithRootID(storage, domainStorageMapRootSlabID) require.Equal(t, uint64(0), domainStorageMap.Count()) @@ -648,12 +742,17 @@ func TestDomainStorageMapLoadFromRootSlabID(t *testing.T) { }) t.Run("non-empty", func(t *testing.T) { + t.Parallel() init := func() (atree.SlabID, domainStorageMapValues, map[string][]byte, map[string]uint64) { random := rand.New(rand.NewSource(42)) ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) // Turn off automatic AtreeStorageValidationEnabled and explicitly check atree storage health directly. // This is because AccountStorageMap isn't created through storage, so there isn't any account register to match AccountStorageMap root slab. 
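
[Editor's note: beyond these test-only constructor changes, the runtime hunks that follow thread the same switch through Config.StorageFormatV2Enabled, which each executor copies into the StorageConfig it hands to NewStorage. A rough sketch of opting a runtime into the new format is shown below; it assumes the existing NewInterpreterRuntime constructor, which is not part of this diff, and the field name comes from the config.go hunk that follows.]

```go
package main

import (
	"github.com/onflow/cadence/runtime"
)

func main() {
	// Enable account storage format V2 for everything executed by this
	// runtime; the transaction, script, and contract-function executors
	// pass the flag down when they construct Storage.
	config := runtime.Config{
		StorageFormatV2Enabled: true,
	}

	rt := runtime.NewInterpreterRuntime(config) // assumed constructor
	_ = rt
}
```
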
@@ -674,7 +773,11 @@ func TestDomainStorageMapLoadFromRootSlabID(t *testing.T) { domainStorageMapRootSlabID, domainValues, storedValues, storageIndices := init() ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{}, + ) domainStorageMap := interpreter.NewDomainStorageMapWithRootID(storage, domainStorageMapRootSlabID) diff --git a/runtime/config.go b/runtime/config.go index d6882cb353..68926367d0 100644 --- a/runtime/config.go +++ b/runtime/config.go @@ -37,4 +37,6 @@ type Config struct { CoverageReport *CoverageReport // LegacyContractUpgradeEnabled enabled specifies whether to use the old parser when parsing an old contract LegacyContractUpgradeEnabled bool + // StorageFormatV2Enabled specifies whether storage format V2 is enabled + StorageFormatV2Enabled bool } diff --git a/runtime/contract_function_executor.go b/runtime/contract_function_executor.go index 8ba0f49bf8..19960185f2 100644 --- a/runtime/contract_function_executor.go +++ b/runtime/contract_function_executor.go @@ -105,7 +105,13 @@ func (executor *interpreterContractFunctionExecutor) preprocess() (err error) { runtimeInterface := context.Interface - storage := NewStorage(runtimeInterface, runtimeInterface) + storage := NewStorage( + runtimeInterface, + runtimeInterface, + StorageConfig{ + StorageFormatV2Enabled: interpreterRuntime.defaultConfig.StorageFormatV2Enabled, + }, + ) executor.storage = storage environment := context.Environment diff --git a/runtime/contract_test.go b/runtime/contract_test.go index ec20627064..55ead92eed 100644 --- a/runtime/contract_test.go +++ b/runtime/contract_test.go @@ -44,18 +44,21 @@ func TestRuntimeContract(t *testing.T) { t.Parallel() type testCase struct { - name string // the name of the contract used in add/update calls - code string // the code we use to add the contract - code2 string // the code we use to update the contract - valid bool - isInterface bool + name string // the name of the contract used in add/update calls + code string // the code we use to add the contract + code2 string // the code we use to update the contract + valid bool + isInterface bool + storageFormatV2Enabled bool } - test := func(t *testing.T, tc testCase) { - + runTest := func(t *testing.T, tc testCase) { t.Parallel() - runtime := NewTestInterpreterRuntime() + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = tc.storageFormatV2Enabled + + runtime := NewTestInterpreterRuntimeWithConfig(config) var loggedMessages []string @@ -222,8 +225,18 @@ func TestRuntimeContract(t *testing.T) { // so getting the storage map here once upfront would result in outdated data getContractValueExists := func() bool { - storageMap := NewStorage(storage, nil). 
- GetDomainStorageMap(inter, signerAddress, StorageDomainContract, false) + storageMap := NewStorage( + storage, + nil, + StorageConfig{ + StorageFormatV2Enabled: tc.storageFormatV2Enabled, + }, + ).GetDomainStorageMap( + inter, + signerAddress, + StorageDomainContract, + false, + ) if storageMap == nil { return false } @@ -514,6 +527,18 @@ func TestRuntimeContract(t *testing.T) { } + test := func(t *testing.T, tc testCase) { + t.Run("storage format V2 disabled", func(t *testing.T) { + tc.storageFormatV2Enabled = false + runTest(t, tc) + }) + + t.Run("storage format V2 enabled", func(t *testing.T) { + tc.storageFormatV2Enabled = true + runTest(t, tc) + }) + } + t.Run("valid contract, correct name", func(t *testing.T) { test(t, testCase{ name: "Test", diff --git a/runtime/migrate_domain_registers_test.go b/runtime/migrate_domain_registers_test.go index 28769c9e14..209a0dc957 100644 --- a/runtime/migrate_domain_registers_test.go +++ b/runtime/migrate_domain_registers_test.go @@ -40,6 +40,7 @@ import ( ) func TestMigrateDomainRegisters(t *testing.T) { + t.Parallel() alwaysMigrate := func(common.Address) bool { return true @@ -74,8 +75,16 @@ func TestMigrateDomainRegisters(t *testing.T) { address2 := common.MustBytesToAddress([]byte{0x2}) t.Run("no accounts", func(t *testing.T) { + t.Parallel() + ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -92,8 +101,16 @@ func TestMigrateDomainRegisters(t *testing.T) { }) t.Run("accounts without domain registers", func(t *testing.T) { + t.Parallel() + ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -114,6 +131,7 @@ func TestMigrateDomainRegisters(t *testing.T) { }) t.Run("accounts with domain registers", func(t *testing.T) { + t.Parallel() accountsInfo := []accountInfo{ { @@ -132,7 +150,13 @@ func TestMigrateDomainRegisters(t *testing.T) { } ledger, accountsValues := newTestLedgerWithUnmigratedAccounts(t, nil, nil, accountsInfo) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -174,6 +198,8 @@ func TestMigrateDomainRegisters(t *testing.T) { }) t.Run("migrated accounts", func(t *testing.T) { + t.Parallel() + accountsInfo := []accountInfo{ { address: address1, @@ -191,7 +217,13 @@ func TestMigrateDomainRegisters(t *testing.T) { } ledger, accountsValues := newTestLedgerWithMigratedAccounts(t, nil, nil, accountsInfo) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -212,6 +244,7 @@ func TestMigrateDomainRegisters(t *testing.T) { }) t.Run("never migration predicate", func(t *testing.T) { + t.Parallel() accountsInfo := []accountInfo{ { @@ -229,7 +262,13 @@ func TestMigrateDomainRegisters(t *testing.T) { } ledger, _ := newTestLedgerWithUnmigratedAccounts(t, nil, nil, accountsInfo) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) 
inter := NewTestInterpreterWithStorage(t, storage) @@ -245,6 +284,7 @@ func TestMigrateDomainRegisters(t *testing.T) { }) t.Run("selective migration predicate", func(t *testing.T) { + t.Parallel() accountsInfo := []accountInfo{ { @@ -262,7 +302,13 @@ func TestMigrateDomainRegisters(t *testing.T) { } ledger, _ := newTestLedgerWithUnmigratedAccounts(t, nil, nil, accountsInfo) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -298,7 +344,13 @@ func newTestLedgerWithUnmigratedAccounts( accounts []accountInfo, ) (TestLedger, map[common.Address]accountStorageMapValues) { ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any @@ -382,7 +434,13 @@ func newTestLedgerWithMigratedAccounts( accounts []accountInfo, ) (TestLedger, map[common.Address]accountStorageMapValues) { ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage(ledger, nil) + storage := runtime.NewStorage( + ledger, + nil, + runtime.StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) // Turn off AtreeStorageValidationEnabled and explicitly check atree storage health at the end of test. // This is because DomainStorageMap isn't created through runtime.Storage, so there isn't any diff --git a/runtime/runtime.go b/runtime/runtime.go index e385f2c0c7..1375a4bd2b 100644 --- a/runtime/runtime.go +++ b/runtime/runtime.go @@ -558,7 +558,15 @@ func (r *interpreterRuntime) Storage(context Context) (*Storage, *interpreter.In codesAndPrograms := NewCodesAndPrograms() - storage := NewStorage(context.Interface, context.Interface) + runtimeInterface := context.Interface + + storage := NewStorage( + runtimeInterface, + runtimeInterface, + StorageConfig{ + StorageFormatV2Enabled: r.defaultConfig.StorageFormatV2Enabled, + }, + ) environment := context.Environment if environment == nil { @@ -566,7 +574,7 @@ func (r *interpreterRuntime) Storage(context Context) (*Storage, *interpreter.In } environment.Configure( - context.Interface, + runtimeInterface, codesAndPrograms, storage, context.CoverageReport, diff --git a/runtime/runtime_memory_metering_test.go b/runtime/runtime_memory_metering_test.go index 669b6152c1..fe196484ff 100644 --- a/runtime/runtime_memory_metering_test.go +++ b/runtime/runtime_memory_metering_test.go @@ -858,7 +858,9 @@ func TestRuntimeStorageCommitsMetering(t *testing.T) { OnMeterMemory: meter.MeterMemory, } - runtime := NewTestInterpreterRuntime() + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = true + runtime := NewTestInterpreterRuntimeWithConfig(config) err := runtime.ExecuteTransaction( Script{ @@ -904,7 +906,9 @@ func TestRuntimeStorageCommitsMetering(t *testing.T) { }, } - runtime := NewTestInterpreterRuntime() + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = true + runtime := NewTestInterpreterRuntimeWithConfig(config) err := runtime.ExecuteTransaction( Script{ @@ -1038,6 +1042,7 @@ func TestRuntimeMeterEncoding(t *testing.T) { config := DefaultTestInterpreterConfig config.AtreeValidationEnabled = false + config.StorageFormatV2Enabled = true rt := 
NewTestInterpreterRuntimeWithConfig(config) address := common.MustBytesToAddress([]byte{0x1}) @@ -1082,6 +1087,7 @@ func TestRuntimeMeterEncoding(t *testing.T) { config := DefaultTestInterpreterConfig config.AtreeValidationEnabled = false + config.StorageFormatV2Enabled = true rt := NewTestInterpreterRuntimeWithConfig(config) address := common.MustBytesToAddress([]byte{0x1}) @@ -1131,6 +1137,7 @@ func TestRuntimeMeterEncoding(t *testing.T) { config := DefaultTestInterpreterConfig config.AtreeValidationEnabled = false + config.StorageFormatV2Enabled = true rt := NewTestInterpreterRuntimeWithConfig(config) address := common.MustBytesToAddress([]byte{0x1}) diff --git a/runtime/runtime_test.go b/runtime/runtime_test.go index ce4c2219b4..6223ebb2b7 100644 --- a/runtime/runtime_test.go +++ b/runtime/runtime_test.go @@ -5683,7 +5683,9 @@ func TestRuntimeContractWriteback(t *testing.T) { t.Parallel() - runtime := NewTestInterpreterRuntime() + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = true + runtime := NewTestInterpreterRuntimeWithConfig(config) addressValue := cadence.BytesToAddress([]byte{0xCA, 0xDE}) @@ -5853,7 +5855,9 @@ func TestRuntimeStorageWriteback(t *testing.T) { t.Parallel() - runtime := NewTestInterpreterRuntime() + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = true + runtime := NewTestInterpreterRuntimeWithConfig(config) addressValue := cadence.BytesToAddress([]byte{0xCA, 0xDE}) @@ -7600,7 +7604,9 @@ func TestRuntimeComputationMetring(t *testing.T) { ), ) - runtime := NewTestInterpreterRuntime() + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = true + runtime := NewTestInterpreterRuntimeWithConfig(config) compErr := errors.New("computation exceeded limit") var hits, totalIntensity uint diff --git a/runtime/script_executor.go b/runtime/script_executor.go index ca07c4cb00..8a51088a3d 100644 --- a/runtime/script_executor.go +++ b/runtime/script_executor.go @@ -107,7 +107,13 @@ func (executor *interpreterScriptExecutor) preprocess() (err error) { runtimeInterface := context.Interface - storage := NewStorage(runtimeInterface, runtimeInterface) + storage := NewStorage( + runtimeInterface, + runtimeInterface, + StorageConfig{ + StorageFormatV2Enabled: interpreterRuntime.defaultConfig.StorageFormatV2Enabled, + }, + ) executor.storage = storage environment := context.Environment diff --git a/runtime/sharedstate_test.go b/runtime/sharedstate_test.go index f1d0ad703e..e27e48443a 100644 --- a/runtime/sharedstate_test.go +++ b/runtime/sharedstate_test.go @@ -36,7 +36,9 @@ func TestRuntimeSharedState(t *testing.T) { t.Parallel() - runtime := NewTestInterpreterRuntime() + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = true + runtime := NewTestInterpreterRuntimeWithConfig(config) signerAddress := common.MustBytesToAddress([]byte{0x1}) @@ -111,7 +113,7 @@ func TestRuntimeSharedState(t *testing.T) { }, } - environment := NewBaseInterpreterEnvironment(Config{}) + environment := NewBaseInterpreterEnvironment(config) nextTransactionLocation := NewTransactionLocationGenerator() diff --git a/runtime/storage.go b/runtime/storage.go index 8a5d9cea3f..61f619cc00 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -38,6 +38,10 @@ const ( AccountStorageKey = "stored" ) +type StorageConfig struct { + StorageFormatV2Enabled bool +} + type Storage struct { *atree.PersistentSlabStorage @@ -53,17 +57,24 @@ type Storage struct { memoryGauge common.MemoryGauge + Config StorageConfig + AccountStorageV1 
*AccountStorageV1 AccountStorageV2 *AccountStorageV2 - // v1Accounts are accounts that are in account storage format v1 - v1Accounts *orderedmap.OrderedMap[common.Address, struct{}] + // v1Accounts contains the cached result of determining + // if the account is in storage format v1 or not. + v1Accounts *orderedmap.OrderedMap[common.Address, bool] } var _ atree.SlabStorage = &Storage{} var _ interpreter.Storage = &Storage{} -func NewStorage(ledger atree.Ledger, memoryGauge common.MemoryGauge) *Storage { +func NewStorage( + ledger atree.Ledger, + memoryGauge common.MemoryGauge, + config StorageConfig, +) *Storage { decodeStorable := func( decoder *cbor.StreamDecoder, slabID atree.SlabID, @@ -108,6 +119,7 @@ func NewStorage(ledger atree.Ledger, memoryGauge common.MemoryGauge) *Storage { Ledger: ledger, PersistentSlabStorage: persistentSlabStorage, memoryGauge: memoryGauge, + Config: config, AccountStorageV1: accountStorageV1, AccountStorageV2: accountStorageV2, } @@ -145,30 +157,35 @@ func (s *Storage) GetDomainStorageMap( } }() - // TODO: - const useV2 = true + if s.IsV1Account(address) || + !s.Config.StorageFormatV2Enabled { - if useV2 { - domainStorageMap = s.AccountStorageV2.GetDomainStorageMap( - inter, + domainStorageMap = s.AccountStorageV1.GetDomainStorageMap( address, domain, createIfNotExists, ) + + // If the account was not in storage format v1, + // but the domain storage map was created, + // mark the account as in storage format v1. + + if domainStorageMap != nil { + s.v1Accounts.Set(address, true) + } + } else { - domainStorageMap = s.AccountStorageV1.GetDomainStorageMap( + // The account is not in storage format v1, + // so use the new account storage format v2. + + domainStorageMap = s.AccountStorageV2.GetDomainStorageMap( + inter, address, domain, createIfNotExists, ) - - if domainStorageMap != nil { - if s.v1Accounts == nil { - s.v1Accounts = &orderedmap.OrderedMap[common.Address, struct{}]{} - } - s.v1Accounts.Set(address, struct{}{}) - } } + return domainStorageMap } @@ -183,6 +200,43 @@ func (s *Storage) cacheDomainStorageMap( s.cachedDomainStorageMaps[domainStorageKey] = domainStorageMap } +// IsV1Account returns true if given account is in account storage format v1. +func (s *Storage) IsV1Account(address common.Address) (isV1 bool) { + + // Check cache + + if s.v1Accounts != nil { + var present bool + isV1, present = s.v1Accounts.Get(address) + if present { + return isV1 + } + } + + // Cache result + + defer func() { + if s.v1Accounts == nil { + s.v1Accounts = &orderedmap.OrderedMap[common.Address, bool]{} + } + s.v1Accounts.Set(address, isV1) + }() + + // Check if a storage map register exists for any of the domains. + // Check the most frequently used domains first, such as storage, public, private. + for _, domain := range AccountDomains { + _, domainExists, err := getSlabIndexFromRegisterValue(s.Ledger, address, []byte(domain)) + if err != nil { + panic(err) + } + if domainExists { + return true + } + } + + return false +} + // getSlabIndexFromRegisterValue returns register value as atree.SlabIndex. 
// This function returns error if // - underlying ledger panics, or @@ -449,14 +503,16 @@ func (s *Storage) CheckHealth() error { for storageKey, storageMap := range s.cachedDomainStorageMaps { //nolint:maprange address := storageKey.Address - if s.v1Accounts != nil && - s.v1Accounts.Contains(address) { - - storageMapStorageIDs = append( - storageMapStorageIDs, - storageMap.SlabID(), - ) + // Only accounts in storage format v1 store domain storage maps + // directly at the root of the account + if !s.IsV1Account(address) { + continue } + + storageMapStorageIDs = append( + storageMapStorageIDs, + storageMap.SlabID(), + ) } sort.Slice( diff --git a/runtime/storage_test.go b/runtime/storage_test.go index 6d3574b309..7a50792ccd 100644 --- a/runtime/storage_test.go +++ b/runtime/storage_test.go @@ -52,7 +52,13 @@ func withWritesToStorage( handler func(*Storage, *interpreter.Interpreter), ) { ledger := NewTestLedger(nil, onWrite) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreter(tb) @@ -154,7 +160,9 @@ func TestRuntimeStorageWrite(t *testing.T) { t.Parallel() - runtime := NewTestInterpreterRuntime() + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = true + runtime := NewTestInterpreterRuntimeWithConfig(config) address := common.MustBytesToAddress([]byte{0x1}) @@ -1617,6 +1625,7 @@ func TestRuntimeResourceOwnerChange(t *testing.T) { config := DefaultTestInterpreterConfig config.ResourceOwnerChangeHandlerEnabled = true + config.StorageFormatV2Enabled = true runtime := NewTestInterpreterRuntimeWithConfig(config) address1 := common.MustBytesToAddress([]byte{0x1}) @@ -6256,7 +6265,13 @@ func TestRuntimeStorageForNewAccount(t *testing.T) { // Create empty storage ledger := NewTestLedger(nil, LedgerOnWriteCounter(&writeCount)) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -6305,7 +6320,13 @@ func TestRuntimeStorageForNewAccount(t *testing.T) { // Create empty storage ledger := NewTestLedger(nil, LedgerOnWriteEntries(&writeEntries)) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -6383,7 +6404,13 @@ func TestRuntimeStorageForNewAccount(t *testing.T) { t.Run("create, commit, write, commit, remove, commit", func(t *testing.T) { // Create empty storage ledger := NewTestLedger(nil, nil) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -6517,7 +6544,13 @@ func TestRuntimeStorageForMigratedAccount(t *testing.T) { domainStorageMapCount int, ) (TestLedger, accountStorageMapValues) { ledger := NewTestLedger(nil, nil) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -6551,7 +6584,13 @@ func TestRuntimeStorageForMigratedAccount(t *testing.T) { address, existingDomains, domainStorageMapCount) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -6597,7 +6636,13 @@ func 
TestRuntimeStorageForMigratedAccount(t *testing.T) { existingDomains, domainStorageMapCount, ) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -6681,7 +6726,13 @@ func TestRuntimeStorageForMigratedAccount(t *testing.T) { tc.existingDomains, tc.existingDomainStorageMapCount, ) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -6766,7 +6817,13 @@ func TestRuntimeStorageForMigratedAccount(t *testing.T) { existingDomains, existingDomainStorageMapCount, ) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -6859,7 +6916,13 @@ func TestRuntimeStorageForMigratedAccount(t *testing.T) { domains, domainStorageMapCount, ) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -6997,7 +7060,13 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { domainStorageMapCount int, ) (TestLedger, accountStorageMapValues) { ledger := NewTestLedger(nil, nil) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreter(t) @@ -7058,7 +7127,13 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { address, existingDomains, domainStorageMapCount) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -7105,7 +7180,13 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { existingDomains, existingDomainStorageMapCount, ) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -7193,7 +7274,13 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { tc.existingDomains, tc.existingDomainStorageMapCount, ) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -7284,7 +7371,13 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { domains, existingDomainStorageMapCount, ) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -7393,7 +7486,13 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { domains, domainStorageMapCount, ) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -7594,7 +7693,13 @@ func TestRuntimeStorageDomainStorageMapInlinedState(t *testing.T) { // Create empty storage ledger := NewTestLedger(nil, nil) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -7716,7 +7821,13 @@ func 
TestRuntimeStorageLargeDomainValues(t *testing.T) { // Create empty storage ledger := NewTestLedger(nil, nil) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -7852,7 +7963,13 @@ func TestDomainRegisterMigrationForLargeAccount(t *testing.T) { LedgerOnWriteCounter(&writeCount), accountsInfo, ) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(t, storage) @@ -7960,7 +8077,8 @@ func writeToDomainStorageMap( return domainValues } -// checkAccountStorageMapData creates new storage with given storedValues, and compares account storage map values with given expectedAccountValues. +// checkAccountStorageMapData creates new storage with given storedValues, +// and compares account storage map values with given expectedAccountValues. func checkAccountStorageMapData( tb testing.TB, storedValues map[string][]byte, @@ -7970,7 +8088,13 @@ func checkAccountStorageMapData( ) { // Create storage with given storedValues and storageIndices ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) - storage := NewStorage(ledger, nil) + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) inter := NewTestInterpreterWithStorage(tb, storage) diff --git a/runtime/transaction_executor.go b/runtime/transaction_executor.go index a8d3f30a90..f071aeb8e2 100644 --- a/runtime/transaction_executor.go +++ b/runtime/transaction_executor.go @@ -106,7 +106,13 @@ func (executor *interpreterTransactionExecutor) preprocess() (err error) { runtimeInterface := context.Interface - storage := NewStorage(runtimeInterface, runtimeInterface) + storage := NewStorage( + runtimeInterface, + runtimeInterface, + StorageConfig{ + StorageFormatV2Enabled: interpreterRuntime.defaultConfig.StorageFormatV2Enabled, + }, + ) executor.storage = storage environment := context.Environment From e28e8c98771ff21ff55283aec9d188b5dc4f52d3 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 7 Nov 2024 09:35:35 -0600 Subject: [PATCH 14/52] Refactor to use a type alias --- runtime/migrate_domain_registers.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/runtime/migrate_domain_registers.go b/runtime/migrate_domain_registers.go index 462f588278..2298e99e38 100644 --- a/runtime/migrate_domain_registers.go +++ b/runtime/migrate_domain_registers.go @@ -27,6 +27,8 @@ import ( "github.com/onflow/cadence/interpreter" ) +type AccountStorageMaps = *orderedmap.OrderedMap[common.Address, *interpreter.AccountStorageMap] + type GetDomainStorageMapFunc func( ledger atree.Ledger, storage atree.SlabStorage, @@ -69,14 +71,14 @@ func (m *DomainRegisterMigration) MigrateAccounts( accounts *orderedmap.OrderedMap[common.Address, struct{}], pred func(common.Address) bool, ) ( - *orderedmap.OrderedMap[common.Address, *interpreter.AccountStorageMap], + AccountStorageMaps, error, ) { if accounts == nil || accounts.Len() == 0 { return nil, nil } - var migratedAccounts *orderedmap.OrderedMap[common.Address, *interpreter.AccountStorageMap] + var migratedAccounts AccountStorageMaps for pair := accounts.Oldest(); pair != nil; pair = pair.Next() { address := pair.Key From bc2b313cfbe0c99bd3d6536e950b28742d5b6edb Mon Sep 17 00:00:00 2001 From: Faye Amacker 
<33205765+fxamacker@users.noreply.github.com> Date: Tue, 12 Nov 2024 16:27:05 -0600 Subject: [PATCH 15/52] Replace domain string with enum for AccountStorageMap This commit replaces domain string, such as "storage", "public", "private", etc. with domain enum (integer) as the key for AccountStorageMap. Also, this uses CBOR to store the domain integer in shortest form. This optimization further improves storage efficiency of domain payloads of PR 3664. --- interpreter/account_storagemap.go | 20 ++--- interpreter/account_storagemap_test.go | 104 +++++++++++++++++------- runtime/runtime_memory_metering_test.go | 6 +- runtime/runtime_test.go | 2 +- 4 files changed, 88 insertions(+), 44 deletions(-) diff --git a/interpreter/account_storagemap.go b/interpreter/account_storagemap.go index 7498ae14c1..e6ec1f3422 100644 --- a/interpreter/account_storagemap.go +++ b/interpreter/account_storagemap.go @@ -76,7 +76,7 @@ func NewAccountStorageMapWithRootID( // DomainExists returns true if the given domain exists in the account storage map. func (s *AccountStorageMap) DomainExists(domain common.StorageDomain) bool { - key := StringStorageMapKey(domain.Identifier()) + key := Uint64StorageMapKey(domain) exists, err := s.orderedMap.Has( key.AtreeValueCompare, @@ -99,7 +99,7 @@ func (s *AccountStorageMap) GetDomain( domain common.StorageDomain, createIfNotExists bool, ) *DomainStorageMap { - key := StringStorageMapKey(domain.Identifier()) + key := Uint64StorageMapKey(domain) storedValue, err := s.orderedMap.Get( key.AtreeValueCompare, @@ -135,7 +135,7 @@ func (s *AccountStorageMap) NewDomain( domainStorageMap := NewDomainStorageMap(gauge, s.orderedMap.Storage, s.orderedMap.Address()) - key := StringStorageMapKey(domain.Identifier()) + key := Uint64StorageMapKey(domain) existingStorable, err := s.orderedMap.Set( key.AtreeValueCompare, @@ -181,7 +181,7 @@ func (s *AccountStorageMap) setDomain( ) (existed bool) { interpreter.recordStorageMutation() - key := StringStorageMapKey(domain.Identifier()) + key := Uint64StorageMapKey(domain) existingValueStorable, err := s.orderedMap.Set( key.AtreeValueCompare, @@ -218,7 +218,7 @@ func (s *AccountStorageMap) setDomain( func (s *AccountStorageMap) removeDomain(interpreter *Interpreter, domain common.StorageDomain) (existed bool) { interpreter.recordStorageMutation() - key := StringStorageMapKey(domain.Identifier()) + key := Uint64StorageMapKey(domain) existingKeyStorable, existingValueStorable, err := s.orderedMap.Remove( key.AtreeValueCompare, @@ -236,7 +236,7 @@ func (s *AccountStorageMap) removeDomain(interpreter *Interpreter, domain common // Key - // NOTE: Key is just an atree.Value (StringAtreeValue), not an interpreter.Value, + // NOTE: Key is just an atree.Value (Uint64AtreeValue), not an interpreter.Value, // so do not need (can) convert and not need to deep remove interpreter.RemoveReferencedSlab(existingKeyStorable) @@ -334,13 +334,13 @@ func (i *AccountStorageMapIterator) Next() (common.StorageDomain, *DomainStorage } func convertKeyToDomain(v atree.Value) common.StorageDomain { - key, ok := v.(StringAtreeValue) + key, ok := v.(Uint64AtreeValue) if !ok { panic(errors.NewUnexpectedError("domain key type %T isn't expected", key)) } - domain, found := common.StorageDomainFromIdentifier(string(key)) - if !found { - panic(errors.NewUnexpectedError("domain key %s isn't expected", key)) + domain, err := common.StorageDomainFromUint64(uint64(key)) + if err != nil { + panic(errors.NewUnexpectedError("domain key %d isn't expected: %w", key, err)) } return domain } diff 
--git a/interpreter/account_storagemap_test.go b/interpreter/account_storagemap_test.go index b926f2724b..6a8794a60e 100644 --- a/interpreter/account_storagemap_test.go +++ b/interpreter/account_storagemap_test.go @@ -20,6 +20,7 @@ package interpreter_test import ( "math/rand" + goruntime "runtime" "slices" "strconv" "strings" @@ -188,6 +189,8 @@ func TestAccountStorageMapCreateDomain(t *testing.T) { require.NotNil(t, accountStorageMap) require.Equal(t, uint64(0), accountStorageMap.Count()) + accountStorageMapRootSlabID := accountStorageMap.SlabID() + for _, domain := range common.AllStorageDomains { const createIfNotExists = true domainStoragemap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) @@ -199,7 +202,12 @@ func TestAccountStorageMapCreateDomain(t *testing.T) { checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) - CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMapRootSlabID}) + + err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + checkAccountStorageMapDataWithRawData(t, ledger.StoredValues, ledger.StorageIndices, accountStorageMapRootSlabID, accountValues) }) t.Run("non-empty", func(t *testing.T) { @@ -225,6 +233,8 @@ func TestAccountStorageMapCreateDomain(t *testing.T) { const count = 10 accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + accountStorageMapRootSlabID := accountStorageMap.SlabID() + for _, domain := range common.AllStorageDomains { const createIfNotExists = true domainStoragemap := accountStorageMap.GetDomain(nil, inter, domain, createIfNotExists) @@ -238,7 +248,12 @@ func TestAccountStorageMapCreateDomain(t *testing.T) { checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) - CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMapRootSlabID}) + + err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + checkAccountStorageMapDataWithRawData(t, ledger.StoredValues, ledger.StorageIndices, accountStorageMapRootSlabID, accountValues) }) } @@ -271,6 +286,8 @@ func TestAccountStorageMapSetAndUpdateDomain(t *testing.T) { require.NotNil(t, accountStorageMap) require.Equal(t, uint64(0), accountStorageMap.Count()) + accountStorageMapRootSlabID := accountStorageMap.SlabID() + const count = 10 for _, domain := range common.AllStorageDomains { @@ -285,7 +302,12 @@ func TestAccountStorageMapSetAndUpdateDomain(t *testing.T) { checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) - CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMapRootSlabID}) + + err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + checkAccountStorageMapDataWithRawData(t, ledger.StoredValues, ledger.StorageIndices, accountStorageMapRootSlabID, accountValues) }) t.Run("non-empty", func(t *testing.T) { @@ -311,6 +333,8 @@ func TestAccountStorageMapSetAndUpdateDomain(t *testing.T) { const count = 10 accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + accountStorageMapRootSlabID := accountStorageMap.SlabID() + for _, domain := range common.AllStorageDomains { domainStorageMap := 
interpreter.NewDomainStorageMap(nil, storage, atree.Address(address)) @@ -324,7 +348,12 @@ func TestAccountStorageMapSetAndUpdateDomain(t *testing.T) { checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) - CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMapRootSlabID}) + + err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + checkAccountStorageMapDataWithRawData(t, ledger.StoredValues, ledger.StorageIndices, accountStorageMapRootSlabID, accountValues) }) } @@ -355,6 +384,8 @@ func TestAccountStorageMapRemoveDomain(t *testing.T) { require.NotNil(t, accountStorageMap) require.Equal(t, uint64(0), accountStorageMap.Count()) + accountStorageMapRootSlabID := accountStorageMap.SlabID() + for _, domain := range common.AllStorageDomains { existed := accountStorageMap.WriteDomain(inter, domain, nil) require.False(t, existed) @@ -362,7 +393,12 @@ func TestAccountStorageMapRemoveDomain(t *testing.T) { checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) - CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMapRootSlabID}) + + err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + checkAccountStorageMapDataWithRawData(t, ledger.StoredValues, ledger.StorageIndices, accountStorageMapRootSlabID, accountValues) }) t.Run("non-empty", func(t *testing.T) { @@ -388,6 +424,8 @@ func TestAccountStorageMapRemoveDomain(t *testing.T) { const count = 10 accountStorageMap, accountValues := createAccountStorageMap(storage, inter, address, existingDomains, count, random) + accountStorageMapRootSlabID := accountStorageMap.SlabID() + for _, domain := range common.AllStorageDomains { existed := accountStorageMap.WriteDomain(inter, domain, nil) @@ -398,7 +436,12 @@ func TestAccountStorageMapRemoveDomain(t *testing.T) { checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) - CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMapRootSlabID}) + + err := storage.PersistentSlabStorage.FastCommit(goruntime.NumCPU()) + require.NoError(t, err) + + checkAccountStorageMapDataWithRawData(t, ledger.StoredValues, ledger.StorageIndices, accountStorageMapRootSlabID, accountValues) }) } @@ -575,18 +618,7 @@ func TestAccountStorageMapLoadFromRootSlabID(t *testing.T) { accountStorageMapRootSlabID, accountValues, storedValues, storageIndices := init() - ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) - storage := runtime.NewStorage(ledger, nil) - - accountStorageMap := interpreter.NewAccountStorageMapWithRootID(storage, accountStorageMapRootSlabID) - require.Equal(t, uint64(0), accountStorageMap.Count()) - require.Equal(t, accountStorageMapRootSlabID, accountStorageMap.SlabID()) - - inter := NewTestInterpreterWithStorage(t, storage) - - checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) - - CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + checkAccountStorageMapDataWithRawData(t, storedValues, storageIndices, accountStorageMapRootSlabID, accountValues) }) t.Run("non-empty", func(t *testing.T) { @@ -619,18 +651,7 @@ func TestAccountStorageMapLoadFromRootSlabID(t *testing.T) { accountStorageMapRootSlabID, accountValues, storedValues, 
storageIndices := init() - ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) - storage := runtime.NewStorage(ledger, nil) - - accountStorageMap := interpreter.NewAccountStorageMapWithRootID(storage, accountStorageMapRootSlabID) - require.Equal(t, uint64(len(existingDomains)), accountStorageMap.Count()) - require.Equal(t, accountStorageMapRootSlabID, accountStorageMap.SlabID()) - - inter := NewTestInterpreterWithStorage(t, storage) - - checkAccountStorageMapData(t, inter, accountStorageMap, accountValues) - - CheckAtreeStorageHealth(t, storage, []atree.SlabID{accountStorageMap.SlabID()}) + checkAccountStorageMapDataWithRawData(t, storedValues, storageIndices, accountStorageMapRootSlabID, accountValues) }) } @@ -697,6 +718,29 @@ func writeRandomValuesToDomainStorageMap( return domainValues } +// checkAccountStorageMapDataWithRawData checks loaded account storage map against expected account values. +func checkAccountStorageMapDataWithRawData( + tb testing.TB, + storedValues map[string][]byte, + storageIndices map[string]uint64, + rootSlabID atree.SlabID, + expectedAccountValues accountStorageMapValues, +) { + // Create new storage from raw data + ledger := NewTestLedgerWithData(nil, nil, storedValues, storageIndices) + storage := runtime.NewStorage(ledger, nil) + + inter := NewTestInterpreterWithStorage(tb, storage) + + loadedAccountStorageMap := interpreter.NewAccountStorageMapWithRootID(storage, rootSlabID) + require.Equal(tb, uint64(len(expectedAccountValues)), loadedAccountStorageMap.Count()) + require.Equal(tb, rootSlabID, loadedAccountStorageMap.SlabID()) + + checkAccountStorageMapData(tb, inter, loadedAccountStorageMap, expectedAccountValues) + + CheckAtreeStorageHealth(tb, storage, []atree.SlabID{rootSlabID}) +} + // checkAccountStorageMapData iterates account storage map and compares values with given expectedAccountValues. 
func checkAccountStorageMapData( tb testing.TB, diff --git a/runtime/runtime_memory_metering_test.go b/runtime/runtime_memory_metering_test.go index 09dc1d8069..a141120e99 100644 --- a/runtime/runtime_memory_metering_test.go +++ b/runtime/runtime_memory_metering_test.go @@ -1073,7 +1073,7 @@ func TestRuntimeMeterEncoding(t *testing.T) { ) require.NoError(t, err) - assert.Equal(t, 114, int(meter.getMemory(common.MemoryKindBytes))) + assert.Equal(t, 107, int(meter.getMemory(common.MemoryKindBytes))) }) t.Run("string in loop", func(t *testing.T) { @@ -1122,7 +1122,7 @@ func TestRuntimeMeterEncoding(t *testing.T) { ) require.NoError(t, err) - assert.Equal(t, 61501, int(meter.getMemory(common.MemoryKindBytes))) + assert.Equal(t, 61494, int(meter.getMemory(common.MemoryKindBytes))) }) t.Run("composite", func(t *testing.T) { @@ -1173,6 +1173,6 @@ func TestRuntimeMeterEncoding(t *testing.T) { ) require.NoError(t, err) - assert.Equal(t, 58369, int(meter.getMemory(common.MemoryKindBytes))) + assert.Equal(t, 58362, int(meter.getMemory(common.MemoryKindBytes))) }) } diff --git a/runtime/runtime_test.go b/runtime/runtime_test.go index ce4c2219b4..702def3041 100644 --- a/runtime/runtime_test.go +++ b/runtime/runtime_test.go @@ -7579,7 +7579,7 @@ func TestRuntimeComputationMetring(t *testing.T) { `, ok: true, hits: 3, - intensity: 115, + intensity: 108, }, } From 88c31e68aa7032b2df7f357015ffb39de0701edb Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 12 Nov 2024 17:24:41 -0600 Subject: [PATCH 16/52] Rename function convertKeyToDomain This commit renames function convertKeyToDomain to convertAccountStorageMapKeyToStorageDomain. --- interpreter/account_storagemap.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/interpreter/account_storagemap.go b/interpreter/account_storagemap.go index e6ec1f3422..d06aaafe87 100644 --- a/interpreter/account_storagemap.go +++ b/interpreter/account_storagemap.go @@ -284,7 +284,7 @@ func (s *AccountStorageMap) Domains() map[common.StorageDomain]struct{} { break } - domain := convertKeyToDomain(k) + domain := convertAccountStorageMapKeyToStorageDomain(k) domains[domain] = struct{}{} } @@ -326,14 +326,14 @@ func (i *AccountStorageMapIterator) Next() (common.StorageDomain, *DomainStorage return common.StorageDomainUnknown, nil } - key := convertKeyToDomain(k) + key := convertAccountStorageMapKeyToStorageDomain(k) value := NewDomainStorageMapWithAtreeValue(v) return key, value } -func convertKeyToDomain(v atree.Value) common.StorageDomain { +func convertAccountStorageMapKeyToStorageDomain(v atree.Value) common.StorageDomain { key, ok := v.(Uint64AtreeValue) if !ok { panic(errors.NewUnexpectedError("domain key type %T isn't expected", key)) From f68c5eb99bed0ef1a2cf0d36bb29db3415d95494 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 12 Nov 2024 17:33:31 -0600 Subject: [PATCH 17/52] Add comments and warnings about StorageDomain --- common/storagedomain.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/common/storagedomain.go b/common/storagedomain.go index 108196cdac..ac7b297daa 100644 --- a/common/storagedomain.go +++ b/common/storagedomain.go @@ -24,6 +24,19 @@ import ( "github.com/onflow/cadence/errors" ) +// StorageDomain is used to store domain values on chain. +// +// !!! *WARNING* !!! +// +// Only add new StorageDomain by: +// - appending to the end. 
+// +// Only remove StorageDomain by: +// - replacing existing StorageDomain with a placeholder `_`. +// +// DO *NOT* REPLACE EXISTING STORAGEDOMAIN! +// DO *NOT* REMOVE EXISTING STORAGEDOMAIN! +// DO *NOT* INSERT NEW STORAGEDOMAIN IN BETWEEN! type StorageDomain uint8 const ( @@ -54,6 +67,8 @@ const ( // StorageDomainAccountCapability is the storage domain which // records active account capability controller IDs StorageDomainAccountCapability + + // Append new StorageDomain here (if needed). ) var AllStorageDomains = []StorageDomain{ From d57959110dc1ff85c016d2e1beae1ea2ce03b253 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Tue, 12 Nov 2024 17:34:48 -0800 Subject: [PATCH 18/52] improve naming --- runtime/account_storage_v1.go | 6 +++--- runtime/migrate_domain_registers.go | 2 +- runtime/storage.go | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/runtime/account_storage_v1.go b/runtime/account_storage_v1.go index 2a970aa9f7..7ae67e3c36 100644 --- a/runtime/account_storage_v1.go +++ b/runtime/account_storage_v1.go @@ -58,7 +58,7 @@ func (s *AccountStorageV1) GetDomainStorageMap( domainStorageMap *interpreter.DomainStorageMap, ) { var err error - domainStorageMap, err = getDomainStorageMapFromLegacyDomainRegister( + domainStorageMap, err = getDomainStorageMapFromV1DomainRegister( s.ledger, s.slabStorage, address, @@ -120,8 +120,8 @@ func (s *AccountStorageV1) commit() error { return nil } -// getDomainStorageMapFromLegacyDomainRegister returns domain storage map from legacy domain register. -func getDomainStorageMapFromLegacyDomainRegister( +// getDomainStorageMapFromV1DomainRegister returns domain storage map from legacy domain register. +func getDomainStorageMapFromV1DomainRegister( ledger atree.Ledger, storage atree.SlabStorage, address common.Address, diff --git a/runtime/migrate_domain_registers.go b/runtime/migrate_domain_registers.go index e51bf81887..2ffd388308 100644 --- a/runtime/migrate_domain_registers.go +++ b/runtime/migrate_domain_registers.go @@ -55,7 +55,7 @@ func NewDomainRegisterMigration( storage: storage, inter: inter, memoryGauge: memoryGauge, - getDomainStorageMap: getDomainStorageMapFromLegacyDomainRegister, + getDomainStorageMap: getDomainStorageMapFromV1DomainRegister, } } diff --git a/runtime/storage.go b/runtime/storage.go index 977ceb9813..d836fba47b 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -417,7 +417,7 @@ func (s *Storage) commit(inter *interpreter.Interpreter, commitContractUpdates b // return domainStorageMap, nil // } // -// return getDomainStorageMapFromLegacyDomainRegister(ledger, storage, address, domain) +// return getDomainStorageMapFromV1DomainRegister(ledger, storage, address, domain) // } // // migrator := NewDomainRegisterMigration(s.Ledger, s.PersistentSlabStorage, inter, s.memoryGauge) From 99710ce200548e4a94be57cda10ec4ffb7e63525 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Wed, 13 Nov 2024 17:18:21 -0800 Subject: [PATCH 19/52] ensure account is not tested for V1 registers if V2 is disabled --- runtime/sharedstate_test.go | 201 +++++++++++++++++++++++++++++++++++- runtime/storage.go | 17 +-- 2 files changed, 210 insertions(+), 8 deletions(-) diff --git a/runtime/sharedstate_test.go b/runtime/sharedstate_test.go index 3bdbce7585..b24051fd20 100644 --- a/runtime/sharedstate_test.go +++ b/runtime/sharedstate_test.go @@ -31,7 +31,206 @@ import ( . 
"github.com/onflow/cadence/test_utils/runtime_utils" ) -func TestRuntimeSharedState(t *testing.T) { +func TestRuntimeSharedStateV1(t *testing.T) { + + t.Parallel() + + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = false + runtime := NewTestInterpreterRuntimeWithConfig(config) + + signerAddress := common.MustBytesToAddress([]byte{0x1}) + + deploy1 := DeploymentTransaction("C1", []byte(` + access(all) contract C1 { + access(all) fun hello() { + log("Hello from C1!") + } + } + `)) + + deploy2 := DeploymentTransaction("C2", []byte(` + access(all) contract C2 { + access(all) fun hello() { + log("Hello from C2!") + } + } + `)) + + accountCodes := map[common.Location][]byte{} + + var events []cadence.Event + var loggedMessages []string + + var interpreterState *interpreter.SharedState + + var ledgerReads []ownerKeyPair + + ledger := NewTestLedger( + func(owner, key, value []byte) { + ledgerReads = append( + ledgerReads, + ownerKeyPair{ + owner: owner, + key: key, + }, + ) + }, + nil, + ) + + runtimeInterface := &TestRuntimeInterface{ + Storage: ledger, + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{signerAddress}, nil + }, + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnRemoveAccountContractCode: func(location common.AddressLocation) error { + delete(accountCodes, location) + return nil + }, + OnResolveLocation: MultipleIdentifierLocationResolver, + OnProgramLog: func(message string) { + loggedMessages = append(loggedMessages, message) + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnSetInterpreterSharedState: func(state *interpreter.SharedState) { + interpreterState = state + }, + OnGetInterpreterSharedState: func() *interpreter.SharedState { + return interpreterState + }, + } + + environment := NewBaseInterpreterEnvironment(config) + + nextTransactionLocation := NewTransactionLocationGenerator() + + // Deploy contracts + + for _, source := range [][]byte{ + deploy1, + deploy2, + } { + err := runtime.ExecuteTransaction( + Script{ + Source: source, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + Environment: environment, + }, + ) + require.NoError(t, err) + } + + assert.NotEmpty(t, accountCodes) + + // Call C1.hello using transaction + + loggedMessages = nil + + err := runtime.ExecuteTransaction( + Script{ + Source: []byte(` + import C1 from 0x1 + + transaction { + prepare(signer: &Account) { + C1.hello() + } + } + `), + Arguments: nil, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + Environment: environment, + }, + ) + require.NoError(t, err) + + assert.Equal(t, []string{`"Hello from C1!"`}, loggedMessages) + + // Call C1.hello manually + + loggedMessages = nil + + _, err = runtime.InvokeContractFunction( + common.AddressLocation{ + Address: signerAddress, + Name: "C1", + }, + "hello", + nil, + nil, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + Environment: environment, + }, + ) + require.NoError(t, err) + + assert.Equal(t, []string{`"Hello from C1!"`}, loggedMessages) + + // Call C2.hello manually + + loggedMessages = nil + + _, err = runtime.InvokeContractFunction( + common.AddressLocation{ + Address: signerAddress, + Name: "C2", 
+ }, + "hello", + nil, + nil, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + Environment: environment, + }, + ) + require.NoError(t, err) + + assert.Equal(t, []string{`"Hello from C2!"`}, loggedMessages) + + // Assert shared state was used, + // i.e. data was not re-read + + require.Equal(t, + []ownerKeyPair{ + { + owner: signerAddress[:], + key: []byte(common.StorageDomainContract.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.StorageDomainContract.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, + }, + }, + ledgerReads, + ) +} + +func TestRuntimeSharedStateV2(t *testing.T) { t.Parallel() diff --git a/runtime/storage.go b/runtime/storage.go index d836fba47b..94401c5199 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -155,8 +155,7 @@ func (s *Storage) GetDomainStorageMap( } }() - if s.IsV1Account(address) || - !s.Config.StorageFormatV2Enabled { + if !s.Config.StorageFormatV2Enabled || s.IsV1Account(address) { domainStorageMap = s.AccountStorageV1.GetDomainStorageMap( address, @@ -169,7 +168,7 @@ func (s *Storage) GetDomainStorageMap( // mark the account as in storage format v1. if domainStorageMap != nil { - s.v1Accounts.Set(address, true) + s.setIsV1Account(address, true) } } else { @@ -214,10 +213,7 @@ func (s *Storage) IsV1Account(address common.Address) (isV1 bool) { // Cache result defer func() { - if s.v1Accounts == nil { - s.v1Accounts = &orderedmap.OrderedMap[common.Address, bool]{} - } - s.v1Accounts.Set(address, isV1) + s.setIsV1Account(address, isV1) }() // Check if a storage map register exists for any of the domains. @@ -239,6 +235,13 @@ func (s *Storage) IsV1Account(address common.Address) (isV1 bool) { return false } +func (s *Storage) setIsV1Account(address common.Address, isV1 bool) { + if s.v1Accounts == nil { + s.v1Accounts = &orderedmap.OrderedMap[common.Address, bool]{} + } + s.v1Accounts.Set(address, isV1) +} + // getSlabIndexFromRegisterValue returns register value as atree.SlabIndex. 
// This function returns error if // - underlying ledger panics, or From 4ac1120b654cc5b38c442c6ef3d203c6fe20c3af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Wed, 13 Nov 2024 17:47:13 -0800 Subject: [PATCH 20/52] simplify V1/V2 check --- runtime/account_storage_v2.go | 37 +++++++++++++-- runtime/sharedstate_test.go | 44 ++---------------- runtime/storage.go | 84 ++++++++--------------------------- 3 files changed, 54 insertions(+), 111 deletions(-) diff --git a/runtime/account_storage_v2.go b/runtime/account_storage_v2.go index 18e6b2ad83..4c76d3a4a0 100644 --- a/runtime/account_storage_v2.go +++ b/runtime/account_storage_v2.go @@ -195,6 +195,21 @@ func (s *AccountStorageV2) commit() error { return nil } +func getAccountStorageSlabIndex( + ledger atree.Ledger, + address common.Address, +) ( + atree.SlabIndex, + bool, + error, +) { + return getSlabIndexFromRegisterValue( + ledger, + address, + []byte(AccountStorageKey), + ) +} + func getAccountStorageMapFromRegister( ledger atree.Ledger, slabStorage atree.SlabStorage, @@ -203,26 +218,40 @@ func getAccountStorageMapFromRegister( *interpreter.AccountStorageMap, error, ) { - accountStorageSlabIndex, accountStorageRegisterExists, err := getSlabIndexFromRegisterValue( + slabIndex, registerExists, err := getAccountStorageSlabIndex( ledger, address, - []byte(AccountStorageKey), ) if err != nil { return nil, err } - if !accountStorageRegisterExists { + if !registerExists { return nil, nil } slabID := atree.NewSlabID( atree.Address(address), - accountStorageSlabIndex, + slabIndex, ) return interpreter.NewAccountStorageMapWithRootID(slabStorage, slabID), nil } +func hasAccountStorageMap( + ledger atree.Ledger, + address common.Address, +) (bool, error) { + + _, registerExists, err := getAccountStorageSlabIndex( + ledger, + address, + ) + if err != nil { + return false, err + } + return registerExists, nil +} + func (s *AccountStorageV2) cachedRootSlabIDs() []atree.SlabID { var slabIDs []atree.SlabID diff --git a/runtime/sharedstate_test.go b/runtime/sharedstate_test.go index b24051fd20..865f63004e 100644 --- a/runtime/sharedstate_test.go +++ b/runtime/sharedstate_test.go @@ -37,6 +37,7 @@ func TestRuntimeSharedStateV1(t *testing.T) { config := DefaultTestInterpreterConfig config.StorageFormatV2Enabled = false + config.AtreeValidationEnabled = false runtime := NewTestInterpreterRuntimeWithConfig(config) signerAddress := common.MustBytesToAddress([]byte{0x1}) @@ -236,6 +237,7 @@ func TestRuntimeSharedStateV2(t *testing.T) { config := DefaultTestInterpreterConfig config.StorageFormatV2Enabled = true + config.AtreeValidationEnabled = false runtime := NewTestInterpreterRuntimeWithConfig(config) signerAddress := common.MustBytesToAddress([]byte{0x1}) @@ -412,62 +414,22 @@ func TestRuntimeSharedStateV2(t *testing.T) { require.Equal(t, []ownerKeyPair{ - // Read account domain register to check if it is a migrated account - // Read returns no value. { owner: signerAddress[:], key: []byte(AccountStorageKey), }, - // Read contract domain register to check if it is a unmigrated account - // Read returns no value. { owner: signerAddress[:], key: []byte(common.StorageDomainContract.Identifier()), }, - // Read all available domain registers to check if it is a new account - // Read returns no value. 
{ owner: signerAddress[:], - key: []byte(common.PathDomainStorage.Identifier()), - }, - { - owner: signerAddress[:], - key: []byte(common.PathDomainPrivate.Identifier()), - }, - { - owner: signerAddress[:], - key: []byte(common.PathDomainPublic.Identifier()), + key: []byte(AccountStorageKey), }, { owner: signerAddress[:], key: []byte(common.StorageDomainContract.Identifier()), }, - { - owner: signerAddress[:], - key: []byte(common.StorageDomainInbox.Identifier()), - }, - { - owner: signerAddress[:], - key: []byte(common.StorageDomainCapabilityController.Identifier()), - }, - { - owner: signerAddress[:], - key: []byte(common.StorageDomainCapabilityControllerTag.Identifier()), - }, - { - owner: signerAddress[:], - key: []byte(common.StorageDomainPathCapability.Identifier()), - }, - { - owner: signerAddress[:], - key: []byte(common.StorageDomainAccountCapability.Identifier()), - }, - // Read account domain register - { - owner: signerAddress[:], - key: []byte(AccountStorageKey), - }, - // Read account storage map { owner: signerAddress[:], key: []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, diff --git a/runtime/storage.go b/runtime/storage.go index 94401c5199..74d2d371b6 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -59,10 +59,6 @@ type Storage struct { AccountStorageV1 *AccountStorageV1 AccountStorageV2 *AccountStorageV2 - - // v1Accounts contains the cached result of determining - // if the account is in storage format v1 or not. - v1Accounts *orderedmap.OrderedMap[common.Address, bool] } var _ atree.SlabStorage = &Storage{} @@ -155,28 +151,24 @@ func (s *Storage) GetDomainStorageMap( } }() - if !s.Config.StorageFormatV2Enabled || s.IsV1Account(address) { + var isV2 bool + if s.Config.StorageFormatV2Enabled { + var err error + isV2, err = hasAccountStorageMap(s.Ledger, address) + if err != nil { + panic(err) + } + } - domainStorageMap = s.AccountStorageV1.GetDomainStorageMap( + if isV2 { + domainStorageMap = s.AccountStorageV2.GetDomainStorageMap( + inter, address, domain, createIfNotExists, ) - - // If the account was not in storage format v1, - // but the domain storage map was created, - // mark the account as in storage format v1. - - if domainStorageMap != nil { - s.setIsV1Account(address, true) - } - } else { - // The account is not in storage format v1, - // so use the new account storage format v2. - - domainStorageMap = s.AccountStorageV2.GetDomainStorageMap( - inter, + domainStorageMap = s.AccountStorageV1.GetDomainStorageMap( address, domain, createIfNotExists, @@ -197,51 +189,6 @@ func (s *Storage) cacheDomainStorageMap( s.cachedDomainStorageMaps[storageDomainKey] = domainStorageMap } -// IsV1Account returns true if given account is in account storage format v1. -func (s *Storage) IsV1Account(address common.Address) (isV1 bool) { - - // Check cache - - if s.v1Accounts != nil { - var present bool - isV1, present = s.v1Accounts.Get(address) - if present { - return isV1 - } - } - - // Cache result - - defer func() { - s.setIsV1Account(address, isV1) - }() - - // Check if a storage map register exists for any of the domains. - // Check the most frequently used domains first, such as storage, public, private. 
- for _, domain := range common.AllStorageDomains { - _, domainExists, err := getSlabIndexFromRegisterValue( - s.Ledger, - address, - []byte(domain.Identifier()), - ) - if err != nil { - panic(err) - } - if domainExists { - return true - } - } - - return false -} - -func (s *Storage) setIsV1Account(address common.Address, isV1 bool) { - if s.v1Accounts == nil { - s.v1Accounts = &orderedmap.OrderedMap[common.Address, bool]{} - } - s.v1Accounts.Set(address, isV1) -} - // getSlabIndexFromRegisterValue returns register value as atree.SlabIndex. // This function returns error if // - underlying ledger panics, or @@ -489,7 +436,12 @@ func (s *Storage) CheckHealth() error { // Only accounts in storage format v1 store domain storage maps // directly at the root of the account - if !s.IsV1Account(address) { + isV2, err := hasAccountStorageMap(s.Ledger, address) + if err != nil { + return err + } + + if isV2 { continue } From 1eea55e41dccdaa6f4528968c28090c7ab74e6ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Thu, 14 Nov 2024 14:32:55 -0800 Subject: [PATCH 21/52] revert part of 4ac1120b654cc5b38c442c6ef3d203c6fe20c3af --- runtime/sharedstate_test.go | 42 ++++++++++++++++- runtime/storage.go | 90 +++++++++++++++++++++++++++++-------- 2 files changed, 112 insertions(+), 20 deletions(-) diff --git a/runtime/sharedstate_test.go b/runtime/sharedstate_test.go index 865f63004e..ae693b2d98 100644 --- a/runtime/sharedstate_test.go +++ b/runtime/sharedstate_test.go @@ -414,21 +414,61 @@ func TestRuntimeSharedStateV2(t *testing.T) { require.Equal(t, []ownerKeyPair{ + // Read account domain register to check if it is a migrated account + // Read returns no value. { owner: signerAddress[:], key: []byte(AccountStorageKey), }, + // Read all available domain registers to check if it is a new account + // Read returns no value. + { + owner: signerAddress[:], + key: []byte(common.PathDomainStorage.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.PathDomainPrivate.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.PathDomainPublic.Identifier()), + }, { owner: signerAddress[:], key: []byte(common.StorageDomainContract.Identifier()), }, + { + owner: signerAddress[:], + key: []byte(common.StorageDomainInbox.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.StorageDomainCapabilityController.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.StorageDomainCapabilityControllerTag.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.StorageDomainPathCapability.Identifier()), + }, + { + owner: signerAddress[:], + key: []byte(common.StorageDomainAccountCapability.Identifier()), + }, { owner: signerAddress[:], key: []byte(AccountStorageKey), }, { owner: signerAddress[:], - key: []byte(common.StorageDomainContract.Identifier()), + key: []byte(AccountStorageKey), + }, + { + owner: signerAddress[:], + key: []byte(AccountStorageKey), }, { owner: signerAddress[:], diff --git a/runtime/storage.go b/runtime/storage.go index 74d2d371b6..7460c8c4b2 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -47,6 +47,10 @@ type Storage struct { // Key is StorageKey{address, domain} and value is domain storage map. cachedDomainStorageMaps map[interpreter.StorageDomainKey]*interpreter.DomainStorageMap + // cachedV1Accounts contains the cached result of determining + // if the account is in storage format v1 or not. + cachedV1Accounts map[common.Address]bool + // contractUpdates is a cache of contract updates. 
// Key is StorageKey{contract_address, contract_name} and value is contract composite value. contractUpdates *orderedmap.OrderedMap[interpreter.StorageKey, *interpreter.CompositeValue] @@ -151,24 +155,20 @@ func (s *Storage) GetDomainStorageMap( } }() - var isV2 bool - if s.Config.StorageFormatV2Enabled { - var err error - isV2, err = hasAccountStorageMap(s.Ledger, address) - if err != nil { - panic(err) - } - } - - if isV2 { - domainStorageMap = s.AccountStorageV2.GetDomainStorageMap( - inter, + if !s.Config.StorageFormatV2Enabled || s.IsV1Account(address) { + domainStorageMap = s.AccountStorageV1.GetDomainStorageMap( address, domain, createIfNotExists, ) + + if domainStorageMap != nil { + s.cacheIsV1Account(address, true) + } + } else { - domainStorageMap = s.AccountStorageV1.GetDomainStorageMap( + domainStorageMap = s.AccountStorageV2.GetDomainStorageMap( + inter, address, domain, createIfNotExists, @@ -178,6 +178,63 @@ func (s *Storage) GetDomainStorageMap( return domainStorageMap } +// IsV1Account returns true if given account is in account storage format v1. +func (s *Storage) IsV1Account(address common.Address) (isV1 bool) { + + // Check cache + + if s.cachedV1Accounts != nil { + var present bool + isV1, present = s.cachedV1Accounts[address] + if present { + return isV1 + } + } + + // Cache result + + defer func() { + s.cacheIsV1Account(address, isV1) + }() + + // First check if account storage map exists. + // In that case the account was already migrated to account storage format v2, + // and we do not need to check the domain storage map registers. + + accountStorageMapExists, err := hasAccountStorageMap(s.Ledger, address) + if err != nil { + panic(err) + } + if accountStorageMapExists { + return false + } + + // Check if a storage map register exists for any of the domains. + // Check the most frequently used domains first, such as storage, public, private. 
+ for _, domain := range common.AllStorageDomains { + _, domainExists, err := getSlabIndexFromRegisterValue( + s.Ledger, + address, + []byte(domain.Identifier()), + ) + if err != nil { + panic(err) + } + if domainExists { + return true + } + } + + return false +} + +func (s *Storage) cacheIsV1Account(address common.Address, isV1 bool) { + if s.cachedV1Accounts == nil { + s.cachedV1Accounts = map[common.Address]bool{} + } + s.cachedV1Accounts[address] = isV1 +} + func (s *Storage) cacheDomainStorageMap( storageDomainKey interpreter.StorageDomainKey, domainStorageMap *interpreter.DomainStorageMap, @@ -436,12 +493,7 @@ func (s *Storage) CheckHealth() error { // Only accounts in storage format v1 store domain storage maps // directly at the root of the account - isV2, err := hasAccountStorageMap(s.Ledger, address) - if err != nil { - return err - } - - if isV2 { + if !s.IsV1Account(address) { continue } From 0178bcb943184372a5a36597340dac948aefe91f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Thu, 14 Nov 2024 15:27:12 -0800 Subject: [PATCH 22/52] test storage format v1 and v2 --- runtime/runtime_memory_metering_test.go | 509 +++++++++------ runtime/runtime_test.go | 830 ++++++++++++++---------- runtime/sharedstate_test.go | 516 +++++---------- runtime/storage_test.go | 370 ++++++----- 4 files changed, 1201 insertions(+), 1024 deletions(-) diff --git a/runtime/runtime_memory_metering_test.go b/runtime/runtime_memory_metering_test.go index 40c8093e84..6e6687e4c8 100644 --- a/runtime/runtime_memory_metering_test.go +++ b/runtime/runtime_memory_metering_test.go @@ -815,7 +815,9 @@ func TestRuntimeStorageCommitsMetering(t *testing.T) { // Before the storageUsed function is invoked, the deltas must have been committed. // So the encoded slabs must have been metered at this point. 
assert.Equal(t, uint64(0), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) + storageUsedInvoked = true + return 1, nil }, } @@ -840,89 +842,152 @@ func TestRuntimeStorageCommitsMetering(t *testing.T) { t.Run("account.storage.save", func(t *testing.T) { t.Parallel() - code := []byte(` - transaction { - prepare(signer: auth(Storage) &Account) { - signer.storage.save([[1, 2, 3], [4, 5, 6]], to: /storage/test) - } - } - `) + test := func(storageFormatV2Enabled bool) { - meter := newTestMemoryGauge() + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) - runtimeInterface := &TestRuntimeInterface{ - Storage: NewTestLedger(nil, nil), - OnGetSigningAccounts: func() ([]Address, error) { - return []Address{{42}}, nil - }, - OnMeterMemory: meter.MeterMemory, - } + t.Run(name, func(t *testing.T) { + t.Parallel() - config := DefaultTestInterpreterConfig - config.StorageFormatV2Enabled = true - runtime := NewTestInterpreterRuntimeWithConfig(config) + code := []byte(` + transaction { + prepare(signer: auth(Storage) &Account) { + signer.storage.save([[1, 2, 3], [4, 5, 6]], to: /storage/test) + } + } + `) - err := runtime.ExecuteTransaction( - Script{ - Source: code, - }, - Context{ - Interface: runtimeInterface, - Location: common.TransactionLocation{}, - }, - ) + meter := newTestMemoryGauge() - require.NoError(t, err) - assert.Equal(t, uint64(5), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{{42}}, nil + }, + OnMeterMemory: meter.MeterMemory, + } + + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = storageFormatV2Enabled + runtime := NewTestInterpreterRuntimeWithConfig(config) + + err := runtime.ExecuteTransaction( + Script{ + Source: code, + }, + Context{ + Interface: runtimeInterface, + Location: common.TransactionLocation{}, + }, + ) + + require.NoError(t, err) + + var expected uint64 + if storageFormatV2Enabled { + expected = 5 + } else { + expected = 4 + } + assert.Equal(t, + expected, + meter.getMemory(common.MemoryKindAtreeEncodedSlab), + ) + }) + } + + for _, storageFormatV2Enabled := range []bool{false, true} { + test(storageFormatV2Enabled) + } }) t.Run("storage used non empty", func(t *testing.T) { t.Parallel() - code := []byte(` - transaction { - prepare(signer: auth(Storage) &Account) { - signer.storage.save([[1, 2, 3], [4, 5, 6]], to: /storage/test) - signer.storage.used - } - } - `) + test := func(storageFormatV2Enabled bool) { - meter := newTestMemoryGauge() - storageUsedInvoked := false + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) - runtimeInterface := &TestRuntimeInterface{ - Storage: NewTestLedger(nil, nil), - OnGetSigningAccounts: func() ([]Address, error) { - return []Address{{42}}, nil - }, - OnMeterMemory: meter.MeterMemory, - OnGetStorageUsed: func(_ Address) (uint64, error) { - // Before the storageUsed function is invoked, the deltas must have been committed. - // So the encoded slabs must have been metered at this point. 
- assert.Equal(t, uint64(5), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) - storageUsedInvoked = true - return 1, nil - }, - } + t.Run(name, func(t *testing.T) { + t.Parallel() - config := DefaultTestInterpreterConfig - config.StorageFormatV2Enabled = true - runtime := NewTestInterpreterRuntimeWithConfig(config) + code := []byte(` + transaction { + prepare(signer: auth(Storage) &Account) { + signer.storage.save([[1, 2, 3], [4, 5, 6]], to: /storage/test) + signer.storage.used + } + } + `) - err := runtime.ExecuteTransaction( - Script{ - Source: code, - }, - Context{ - Interface: runtimeInterface, - Location: common.TransactionLocation{}, - }, - ) + meter := newTestMemoryGauge() + storageUsedInvoked := false - require.NoError(t, err) - assert.True(t, storageUsedInvoked) - assert.Equal(t, uint64(5), meter.getMemory(common.MemoryKindAtreeEncodedSlab)) + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{{42}}, nil + }, + OnMeterMemory: meter.MeterMemory, + OnGetStorageUsed: func(_ Address) (uint64, error) { + // Before the storageUsed function is invoked, the deltas must have been committed. + // So the encoded slabs must have been metered at this point. + var expected uint64 + if storageFormatV2Enabled { + expected = 5 + } else { + expected = 4 + } + assert.Equal(t, + expected, + meter.getMemory(common.MemoryKindAtreeEncodedSlab), + ) + + storageUsedInvoked = true + + return 1, nil + }, + } + + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = storageFormatV2Enabled + runtime := NewTestInterpreterRuntimeWithConfig(config) + + err := runtime.ExecuteTransaction( + Script{ + Source: code, + }, + Context{ + Interface: runtimeInterface, + Location: common.TransactionLocation{}, + }, + ) + + require.NoError(t, err) + assert.True(t, storageUsedInvoked) + + var expected uint64 + if storageFormatV2Enabled { + expected = 5 + } else { + expected = 4 + } + assert.Equal(t, + expected, + meter.getMemory(common.MemoryKindAtreeEncodedSlab), + ) + }) + } + + for _, storageFormatV2Enabled := range []bool{false, true} { + test(storageFormatV2Enabled) + } }) } @@ -1040,146 +1105,226 @@ func TestRuntimeMeterEncoding(t *testing.T) { t.Parallel() - config := DefaultTestInterpreterConfig - config.AtreeValidationEnabled = false - config.StorageFormatV2Enabled = true - rt := NewTestInterpreterRuntimeWithConfig(config) + test := func(storageFormatV2Enabled bool) { - address := common.MustBytesToAddress([]byte{0x1}) - storage := NewTestLedger(nil, nil) - meter := newTestMemoryGauge() + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) - runtimeInterface := &TestRuntimeInterface{ - Storage: storage, - OnGetSigningAccounts: func() ([]Address, error) { - return []Address{address}, nil - }, - OnMeterMemory: meter.MeterMemory, - } + t.Run(name, func(t *testing.T) { + t.Parallel() - text := "A quick brown fox jumps over the lazy dog" + config := DefaultTestInterpreterConfig + config.AtreeValidationEnabled = false + config.StorageFormatV2Enabled = storageFormatV2Enabled + rt := NewTestInterpreterRuntimeWithConfig(config) - err := rt.ExecuteTransaction( - Script{ - Source: []byte(fmt.Sprintf(` - transaction() { - prepare(acc: auth(Storage) &Account) { - var s = "%s" - acc.storage.save(s, to:/storage/some_path) - } - }`, - text, - )), - }, - Context{ - Interface: runtimeInterface, - Location: common.TransactionLocation{}, - }, - ) + address := 
common.MustBytesToAddress([]byte{0x1}) + storage := NewTestLedger(nil, nil) + meter := newTestMemoryGauge() - require.NoError(t, err) - assert.Equal(t, 107, int(meter.getMemory(common.MemoryKindBytes))) + runtimeInterface := &TestRuntimeInterface{ + Storage: storage, + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{address}, nil + }, + OnMeterMemory: meter.MeterMemory, + } + + text := "A quick brown fox jumps over the lazy dog" + + err := rt.ExecuteTransaction( + Script{ + Source: []byte(fmt.Sprintf(` + transaction() { + prepare(acc: auth(Storage) &Account) { + var s = "%s" + acc.storage.save(s, to:/storage/some_path) + } + }`, + text, + )), + }, + Context{ + Interface: runtimeInterface, + Location: common.TransactionLocation{}, + }, + ) + + require.NoError(t, err) + + var expected uint64 + if storageFormatV2Enabled { + expected = 107 + } else { + expected = 75 + } + assert.Equal(t, + expected, + meter.getMemory(common.MemoryKindBytes), + ) + }) + } + + for _, storageFormatV2Enabled := range []bool{false, true} { + test(storageFormatV2Enabled) + } }) t.Run("string in loop", func(t *testing.T) { t.Parallel() - config := DefaultTestInterpreterConfig - config.AtreeValidationEnabled = false - config.StorageFormatV2Enabled = true - rt := NewTestInterpreterRuntimeWithConfig(config) + test := func(storageFormatV2Enabled bool) { - address := common.MustBytesToAddress([]byte{0x1}) - storage := NewTestLedger(nil, nil) - meter := newTestMemoryGauge() + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) - runtimeInterface := &TestRuntimeInterface{ - Storage: storage, - OnGetSigningAccounts: func() ([]Address, error) { - return []Address{address}, nil - }, - OnMeterMemory: meter.MeterMemory, - } + t.Run(name, func(t *testing.T) { + t.Parallel() - text := "A quick brown fox jumps over the lazy dog" + config := DefaultTestInterpreterConfig + config.AtreeValidationEnabled = false + config.StorageFormatV2Enabled = storageFormatV2Enabled + rt := NewTestInterpreterRuntimeWithConfig(config) - err := rt.ExecuteTransaction( - Script{ - Source: []byte(fmt.Sprintf(` - transaction() { - prepare(acc: auth(Storage) &Account) { - var i = 0 - var s = "%s" - while i<1000 { - let path = StoragePath(identifier: "i".concat(i.toString()))! - acc.storage.save(s, to: path) - i=i+1 - } - } - }`, - text, - )), - }, - Context{ - Interface: runtimeInterface, - Location: common.TransactionLocation{}, - }, - ) + address := common.MustBytesToAddress([]byte{0x1}) + storage := NewTestLedger(nil, nil) + meter := newTestMemoryGauge() - require.NoError(t, err) - assert.Equal(t, 61494, int(meter.getMemory(common.MemoryKindBytes))) + runtimeInterface := &TestRuntimeInterface{ + Storage: storage, + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{address}, nil + }, + OnMeterMemory: meter.MeterMemory, + } + + text := "A quick brown fox jumps over the lazy dog" + + err := rt.ExecuteTransaction( + Script{ + Source: []byte(fmt.Sprintf(` + transaction() { + prepare(acc: auth(Storage) &Account) { + var i = 0 + var s = "%s" + while i<1000 { + let path = StoragePath(identifier: "i".concat(i.toString()))! 
+ acc.storage.save(s, to: path) + i=i+1 + } + } + }`, + text, + )), + }, + Context{ + Interface: runtimeInterface, + Location: common.TransactionLocation{}, + }, + ) + + require.NoError(t, err) + + var expected uint64 + if storageFormatV2Enabled { + expected = 61494 + } else { + expected = 61455 + } + assert.Equal(t, + expected, + meter.getMemory(common.MemoryKindBytes), + ) + }) + } + + for _, storageFormatV2Enabled := range []bool{false, true} { + test(storageFormatV2Enabled) + } }) t.Run("composite", func(t *testing.T) { t.Parallel() - config := DefaultTestInterpreterConfig - config.AtreeValidationEnabled = false - config.StorageFormatV2Enabled = true - rt := NewTestInterpreterRuntimeWithConfig(config) + test := func(storageFormatV2Enabled bool) { - address := common.MustBytesToAddress([]byte{0x1}) - storage := NewTestLedger(nil, nil) - meter := newTestMemoryGauge() + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) - runtimeInterface := &TestRuntimeInterface{ - Storage: storage, - OnGetSigningAccounts: func() ([]Address, error) { - return []Address{address}, nil - }, - OnMeterMemory: meter.MeterMemory, - } + t.Run(name, func(t *testing.T) { + t.Parallel() - _, err := rt.ExecuteScript( - Script{ - Source: []byte(` - access(all) fun main() { - let acc = getAuthAccount(0x02) - var i = 0 - var f = Foo() - while i<1000 { - let path = StoragePath(identifier: "i".concat(i.toString()))! - acc.storage.save(f, to: path) - i=i+1 - } - } + config := DefaultTestInterpreterConfig + config.AtreeValidationEnabled = false + config.StorageFormatV2Enabled = storageFormatV2Enabled + rt := NewTestInterpreterRuntimeWithConfig(config) - access(all) struct Foo { - access(self) var id: Int - init() { - self.id = 123456789 - } - }`), - }, - Context{ - Interface: runtimeInterface, - Location: common.ScriptLocation{}, - }, - ) + address := common.MustBytesToAddress([]byte{0x1}) + storage := NewTestLedger(nil, nil) + meter := newTestMemoryGauge() - require.NoError(t, err) - assert.Equal(t, 58362, int(meter.getMemory(common.MemoryKindBytes))) + runtimeInterface := &TestRuntimeInterface{ + Storage: storage, + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{address}, nil + }, + OnMeterMemory: meter.MeterMemory, + } + + _, err := rt.ExecuteScript( + Script{ + Source: []byte(` + access(all) fun main() { + let acc = getAuthAccount(0x02) + var i = 0 + var f = Foo() + while i<1000 { + let path = StoragePath(identifier: "i".concat(i.toString()))! 
+ acc.storage.save(f, to: path) + i=i+1 + } + } + + access(all) struct Foo { + access(self) var id: Int + init() { + self.id = 123456789 + } + } + `), + }, + Context{ + Interface: runtimeInterface, + Location: common.ScriptLocation{}, + }, + ) + + require.NoError(t, err) + + var expected uint64 + if storageFormatV2Enabled { + expected = 58362 + } else { + expected = 58323 + } + + assert.Equal(t, + expected, + meter.getMemory(common.MemoryKindBytes), + ) + }) + } + + for _, storageFormatV2Enabled := range []bool{false, true} { + test(storageFormatV2Enabled) + } }) } diff --git a/runtime/runtime_test.go b/runtime/runtime_test.go index a41b83d597..6bf269aad6 100644 --- a/runtime/runtime_test.go +++ b/runtime/runtime_test.go @@ -5683,104 +5683,190 @@ func TestRuntimeContractWriteback(t *testing.T) { t.Parallel() - config := DefaultTestInterpreterConfig - config.StorageFormatV2Enabled = true - runtime := NewTestInterpreterRuntimeWithConfig(config) - addressValue := cadence.BytesToAddress([]byte{0xCA, 0xDE}) - contract := []byte(` - access(all) contract Test { + test := func( + storageFormatV2Enabled bool, + expectedWrites1 []ownerKeyPair, + expectedWrites2 []ownerKeyPair, + ) { - access(all) var test: Int + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) - init() { - self.test = 1 - } + t.Run(name, func(t *testing.T) { + t.Parallel() - access(all) fun setTest(_ test: Int) { - self.test = test - } - } - `) + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = storageFormatV2Enabled + runtime := NewTestInterpreterRuntimeWithConfig(config) - deploy := DeploymentTransaction("Test", contract) + contract := []byte(` + access(all) contract Test { - readTx := []byte(` - import Test from 0xCADE + access(all) var test: Int - transaction { + init() { + self.test = 1 + } - prepare(signer: &Account) { - log(Test.test) - } - } - `) + access(all) fun setTest(_ test: Int) { + self.test = test + } + } + `) - writeTx := []byte(` - import Test from 0xCADE + deploy := DeploymentTransaction("Test", contract) - transaction { + readTx := []byte(` + import Test from 0xCADE - prepare(signer: &Account) { - Test.setTest(2) - } - } - `) + transaction { - var accountCode []byte - var events []cadence.Event - var loggedMessages []string - var writes []ownerKeyPair + prepare(signer: &Account) { + log(Test.test) + } + } + `) + + writeTx := []byte(` + import Test from 0xCADE + + transaction { + + prepare(signer: &Account) { + Test.setTest(2) + } + } + `) + + var accountCode []byte + var events []cadence.Event + var loggedMessages []string + var writes []ownerKeyPair + + onWrite := func(owner, key, value []byte) { + writes = append(writes, ownerKeyPair{ + owner, + key, + }) + } + + runtimeInterface := &TestRuntimeInterface{ + OnGetCode: func(_ Location) (bytes []byte, err error) { + return accountCode, nil + }, + Storage: NewTestLedger(nil, onWrite), + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{Address(addressValue)}, nil + }, + OnResolveLocation: NewSingleIdentifierLocationResolver(t), + OnGetAccountContractCode: func(_ common.AddressLocation) (code []byte, err error) { + return accountCode, nil + }, + OnUpdateAccountContractCode: func(_ common.AddressLocation, code []byte) (err error) { + accountCode = code + return nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnProgramLog: func(message string) { + loggedMessages = append(loggedMessages, message) + }, + } + + 
nextTransactionLocation := NewTransactionLocationGenerator() + + err := runtime.ExecuteTransaction( + Script{ + Source: deploy, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) + + assert.NotNil(t, accountCode) + + assert.Equal(t, + expectedWrites1, + writes, + ) + + writes = nil + + err = runtime.ExecuteTransaction( + Script{ + Source: readTx, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) + + assert.Empty(t, writes) + + writes = nil + + err = runtime.ExecuteTransaction( + Script{ + Source: writeTx, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) + + assert.Equal(t, + expectedWrites2, + writes, + ) - onWrite := func(owner, key, value []byte) { - writes = append(writes, ownerKeyPair{ - owner, - key, }) } - runtimeInterface := &TestRuntimeInterface{ - OnGetCode: func(_ Location) (bytes []byte, err error) { - return accountCode, nil - }, - Storage: NewTestLedger(nil, onWrite), - OnGetSigningAccounts: func() ([]Address, error) { - return []Address{Address(addressValue)}, nil - }, - OnResolveLocation: NewSingleIdentifierLocationResolver(t), - OnGetAccountContractCode: func(_ common.AddressLocation) (code []byte, err error) { - return accountCode, nil - }, - OnUpdateAccountContractCode: func(_ common.AddressLocation, code []byte) (err error) { - accountCode = code - return nil - }, - OnEmitEvent: func(event cadence.Event) error { - events = append(events, event) - return nil - }, - OnProgramLog: func(message string) { - loggedMessages = append(loggedMessages, message) + test(false, + []ownerKeyPair{ + // storage index to contract domain storage map + { + addressValue[:], + []byte("contract"), + }, + // contract value + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, + }, + // contract domain storage map + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, + }, }, - } - - nextTransactionLocation := NewTransactionLocationGenerator() - err := runtime.ExecuteTransaction( - Script{ - Source: deploy, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), + []ownerKeyPair{ + // Storage map is modified because contract value is inlined in contract storage map. + // NOTE: contract value slab doesn't exist. 
+ { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, + }, }, ) - require.NoError(t, err) - assert.NotNil(t, accountCode) + test( + true, - assert.Equal(t, []ownerKeyPair{ // storage index to account storage map { @@ -5805,38 +5891,7 @@ func TestRuntimeContractWriteback(t *testing.T) { []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, }, }, - writes, - ) - - writes = nil - - err = runtime.ExecuteTransaction( - Script{ - Source: readTx, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - }, - ) - require.NoError(t, err) - - assert.Empty(t, writes) - - writes = nil - - err = runtime.ExecuteTransaction( - Script{ - Source: writeTx, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - }, - ) - require.NoError(t, err) - assert.Equal(t, []ownerKeyPair{ // Account storage map is modified because: // - contract value is inlined in contract storage map, and @@ -5847,7 +5902,6 @@ func TestRuntimeContractWriteback(t *testing.T) { []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, }, }, - writes, ) } @@ -5855,90 +5909,244 @@ func TestRuntimeStorageWriteback(t *testing.T) { t.Parallel() - config := DefaultTestInterpreterConfig - config.StorageFormatV2Enabled = true - runtime := NewTestInterpreterRuntimeWithConfig(config) - addressValue := cadence.BytesToAddress([]byte{0xCA, 0xDE}) - contract := []byte(` - access(all) contract Test { + test := func( + storageFormatV2Enabled bool, + expectedWrites1 []ownerKeyPair, + expectedWrites2 []ownerKeyPair, + expectedWrites3 []ownerKeyPair, + ) { - access(all) resource R { + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) + t.Run(name, func(t *testing.T) { + t.Parallel() + + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = storageFormatV2Enabled + runtime := NewTestInterpreterRuntimeWithConfig(config) + + contract := []byte(` + access(all) contract Test { + + access(all) resource R { + + access(all) var test: Int + + init() { + self.test = 1 + } - access(all) var test: Int + access(all) fun setTest(_ test: Int) { + self.test = test + } + } - init() { - self.test = 1 + + access(all) fun createR(): @R { + return <-create R() + } } + `) - access(all) fun setTest(_ test: Int) { - self.test = test - } - } + deploy := DeploymentTransaction("Test", contract) + var accountCode []byte + var events []cadence.Event + var loggedMessages []string + var writes []ownerKeyPair - access(all) fun createR(): @R { - return <-create R() - } - } - `) + onWrite := func(owner, key, _ []byte) { + writes = append(writes, ownerKeyPair{ + owner, + key, + }) + } - deploy := DeploymentTransaction("Test", contract) + runtimeInterface := &TestRuntimeInterface{ + OnGetCode: func(_ Location) (bytes []byte, err error) { + return accountCode, nil + }, + Storage: NewTestLedger(nil, onWrite), + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{Address(addressValue)}, nil + }, + OnResolveLocation: NewSingleIdentifierLocationResolver(t), + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + return accountCode, nil + }, + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCode = code + return nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnProgramLog: func(message string) { + loggedMessages = append(loggedMessages, message) + }, + } - var accountCode []byte - var events 
[]cadence.Event - var loggedMessages []string - var writes []ownerKeyPair + nextTransactionLocation := NewTransactionLocationGenerator() + + err := runtime.ExecuteTransaction( + Script{ + Source: deploy, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) + + assert.NotNil(t, accountCode) + + assert.Equal(t, + expectedWrites1, + writes, + ) + + writes = nil + + err = runtime.ExecuteTransaction( + Script{ + Source: []byte(` + import Test from 0xCADE + + transaction { + + prepare(signer: auth(Storage) &Account) { + signer.storage.save(<-Test.createR(), to: /storage/r) + } + } + `), + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) + + assert.Equal(t, + expectedWrites2, + writes, + ) + + readTx := []byte(` + import Test from 0xCADE + + transaction { + + prepare(signer: auth(Storage) &Account) { + log(signer.storage.borrow<&Test.R>(from: /storage/r)!.test) + } + } + `) + + writes = nil + + err = runtime.ExecuteTransaction( + Script{ + Source: readTx, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) + + assert.Empty(t, writes) + + writeTx := []byte(` + import Test from 0xCADE + + transaction { + + prepare(signer: auth(Storage) &Account) { + let r = signer.storage.borrow<&Test.R>(from: /storage/r)! + r.setTest(2) + } + } + `) + + writes = nil + + err = runtime.ExecuteTransaction( + Script{ + Source: writeTx, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) - onWrite := func(owner, key, _ []byte) { - writes = append(writes, ownerKeyPair{ - owner, - key, + assert.Equal(t, + expectedWrites3, + writes, + ) }) } - runtimeInterface := &TestRuntimeInterface{ - OnGetCode: func(_ Location) (bytes []byte, err error) { - return accountCode, nil - }, - Storage: NewTestLedger(nil, onWrite), - OnGetSigningAccounts: func() ([]Address, error) { - return []Address{Address(addressValue)}, nil - }, - OnResolveLocation: NewSingleIdentifierLocationResolver(t), - OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { - return accountCode, nil - }, - OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { - accountCode = code - return nil - }, - OnEmitEvent: func(event cadence.Event) error { - events = append(events, event) - return nil - }, - OnProgramLog: func(message string) { - loggedMessages = append(loggedMessages, message) + test( + false, + []ownerKeyPair{ + // storage index to contract domain storage map + { + addressValue[:], + []byte("contract"), + }, + // contract value + // NOTE: contract value slab is empty because it is inlined in contract domain storage map + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, + }, + // contract domain storage map + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, + }, }, - } - - nextTransactionLocation := NewTransactionLocationGenerator() - - err := runtime.ExecuteTransaction( - Script{ - Source: deploy, + []ownerKeyPair{ + // storage index to storage domain storage map + { + addressValue[:], + []byte("storage"), + }, + // resource value + // NOTE: resource value slab is empty because it is inlined in storage domain storage map + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, + }, + // storage domain storage map + // NOTE: resource value slab 
is inlined. + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + }, }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), + []ownerKeyPair{ + // Storage map is modified because resource value is inlined in storage map + // NOTE: resource value slab is empty. + { + addressValue[:], + []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + }, }, ) - require.NoError(t, err) - - assert.NotNil(t, accountCode) - assert.Equal(t, + test( + true, []ownerKeyPair{ // storage index to account storage map { @@ -5963,32 +6171,7 @@ func TestRuntimeStorageWriteback(t *testing.T) { []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, }, }, - writes, - ) - - writes = nil - - err = runtime.ExecuteTransaction( - Script{ - Source: []byte(` - import Test from 0xCADE - - transaction { - - prepare(signer: auth(Storage) &Account) { - signer.storage.save(<-Test.createR(), to: /storage/r) - } - } - `), - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - }, - ) - require.NoError(t, err) - assert.Equal(t, []ownerKeyPair{ // account storage map // NOTE: account storage map is updated with new storage domain storage map (inlined). @@ -6009,61 +6192,7 @@ func TestRuntimeStorageWriteback(t *testing.T) { []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5}, }, }, - writes, - ) - - readTx := []byte(` - import Test from 0xCADE - - transaction { - - prepare(signer: auth(Storage) &Account) { - log(signer.storage.borrow<&Test.R>(from: /storage/r)!.test) - } - } - `) - - writes = nil - - err = runtime.ExecuteTransaction( - Script{ - Source: readTx, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - }, - ) - require.NoError(t, err) - - assert.Empty(t, writes) - - writeTx := []byte(` - import Test from 0xCADE - - transaction { - - prepare(signer: auth(Storage) &Account) { - let r = signer.storage.borrow<&Test.R>(from: /storage/r)! - r.setTest(2) - } - } - `) - - writes = nil - - err = runtime.ExecuteTransaction( - Script{ - Source: writeTx, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - }, - ) - require.NoError(t, err) - assert.Equal(t, []ownerKeyPair{ // Account storage map is modified because resource value is inlined in storage map, // and storage map is inlined in account storage map. 
@@ -6073,7 +6202,6 @@ func TestRuntimeStorageWriteback(t *testing.T) { []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, }, }, - writes, ) } @@ -7527,11 +7655,12 @@ func TestRuntimeComputationMetring(t *testing.T) { t.Parallel() type test struct { - name string - code string - ok bool - hits uint - intensity uint + name string + code string + ok bool + hits uint + v1Intensity uint + v2Intensity uint } compLimit := uint(6) @@ -7540,118 +7669,143 @@ func TestRuntimeComputationMetring(t *testing.T) { { name: "Infinite while loop", code: ` - while true {} - `, - ok: false, - hits: compLimit, - intensity: 6, + while true {} + `, + ok: false, + hits: compLimit, + v1Intensity: 6, + v2Intensity: 6, }, { name: "Limited while loop", code: ` - var i = 0 - while i < 5 { - i = i + 1 - } - `, - ok: false, - hits: compLimit, - intensity: 6, + var i = 0 + while i < 5 { + i = i + 1 + } + `, + ok: false, + hits: compLimit, + v1Intensity: 6, + v2Intensity: 6, }, { name: "statement + createArray + transferArray + too many for-in loop iterations", code: ` - for i in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] {} - `, - ok: false, - hits: compLimit, - intensity: 6, + for i in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] {} + `, + ok: false, + hits: compLimit, + v1Intensity: 6, + v2Intensity: 6, }, { name: "statement + createArray + transferArray + two for-in loop iterations", code: ` - for i in [1, 2] {} - `, - ok: true, - hits: 4, - intensity: 4, + for i in [1, 2] {} + `, + ok: true, + hits: 4, + v1Intensity: 4, + v2Intensity: 4, }, { name: "statement + functionInvocation + encoding", code: ` - acc.storage.save("A quick brown fox jumps over the lazy dog", to:/storage/some_path) - `, - ok: true, - hits: 3, - intensity: 108, + acc.storage.save("A quick brown fox jumps over the lazy dog", to:/storage/some_path) + `, + ok: true, + hits: 3, + v1Intensity: 76, + v2Intensity: 108, }, } - for _, test := range tests { + for _, testCase := range tests { - t.Run(test.name, func(t *testing.T) { + t.Run(testCase.name, func(t *testing.T) { - script := []byte( - fmt.Sprintf( - ` - transaction { - prepare(acc: auth(Storage) &Account) { - %s - } - } - `, - test.code, - ), - ) + test := func(storageFormatV2Enabled bool) { - config := DefaultTestInterpreterConfig - config.StorageFormatV2Enabled = true - runtime := NewTestInterpreterRuntimeWithConfig(config) + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) + t.Run(name, func(t *testing.T) { + t.Parallel() - compErr := errors.New("computation exceeded limit") - var hits, totalIntensity uint - meterComputationFunc := func(kind common.ComputationKind, intensity uint) error { - hits++ - totalIntensity += intensity - if hits >= compLimit { - return compErr - } - return nil - } + script := []byte( + fmt.Sprintf( + ` + transaction { + prepare(acc: auth(Storage) &Account) { + %s + } + } + `, + testCase.code, + ), + ) + + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = storageFormatV2Enabled + runtime := NewTestInterpreterRuntimeWithConfig(config) + + compErr := errors.New("computation exceeded limit") + var hits, totalIntensity uint + meterComputationFunc := func(kind common.ComputationKind, intensity uint) error { + hits++ + totalIntensity += intensity + if hits >= compLimit { + return compErr + } + return nil + } - address := common.MustBytesToAddress([]byte{0x1}) + address := common.MustBytesToAddress([]byte{0x1}) - runtimeInterface := &TestRuntimeInterface{ - Storage: NewTestLedger(nil, nil), - OnGetSigningAccounts: func() ([]Address, error) 
{ - return []Address{address}, nil - }, - OnMeterComputation: meterComputationFunc, - } + runtimeInterface := &TestRuntimeInterface{ + Storage: NewTestLedger(nil, nil), + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{address}, nil + }, + OnMeterComputation: meterComputationFunc, + } - nextTransactionLocation := NewTransactionLocationGenerator() + nextTransactionLocation := NewTransactionLocationGenerator() - err := runtime.ExecuteTransaction( - Script{ - Source: script, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - }, - ) - if test.ok { - require.NoError(t, err) - } else { - RequireError(t, err) + err := runtime.ExecuteTransaction( + Script{ + Source: script, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + if testCase.ok { + require.NoError(t, err) + } else { + RequireError(t, err) + + var executionErr Error + require.ErrorAs(t, err, &executionErr) + require.ErrorAs(t, err.(Error).Unwrap(), &compErr) + } + + assert.Equal(t, testCase.hits, hits) - var executionErr Error - require.ErrorAs(t, err, &executionErr) - require.ErrorAs(t, err.(Error).Unwrap(), &compErr) + if storageFormatV2Enabled { + assert.Equal(t, testCase.v2Intensity, totalIntensity) + } else { + assert.Equal(t, testCase.v1Intensity, totalIntensity) + } + }) } - assert.Equal(t, test.hits, hits) - assert.Equal(t, test.intensity, totalIntensity) + for _, storageFormatV2Enabled := range []bool{false, true} { + test(storageFormatV2Enabled) + } }) } } diff --git a/runtime/sharedstate_test.go b/runtime/sharedstate_test.go index ae693b2d98..52dd68f00a 100644 --- a/runtime/sharedstate_test.go +++ b/runtime/sharedstate_test.go @@ -19,6 +19,7 @@ package runtime_test import ( + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -31,188 +32,210 @@ import ( . 
"github.com/onflow/cadence/test_utils/runtime_utils" ) -func TestRuntimeSharedStateV1(t *testing.T) { +func TestRuntimeSharedState(t *testing.T) { t.Parallel() - config := DefaultTestInterpreterConfig - config.StorageFormatV2Enabled = false - config.AtreeValidationEnabled = false - runtime := NewTestInterpreterRuntimeWithConfig(config) - signerAddress := common.MustBytesToAddress([]byte{0x1}) - deploy1 := DeploymentTransaction("C1", []byte(` - access(all) contract C1 { - access(all) fun hello() { - log("Hello from C1!") - } - } - `)) - - deploy2 := DeploymentTransaction("C2", []byte(` - access(all) contract C2 { - access(all) fun hello() { - log("Hello from C2!") - } - } - `)) - - accountCodes := map[common.Location][]byte{} - - var events []cadence.Event - var loggedMessages []string + test := func( + storageFormatV2Enabled bool, + expectedReads []ownerKeyPair, + ) { - var interpreterState *interpreter.SharedState + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) - var ledgerReads []ownerKeyPair + t.Run(name, func(t *testing.T) { + t.Parallel() - ledger := NewTestLedger( - func(owner, key, value []byte) { - ledgerReads = append( - ledgerReads, - ownerKeyPair{ - owner: owner, - key: key, - }, - ) - }, - nil, - ) + config := DefaultTestInterpreterConfig + config.StorageFormatV2Enabled = storageFormatV2Enabled + config.AtreeValidationEnabled = false + runtime := NewTestInterpreterRuntimeWithConfig(config) - runtimeInterface := &TestRuntimeInterface{ - Storage: ledger, - OnGetSigningAccounts: func() ([]Address, error) { - return []Address{signerAddress}, nil - }, - OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { - accountCodes[location] = code - return nil - }, - OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { - code = accountCodes[location] - return code, nil - }, - OnRemoveAccountContractCode: func(location common.AddressLocation) error { - delete(accountCodes, location) - return nil - }, - OnResolveLocation: MultipleIdentifierLocationResolver, - OnProgramLog: func(message string) { - loggedMessages = append(loggedMessages, message) - }, - OnEmitEvent: func(event cadence.Event) error { - events = append(events, event) - return nil - }, - OnSetInterpreterSharedState: func(state *interpreter.SharedState) { - interpreterState = state - }, - OnGetInterpreterSharedState: func() *interpreter.SharedState { - return interpreterState - }, - } + deploy1 := DeploymentTransaction("C1", []byte(` + access(all) contract C1 { + access(all) fun hello() { + log("Hello from C1!") + } + } + `)) - environment := NewBaseInterpreterEnvironment(config) + deploy2 := DeploymentTransaction("C2", []byte(` + access(all) contract C2 { + access(all) fun hello() { + log("Hello from C2!") + } + } + `)) - nextTransactionLocation := NewTransactionLocationGenerator() + accountCodes := map[common.Location][]byte{} - // Deploy contracts + var events []cadence.Event + var loggedMessages []string - for _, source := range [][]byte{ - deploy1, - deploy2, - } { - err := runtime.ExecuteTransaction( - Script{ - Source: source, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - Environment: environment, - }, - ) - require.NoError(t, err) - } + var interpreterState *interpreter.SharedState - assert.NotEmpty(t, accountCodes) + var ledgerReads []ownerKeyPair - // Call C1.hello using transaction + ledger := NewTestLedger( + func(owner, key, value []byte) { + ledgerReads = append( + 
ledgerReads, + ownerKeyPair{ + owner: owner, + key: key, + }, + ) + }, + nil, + ) - loggedMessages = nil + runtimeInterface := &TestRuntimeInterface{ + Storage: ledger, + OnGetSigningAccounts: func() ([]Address, error) { + return []Address{signerAddress}, nil + }, + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnRemoveAccountContractCode: func(location common.AddressLocation) error { + delete(accountCodes, location) + return nil + }, + OnResolveLocation: MultipleIdentifierLocationResolver, + OnProgramLog: func(message string) { + loggedMessages = append(loggedMessages, message) + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnSetInterpreterSharedState: func(state *interpreter.SharedState) { + interpreterState = state + }, + OnGetInterpreterSharedState: func() *interpreter.SharedState { + return interpreterState + }, + } + + environment := NewBaseInterpreterEnvironment(config) + + nextTransactionLocation := NewTransactionLocationGenerator() + + // Deploy contracts + + for _, source := range [][]byte{ + deploy1, + deploy2, + } { + err := runtime.ExecuteTransaction( + Script{ + Source: source, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + Environment: environment, + }, + ) + require.NoError(t, err) + } + + assert.NotEmpty(t, accountCodes) + + // Call C1.hello using transaction + + loggedMessages = nil + + err := runtime.ExecuteTransaction( + Script{ + Source: []byte(` + import C1 from 0x1 + + transaction { + prepare(signer: &Account) { + C1.hello() + } + } + `), + Arguments: nil, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + Environment: environment, + }, + ) + require.NoError(t, err) - err := runtime.ExecuteTransaction( - Script{ - Source: []byte(` - import C1 from 0x1 + assert.Equal(t, []string{`"Hello from C1!"`}, loggedMessages) - transaction { - prepare(signer: &Account) { - C1.hello() - } - } - `), - Arguments: nil, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - Environment: environment, - }, - ) - require.NoError(t, err) + // Call C1.hello manually - assert.Equal(t, []string{`"Hello from C1!"`}, loggedMessages) + loggedMessages = nil - // Call C1.hello manually + _, err = runtime.InvokeContractFunction( + common.AddressLocation{ + Address: signerAddress, + Name: "C1", + }, + "hello", + nil, + nil, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + Environment: environment, + }, + ) + require.NoError(t, err) - loggedMessages = nil + assert.Equal(t, []string{`"Hello from C1!"`}, loggedMessages) - _, err = runtime.InvokeContractFunction( - common.AddressLocation{ - Address: signerAddress, - Name: "C1", - }, - "hello", - nil, - nil, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - Environment: environment, - }, - ) - require.NoError(t, err) + // Call C2.hello manually - assert.Equal(t, []string{`"Hello from C1!"`}, loggedMessages) + loggedMessages = nil - // Call C2.hello manually + _, err = runtime.InvokeContractFunction( + common.AddressLocation{ + Address: signerAddress, + Name: "C2", + }, + "hello", + nil, + nil, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + 
Environment: environment, + }, + ) + require.NoError(t, err) - loggedMessages = nil + assert.Equal(t, []string{`"Hello from C2!"`}, loggedMessages) - _, err = runtime.InvokeContractFunction( - common.AddressLocation{ - Address: signerAddress, - Name: "C2", - }, - "hello", - nil, - nil, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - Environment: environment, - }, - ) - require.NoError(t, err) + // Assert shared state was used, + // i.e. data was not re-read - assert.Equal(t, []string{`"Hello from C2!"`}, loggedMessages) + require.Equal(t, + expectedReads, + ledgerReads, + ) + }) + } - // Assert shared state was used, - // i.e. data was not re-read + test( + false, - require.Equal(t, []ownerKeyPair{ { owner: signerAddress[:], @@ -227,192 +250,10 @@ func TestRuntimeSharedStateV1(t *testing.T) { key: []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, }, }, - ledgerReads, ) -} - -func TestRuntimeSharedStateV2(t *testing.T) { - - t.Parallel() - - config := DefaultTestInterpreterConfig - config.StorageFormatV2Enabled = true - config.AtreeValidationEnabled = false - runtime := NewTestInterpreterRuntimeWithConfig(config) - - signerAddress := common.MustBytesToAddress([]byte{0x1}) - - deploy1 := DeploymentTransaction("C1", []byte(` - access(all) contract C1 { - access(all) fun hello() { - log("Hello from C1!") - } - } - `)) - - deploy2 := DeploymentTransaction("C2", []byte(` - access(all) contract C2 { - access(all) fun hello() { - log("Hello from C2!") - } - } - `)) - - accountCodes := map[common.Location][]byte{} - - var events []cadence.Event - var loggedMessages []string - - var interpreterState *interpreter.SharedState - - var ledgerReads []ownerKeyPair - - ledger := NewTestLedger( - func(owner, key, value []byte) { - ledgerReads = append( - ledgerReads, - ownerKeyPair{ - owner: owner, - key: key, - }, - ) - }, - nil, - ) - - runtimeInterface := &TestRuntimeInterface{ - Storage: ledger, - OnGetSigningAccounts: func() ([]Address, error) { - return []Address{signerAddress}, nil - }, - OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { - accountCodes[location] = code - return nil - }, - OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { - code = accountCodes[location] - return code, nil - }, - OnRemoveAccountContractCode: func(location common.AddressLocation) error { - delete(accountCodes, location) - return nil - }, - OnResolveLocation: MultipleIdentifierLocationResolver, - OnProgramLog: func(message string) { - loggedMessages = append(loggedMessages, message) - }, - OnEmitEvent: func(event cadence.Event) error { - events = append(events, event) - return nil - }, - OnSetInterpreterSharedState: func(state *interpreter.SharedState) { - interpreterState = state - }, - OnGetInterpreterSharedState: func() *interpreter.SharedState { - return interpreterState - }, - } - - environment := NewBaseInterpreterEnvironment(config) - - nextTransactionLocation := NewTransactionLocationGenerator() - - // Deploy contracts - - for _, source := range [][]byte{ - deploy1, - deploy2, - } { - err := runtime.ExecuteTransaction( - Script{ - Source: source, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - Environment: environment, - }, - ) - require.NoError(t, err) - } - - assert.NotEmpty(t, accountCodes) - - // Call C1.hello using transaction - - loggedMessages = nil - - err := runtime.ExecuteTransaction( - Script{ - Source: []byte(` - import C1 from 0x1 - - transaction 
{ - prepare(signer: &Account) { - C1.hello() - } - } - `), - Arguments: nil, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - Environment: environment, - }, - ) - require.NoError(t, err) - - assert.Equal(t, []string{`"Hello from C1!"`}, loggedMessages) - - // Call C1.hello manually - - loggedMessages = nil - - _, err = runtime.InvokeContractFunction( - common.AddressLocation{ - Address: signerAddress, - Name: "C1", - }, - "hello", - nil, - nil, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - Environment: environment, - }, - ) - require.NoError(t, err) - - assert.Equal(t, []string{`"Hello from C1!"`}, loggedMessages) - - // Call C2.hello manually - - loggedMessages = nil - - _, err = runtime.InvokeContractFunction( - common.AddressLocation{ - Address: signerAddress, - Name: "C2", - }, - "hello", - nil, - nil, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - Environment: environment, - }, - ) - require.NoError(t, err) - - assert.Equal(t, []string{`"Hello from C2!"`}, loggedMessages) - - // Assert shared state was used, - // i.e. data was not re-read - require.Equal(t, + test( + true, []ownerKeyPair{ // Read account domain register to check if it is a migrated account // Read returns no value. @@ -475,6 +316,5 @@ func TestRuntimeSharedStateV2(t *testing.T) { key: []byte{'$', 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, }, }, - ledgerReads, ) } diff --git a/runtime/storage_test.go b/runtime/storage_test.go index 9486079ba4..3b16220127 100644 --- a/runtime/storage_test.go +++ b/runtime/storage_test.go @@ -1623,162 +1623,238 @@ func TestRuntimeResourceOwnerChange(t *testing.T) { t.Parallel() - config := DefaultTestInterpreterConfig - config.ResourceOwnerChangeHandlerEnabled = true - config.StorageFormatV2Enabled = true - runtime := NewTestInterpreterRuntimeWithConfig(config) + test := func( + storageFormatV2Enabled bool, + expectedNonEmptyKeys []string, + ) { + + name := fmt.Sprintf( + "storage format V2 enabled: %v", + storageFormatV2Enabled, + ) + t.Run(name, func(t *testing.T) { + t.Parallel() - address1 := common.MustBytesToAddress([]byte{0x1}) - address2 := common.MustBytesToAddress([]byte{0x2}) + config := DefaultTestInterpreterConfig + config.ResourceOwnerChangeHandlerEnabled = true + config.StorageFormatV2Enabled = storageFormatV2Enabled + runtime := NewTestInterpreterRuntimeWithConfig(config) - ledger := NewTestLedger(nil, nil) + address1 := common.MustBytesToAddress([]byte{0x1}) + address2 := common.MustBytesToAddress([]byte{0x2}) - var signers []Address + ledger := NewTestLedger(nil, nil) - deployTx := DeploymentTransaction("Test", []byte(` - access(all) contract Test { + var signers []Address - access(all) resource R {} + deployTx := DeploymentTransaction("Test", []byte(` + access(all) contract Test { - access(all) fun createR(): @R { - return <-create R() - } - } - `)) + access(all) resource R {} - type resourceOwnerChange struct { - uuid *interpreter.UInt64Value - typeID common.TypeID - oldAddress common.Address - newAddress common.Address - } + access(all) fun createR(): @R { + return <-create R() + } + } + `)) - accountCodes := map[Location][]byte{} - var events []cadence.Event - var loggedMessages []string - var resourceOwnerChanges []resourceOwnerChange + type resourceOwnerChange struct { + uuid *interpreter.UInt64Value + typeID common.TypeID + oldAddress common.Address + newAddress common.Address + } - runtimeInterface := &TestRuntimeInterface{ - Storage: ledger, - 
OnGetSigningAccounts: func() ([]Address, error) { - return signers, nil - }, - OnResolveLocation: NewSingleIdentifierLocationResolver(t), - OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { - accountCodes[location] = code - return nil - }, - OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { - code = accountCodes[location] - return code, nil - }, - OnEmitEvent: func(event cadence.Event) error { - events = append(events, event) - return nil - }, - OnProgramLog: func(message string) { - loggedMessages = append(loggedMessages, message) - }, - OnResourceOwnerChanged: func( - inter *interpreter.Interpreter, - resource *interpreter.CompositeValue, - oldAddress common.Address, - newAddress common.Address, - ) { - resourceOwnerChanges = append( - resourceOwnerChanges, - resourceOwnerChange{ - typeID: resource.TypeID(), - // TODO: provide proper location range - uuid: resource.ResourceUUID(inter, interpreter.EmptyLocationRange), - oldAddress: oldAddress, - newAddress: newAddress, + accountCodes := map[Location][]byte{} + var events []cadence.Event + var loggedMessages []string + var resourceOwnerChanges []resourceOwnerChange + + runtimeInterface := &TestRuntimeInterface{ + Storage: ledger, + OnGetSigningAccounts: func() ([]Address, error) { + return signers, nil }, - ) - }, - } + OnResolveLocation: NewSingleIdentifierLocationResolver(t), + OnUpdateAccountContractCode: func(location common.AddressLocation, code []byte) error { + accountCodes[location] = code + return nil + }, + OnGetAccountContractCode: func(location common.AddressLocation) (code []byte, err error) { + code = accountCodes[location] + return code, nil + }, + OnEmitEvent: func(event cadence.Event) error { + events = append(events, event) + return nil + }, + OnProgramLog: func(message string) { + loggedMessages = append(loggedMessages, message) + }, + OnResourceOwnerChanged: func( + inter *interpreter.Interpreter, + resource *interpreter.CompositeValue, + oldAddress common.Address, + newAddress common.Address, + ) { + resourceOwnerChanges = append( + resourceOwnerChanges, + resourceOwnerChange{ + typeID: resource.TypeID(), + // TODO: provide proper location range + uuid: resource.ResourceUUID(inter, interpreter.EmptyLocationRange), + oldAddress: oldAddress, + newAddress: newAddress, + }, + ) + }, + } - nextTransactionLocation := NewTransactionLocationGenerator() + nextTransactionLocation := NewTransactionLocationGenerator() - // Deploy contract + // Deploy contract - signers = []Address{address1} + signers = []Address{address1} - err := runtime.ExecuteTransaction( - Script{ - Source: deployTx, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - }, - ) - require.NoError(t, err) + err := runtime.ExecuteTransaction( + Script{ + Source: deployTx, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) - // Store + // Store - signers = []Address{address1} + signers = []Address{address1} - storeTx := []byte(` - import Test from 0x1 + storeTx := []byte(` + import Test from 0x1 - transaction { - prepare(signer: auth(Storage) &Account) { - signer.storage.save(<-Test.createR(), to: /storage/test) - } - } - `) + transaction { + prepare(signer: auth(Storage) &Account) { + signer.storage.save(<-Test.createR(), to: /storage/test) + } + } + `) - err = runtime.ExecuteTransaction( - Script{ - Source: storeTx, - }, - Context{ - Interface: runtimeInterface, - Location: 
nextTransactionLocation(), - }, - ) - require.NoError(t, err) + err = runtime.ExecuteTransaction( + Script{ + Source: storeTx, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) - // Transfer + // Transfer - signers = []Address{address1, address2} + signers = []Address{address1, address2} - transferTx := []byte(` - import Test from 0x1 + transferTx := []byte(` + import Test from 0x1 - transaction { - prepare( - signer1: auth(Storage) &Account, - signer2: auth(Storage) &Account - ) { - let value <- signer1.storage.load<@Test.R>(from: /storage/test)! - signer2.storage.save(<-value, to: /storage/test) - } - } - `) + transaction { + prepare( + signer1: auth(Storage) &Account, + signer2: auth(Storage) &Account + ) { + let value <- signer1.storage.load<@Test.R>(from: /storage/test)! + signer2.storage.save(<-value, to: /storage/test) + } + } + `) - err = runtime.ExecuteTransaction( - Script{ - Source: transferTx, - }, - Context{ - Interface: runtimeInterface, - Location: nextTransactionLocation(), - }, - ) - require.NoError(t, err) + err = runtime.ExecuteTransaction( + Script{ + Source: transferTx, + }, + Context{ + Interface: runtimeInterface, + Location: nextTransactionLocation(), + }, + ) + require.NoError(t, err) - var nonEmptyKeys []string - for key, data := range ledger.StoredValues { - if len(data) > 0 { - nonEmptyKeys = append(nonEmptyKeys, key) - } + var actualNonEmptyKeys []string + for key, data := range ledger.StoredValues { + if len(data) > 0 { + actualNonEmptyKeys = append(actualNonEmptyKeys, key) + } + } + + sort.Strings(actualNonEmptyKeys) + + assert.Equal(t, + expectedNonEmptyKeys, + actualNonEmptyKeys, + ) + + expectedUUID := interpreter.NewUnmeteredUInt64Value(1) + assert.Equal(t, + []resourceOwnerChange{ + { + typeID: "A.0000000000000001.Test.R", + uuid: &expectedUUID, + oldAddress: common.Address{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + }, + newAddress: common.Address{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, + }, + }, + { + typeID: "A.0000000000000001.Test.R", + uuid: &expectedUUID, + oldAddress: common.Address{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, + }, + newAddress: common.Address{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + }, + }, + { + typeID: "A.0000000000000001.Test.R", + uuid: &expectedUUID, + oldAddress: common.Address{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + }, + newAddress: common.Address{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, + }, + }, + }, + resourceOwnerChanges, + ) + }) } - sort.Strings(nonEmptyKeys) + test( + false, + []string{ + // account 0x1: + // NOTE: with atree inlining, contract is inlined in contract map + // storage map (domain key + map slab) + // + contract map (domain key + map slab) + "\x00\x00\x00\x00\x00\x00\x00\x01|$\x00\x00\x00\x00\x00\x00\x00\x02", + "\x00\x00\x00\x00\x00\x00\x00\x01|$\x00\x00\x00\x00\x00\x00\x00\x04", + "\x00\x00\x00\x00\x00\x00\x00\x01|contract", + "\x00\x00\x00\x00\x00\x00\x00\x01|storage", + // account 0x2 + // NOTE: with atree inlining, resource is inlined in storage map + // storage map (domain key + map slab) + "\x00\x00\x00\x00\x00\x00\x00\x02|$\x00\x00\x00\x00\x00\x00\x00\x02", + "\x00\x00\x00\x00\x00\x00\x00\x02|storage", + }, + ) - assert.Equal(t, + test( + true, []string{ // account 0x1: // NOTE: with account storage map and atree inlining, @@ -1794,44 +1870,6 @@ func TestRuntimeResourceOwnerChange(t *testing.T) { "\x00\x00\x00\x00\x00\x00\x00\x02|$\x00\x00\x00\x00\x00\x00\x00\x02", "\x00\x00\x00\x00\x00\x00\x00\x02|stored", }, 
- nonEmptyKeys, - ) - - expectedUUID := interpreter.NewUnmeteredUInt64Value(1) - assert.Equal(t, - []resourceOwnerChange{ - { - typeID: "A.0000000000000001.Test.R", - uuid: &expectedUUID, - oldAddress: common.Address{ - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - }, - newAddress: common.Address{ - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, - }, - }, - { - typeID: "A.0000000000000001.Test.R", - uuid: &expectedUUID, - oldAddress: common.Address{ - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, - }, - newAddress: common.Address{ - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - }, - }, - { - typeID: "A.0000000000000001.Test.R", - uuid: &expectedUUID, - oldAddress: common.Address{ - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, - }, - newAddress: common.Address{ - 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2, - }, - }, - }, - resourceOwnerChanges, ) } From 3b2af1c60a43fe9e41e0b075543522677b8de3b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Fri, 15 Nov 2024 13:32:32 -0800 Subject: [PATCH 23/52] improve comment --- runtime/account_storage_v1.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/runtime/account_storage_v1.go b/runtime/account_storage_v1.go index 7ae67e3c36..0c52537726 100644 --- a/runtime/account_storage_v1.go +++ b/runtime/account_storage_v1.go @@ -32,9 +32,9 @@ type AccountStorageV1 struct { slabStorage atree.SlabStorage memoryGauge common.MemoryGauge - // newDomainStorageMapSlabIndices contains root slab index of new domain storage maps. - // The indices are saved using Ledger.SetValue() during Commit(). - // Key is StorageKey{address, accountStorageKey} and value is 8-byte slab index. + // newDomainStorageMapSlabIndices contains root slab indices of new domain storage maps. + // The indices are saved using Ledger.SetValue() during commit(). + // Key is StorageDomainKey{common.StorageDomain, Address} and value is 8-byte slab index. 
newDomainStorageMapSlabIndices *orderedmap.OrderedMap[interpreter.StorageDomainKey, atree.SlabIndex] } From b761a7df880b8bce671338f261d8424c98b7d51e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Fri, 15 Nov 2024 14:16:16 -0800 Subject: [PATCH 24/52] Apply suggestions from code review Co-authored-by: Faye Amacker <33205765+fxamacker@users.noreply.github.com> --- runtime/runtime_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/runtime/runtime_test.go b/runtime/runtime_test.go index 6bf269aad6..626311f678 100644 --- a/runtime/runtime_test.go +++ b/runtime/runtime_test.go @@ -5687,8 +5687,8 @@ func TestRuntimeContractWriteback(t *testing.T) { test := func( storageFormatV2Enabled bool, - expectedWrites1 []ownerKeyPair, - expectedWrites2 []ownerKeyPair, + expectedDeployTxWrites []ownerKeyPair, + expectedWriteTxWrites []ownerKeyPair, ) { name := fmt.Sprintf( @@ -5913,9 +5913,9 @@ func TestRuntimeStorageWriteback(t *testing.T) { test := func( storageFormatV2Enabled bool, - expectedWrites1 []ownerKeyPair, - expectedWrites2 []ownerKeyPair, - expectedWrites3 []ownerKeyPair, + expectedDeployTxWrites []ownerKeyPair, + expectedSaveToStorageTxWrites []ownerKeyPair, + expectedModifyStorageTxWrites []ownerKeyPair, ) { name := fmt.Sprintf( From 4f93a4af1137ff4968cc70bfadac79f66bbf100d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Fri, 15 Nov 2024 14:19:11 -0800 Subject: [PATCH 25/52] adjust names based on b761a7df880b8bce671338f261d8424c98b7d51e --- runtime/runtime_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/runtime/runtime_test.go b/runtime/runtime_test.go index 626311f678..fbcc4a0716 100644 --- a/runtime/runtime_test.go +++ b/runtime/runtime_test.go @@ -5795,7 +5795,7 @@ func TestRuntimeContractWriteback(t *testing.T) { assert.NotNil(t, accountCode) assert.Equal(t, - expectedWrites1, + expectedDeployTxWrites, writes, ) @@ -5828,7 +5828,7 @@ func TestRuntimeContractWriteback(t *testing.T) { require.NoError(t, err) assert.Equal(t, - expectedWrites2, + expectedWriteTxWrites, writes, ) @@ -6007,7 +6007,7 @@ func TestRuntimeStorageWriteback(t *testing.T) { assert.NotNil(t, accountCode) assert.Equal(t, - expectedWrites1, + expectedDeployTxWrites, writes, ) @@ -6034,7 +6034,7 @@ func TestRuntimeStorageWriteback(t *testing.T) { require.NoError(t, err) assert.Equal(t, - expectedWrites2, + expectedSaveToStorageTxWrites, writes, ) @@ -6090,7 +6090,7 @@ func TestRuntimeStorageWriteback(t *testing.T) { require.NoError(t, err) assert.Equal(t, - expectedWrites3, + expectedModifyStorageTxWrites, writes, ) }) From e827dcd41777623a0ca2b2021ca17bc7d0d7d918 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Wed, 13 Nov 2024 16:56:33 -0800 Subject: [PATCH 26/52] simplify domain register migration: only migrate one account specifically --- runtime/migrate_domain_registers.go | 127 +++++----------- runtime/migrate_domain_registers_test.go | 183 ++++------------------- runtime/storage.go | 56 ------- 3 files changed, 69 insertions(+), 297 deletions(-) diff --git a/runtime/migrate_domain_registers.go b/runtime/migrate_domain_registers.go index 2ffd388308..d5b7f7aa0a 100644 --- a/runtime/migrate_domain_registers.go +++ b/runtime/migrate_domain_registers.go @@ -22,26 +22,16 @@ import ( "github.com/onflow/atree" "github.com/onflow/cadence/common" - "github.com/onflow/cadence/common/orderedmap" "github.com/onflow/cadence/errors" "github.com/onflow/cadence/interpreter" ) -type 
AccountStorageMaps = *orderedmap.OrderedMap[common.Address, *interpreter.AccountStorageMap] - -type GetDomainStorageMapFunc func( - ledger atree.Ledger, - storage atree.SlabStorage, - address common.Address, - domain common.StorageDomain, -) (*interpreter.DomainStorageMap, error) - +// DomainRegisterMigration migrates domain registers to account storage maps. type DomainRegisterMigration struct { - ledger atree.Ledger - storage atree.SlabStorage - inter *interpreter.Interpreter - memoryGauge common.MemoryGauge - getDomainStorageMap GetDomainStorageMapFunc + ledger atree.Ledger + storage atree.SlabStorage + inter *interpreter.Interpreter + memoryGauge common.MemoryGauge } func NewDomainRegisterMigration( @@ -51,74 +41,30 @@ func NewDomainRegisterMigration( memoryGauge common.MemoryGauge, ) *DomainRegisterMigration { return &DomainRegisterMigration{ - ledger: ledger, - storage: storage, - inter: inter, - memoryGauge: memoryGauge, - getDomainStorageMap: getDomainStorageMapFromV1DomainRegister, + ledger: ledger, + storage: storage, + inter: inter, + memoryGauge: memoryGauge, } } -// SetGetDomainStorageMapFunc allows user to provide custom GetDomainStorageMap function. -func (m *DomainRegisterMigration) SetGetDomainStorageMapFunc( - getDomainStorageMapFunc GetDomainStorageMapFunc, -) { - m.getDomainStorageMap = getDomainStorageMapFunc -} - -// MigrateAccounts migrates given accounts. -func (m *DomainRegisterMigration) MigrateAccounts( - accounts *orderedmap.OrderedMap[common.Address, struct{}], - pred func(common.Address) bool, +func (m *DomainRegisterMigration) MigrateAccount( + address common.Address, ) ( - AccountStorageMaps, + *interpreter.AccountStorageMap, error, ) { - if accounts == nil || accounts.Len() == 0 { - return nil, nil + exists, err := hasAccountStorageMap(m.ledger, address) + if err != nil { + return nil, err } - - var migratedAccounts AccountStorageMaps - - for pair := accounts.Oldest(); pair != nil; pair = pair.Next() { - address := pair.Key - - if !pred(address) { - continue - } - - migrated, err := isMigrated(m.ledger, address) - if err != nil { - return nil, err - } - if migrated { - continue - } - - accountStorageMap, err := m.MigrateAccount(address) - if err != nil { - return nil, err - } - - if accountStorageMap == nil { - continue - } - - if migratedAccounts == nil { - migratedAccounts = &orderedmap.OrderedMap[common.Address, *interpreter.AccountStorageMap]{} - } - migratedAccounts.Set(address, accountStorageMap) + if exists { + // Account storage map already exists + return nil, nil } - return migratedAccounts, nil -} - -func (m *DomainRegisterMigration) MigrateAccount( - address common.Address, -) (*interpreter.AccountStorageMap, error) { - // Migrate existing domains - accountStorageMap, err := m.migrateDomains(address) + accountStorageMap, err := m.migrateDomainRegisters(address) if err != nil { return nil, err } @@ -128,14 +74,14 @@ func (m *DomainRegisterMigration) MigrateAccount( return nil, nil } - accountStorageMapSlabIndex := accountStorageMap.SlabID().Index() + slabIndex := accountStorageMap.SlabID().Index() // Write account register errors.WrapPanic(func() { err = m.ledger.SetValue( address[:], []byte(AccountStorageKey), - accountStorageMapSlabIndex[:], + slabIndex[:], ) }) if err != nil { @@ -145,16 +91,25 @@ func (m *DomainRegisterMigration) MigrateAccount( return accountStorageMap, nil } -// migrateDomains migrates existing domain storage maps and removes domain registers. 
-func (m *DomainRegisterMigration) migrateDomains( +// migrateDomainRegisters migrates all existing domain storage maps to a new account storage map, +// and removes the domain registers. +func (m *DomainRegisterMigration) migrateDomainRegisters( address common.Address, -) (*interpreter.AccountStorageMap, error) { +) ( + *interpreter.AccountStorageMap, + error, +) { var accountStorageMap *interpreter.AccountStorageMap for _, domain := range common.AllStorageDomains { - domainStorageMap, err := m.getDomainStorageMap(m.ledger, m.storage, address, domain) + domainStorageMap, err := getDomainStorageMapFromV1DomainRegister( + m.ledger, + m.storage, + address, + domain, + ) if err != nil { return nil, err } @@ -165,7 +120,11 @@ func (m *DomainRegisterMigration) migrateDomains( } if accountStorageMap == nil { - accountStorageMap = interpreter.NewAccountStorageMap(m.memoryGauge, m.storage, atree.Address(address)) + accountStorageMap = interpreter.NewAccountStorageMap( + m.memoryGauge, + m.storage, + atree.Address(address), + ) } // Migrate (insert) existing domain storage map to account storage map @@ -194,11 +153,3 @@ func (m *DomainRegisterMigration) migrateDomains( return accountStorageMap, nil } - -func isMigrated(ledger atree.Ledger, address common.Address) (bool, error) { - _, registerExists, err := getSlabIndexFromRegisterValue(ledger, address, []byte(AccountStorageKey)) - if err != nil { - return false, err - } - return registerExists, nil -} diff --git a/runtime/migrate_domain_registers_test.go b/runtime/migrate_domain_registers_test.go index 75f016748b..db1038f61e 100644 --- a/runtime/migrate_domain_registers_test.go +++ b/runtime/migrate_domain_registers_test.go @@ -31,7 +31,6 @@ import ( "github.com/onflow/atree" "github.com/onflow/cadence/common" - "github.com/onflow/cadence/common/orderedmap" "github.com/onflow/cadence/errors" "github.com/onflow/cadence/interpreter" "github.com/onflow/cadence/runtime" @@ -42,20 +41,6 @@ import ( func TestMigrateDomainRegisters(t *testing.T) { t.Parallel() - alwaysMigrate := func(common.Address) bool { - return true - } - - neverMigrate := func(common.Address) bool { - return false - } - - migrateSpecificAccount := func(addressToMigrate common.Address) func(common.Address) bool { - return func(address common.Address) bool { - return address == addressToMigrate - } - } - isAtreeRegister := func(key string) bool { return key[0] == '$' && len(key) == 9 } @@ -74,31 +59,7 @@ func TestMigrateDomainRegisters(t *testing.T) { address1 := common.MustBytesToAddress([]byte{0x1}) address2 := common.MustBytesToAddress([]byte{0x2}) - t.Run("no accounts", func(t *testing.T) { - t.Parallel() - - ledger := NewTestLedger(nil, nil) - storage := runtime.NewStorage( - ledger, - nil, - runtime.StorageConfig{ - StorageFormatV2Enabled: true, - }, - ) - - inter := NewTestInterpreterWithStorage(t, storage) - - migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) - - migratedAccounts, err := migrator.MigrateAccounts(nil, alwaysMigrate) - require.NoError(t, err) - require.True(t, migratedAccounts == nil || migratedAccounts.Len() == 0) - - err = storage.FastCommit(goruntime.NumCPU()) - require.NoError(t, err) - - require.Equal(t, 0, len(ledger.StoredValues)) - }) + addresses := []common.Address{address2, address1} t.Run("accounts without domain registers", func(t *testing.T) { t.Parallel() @@ -116,15 +77,13 @@ func TestMigrateDomainRegisters(t *testing.T) { migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) - accounts := 
&orderedmap.OrderedMap[common.Address, struct{}]{} - accounts.Set(address2, struct{}{}) - accounts.Set(address1, struct{}{}) - - migratedAccounts, err := migrator.MigrateAccounts(accounts, alwaysMigrate) - require.NoError(t, err) - require.True(t, migratedAccounts == nil || migratedAccounts.Len() == 0) + for _, address := range addresses { + accountStorageMap, err := migrator.MigrateAccount(address) + require.Nil(t, accountStorageMap) + require.NoError(t, err) + } - err = storage.FastCommit(goruntime.NumCPU()) + err := storage.FastCommit(goruntime.NumCPU()) require.NoError(t, err) require.Equal(t, 0, len(ledger.StoredValues)) @@ -162,30 +121,26 @@ func TestMigrateDomainRegisters(t *testing.T) { migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) - accounts := &orderedmap.OrderedMap[common.Address, struct{}]{} - accounts.Set(address2, struct{}{}) - accounts.Set(address1, struct{}{}) - - migratedAccounts, err := migrator.MigrateAccounts(accounts, alwaysMigrate) - require.NoError(t, err) - require.NotNil(t, migratedAccounts) - require.Equal(t, accounts.Len(), migratedAccounts.Len()) - require.Equal(t, address2, migratedAccounts.Oldest().Key) - require.Equal(t, address1, migratedAccounts.Newest().Key) + var accountStorageMaps []*interpreter.AccountStorageMap + for _, address := range addresses { + accountStorageMap, err := migrator.MigrateAccount(address) + require.NotNil(t, accountStorageMap) + require.NoError(t, err) + accountStorageMaps = append(accountStorageMaps, accountStorageMap) + } - err = storage.FastCommit(goruntime.NumCPU()) + err := storage.FastCommit(goruntime.NumCPU()) require.NoError(t, err) // Check non-atree registers nonAtreeRegisters := getNonAtreeRegisters(ledger.StoredValues) - require.Equal(t, accounts.Len(), len(nonAtreeRegisters)) + require.Equal(t, len(addresses), len(nonAtreeRegisters)) require.Contains(t, nonAtreeRegisters, string(address1[:])+"|"+runtime.AccountStorageKey) require.Contains(t, nonAtreeRegisters, string(address2[:])+"|"+runtime.AccountStorageKey) // Check atree storage - expectedRootSlabIDs := make([]atree.SlabID, 0, migratedAccounts.Len()) - for pair := migratedAccounts.Oldest(); pair != nil; pair = pair.Next() { - accountStorageMap := pair.Value + expectedRootSlabIDs := make([]atree.SlabID, 0, len(accountStorageMaps)) + for _, accountStorageMap := range accountStorageMaps { expectedRootSlabIDs = append(expectedRootSlabIDs, accountStorageMap.SlabID()) } @@ -229,100 +184,22 @@ func TestMigrateDomainRegisters(t *testing.T) { migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) - accounts := &orderedmap.OrderedMap[common.Address, struct{}]{} - accounts.Set(address2, struct{}{}) - accounts.Set(address1, struct{}{}) - - migratedAccounts, err := migrator.MigrateAccounts(accounts, alwaysMigrate) - require.NoError(t, err) - require.True(t, migratedAccounts == nil || migratedAccounts.Len() == 0) + for _, address := range addresses { + accountStorageMap, err := migrator.MigrateAccount(address) + require.Nil(t, accountStorageMap) + require.NoError(t, err) + } // Check account storage map data for address, accountValues := range accountsValues { - checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) - } - }) - - t.Run("never migration predicate", func(t *testing.T) { - t.Parallel() - - accountsInfo := []accountInfo{ - { - address: address1, - domains: []domainInfo{ - {domain: common.PathDomainStorage.StorageDomain(), domainStorageMapCount: 10, maxDepth: 3}, - }, - }, - { 
- address: address2, - domains: []domainInfo{ - {domain: common.PathDomainPublic.StorageDomain(), domainStorageMapCount: 10, maxDepth: 3}, - }, - }, - } - - ledger, _ := newTestLedgerWithUnmigratedAccounts(t, nil, nil, accountsInfo) - storage := runtime.NewStorage( - ledger, - nil, - runtime.StorageConfig{ - StorageFormatV2Enabled: true, - }, - ) - - inter := NewTestInterpreterWithStorage(t, storage) - - migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) - - accounts := &orderedmap.OrderedMap[common.Address, struct{}]{} - accounts.Set(address2, struct{}{}) - accounts.Set(address1, struct{}{}) - - migratedAccounts, err := migrator.MigrateAccounts(accounts, neverMigrate) - require.NoError(t, err) - require.True(t, migratedAccounts == nil || migratedAccounts.Len() == 0) - }) - - t.Run("selective migration predicate", func(t *testing.T) { - t.Parallel() - - accountsInfo := []accountInfo{ - { - address: address1, - domains: []domainInfo{ - {domain: common.PathDomainStorage.StorageDomain(), domainStorageMapCount: 10, maxDepth: 3}, - }, - }, - { - address: address2, - domains: []domainInfo{ - {domain: common.PathDomainPublic.StorageDomain(), domainStorageMapCount: 10, maxDepth: 3}, - }, - }, + checkAccountStorageMapData( + t, + ledger.StoredValues, + ledger.StorageIndices, + address, + accountValues, + ) } - - ledger, _ := newTestLedgerWithUnmigratedAccounts(t, nil, nil, accountsInfo) - storage := runtime.NewStorage( - ledger, - nil, - runtime.StorageConfig{ - StorageFormatV2Enabled: true, - }, - ) - - inter := NewTestInterpreterWithStorage(t, storage) - - migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) - - accounts := &orderedmap.OrderedMap[common.Address, struct{}]{} - accounts.Set(address2, struct{}{}) - accounts.Set(address1, struct{}{}) - - migratedAccounts, err := migrator.MigrateAccounts(accounts, migrateSpecificAccount(address2)) - require.NoError(t, err) - require.NotNil(t, migratedAccounts) - require.Equal(t, 1, migratedAccounts.Len()) - require.Equal(t, address2, migratedAccounts.Oldest().Key) }) } diff --git a/runtime/storage.go b/runtime/storage.go index 7460c8c4b2..8bd8be96ec 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -399,62 +399,6 @@ func (s *Storage) commit(inter *interpreter.Interpreter, commitContractUpdates b } } -// TODO: -//func (s *Storage) migrateAccounts(inter *interpreter.Interpreter) error { -// // Predicate function allows migration for accounts with write ops. -// migrateAccountPred := func(address common.Address) bool { -// return s.PersistentSlabStorage.HasUnsavedChanges(atree.Address(address)) -// } -// -// // getDomainStorageMap function returns cached domain storage map if it is available -// // before loading domain storage map from storage. -// // This is necessary to migrate uncommitted (new) but cached domain storage map. -// getDomainStorageMap := func( -// ledger atree.Ledger, -// storage atree.SlabStorage, -// address common.Address, -// domain common.StorageDomain, -// ) (*interpreter.DomainStorageMap, error) { -// domainStorageKey := interpreter.NewStorageDomainKey(s.memoryGauge, address, domain) -// -// // Get cached domain storage map if available. 
-// domainStorageMap := s.cachedDomainStorageMaps[domainStorageKey] -// -// if domainStorageMap != nil { -// return domainStorageMap, nil -// } -// -// return getDomainStorageMapFromV1DomainRegister(ledger, storage, address, domain) -// } -// -// migrator := NewDomainRegisterMigration(s.Ledger, s.PersistentSlabStorage, inter, s.memoryGauge) -// migrator.SetGetDomainStorageMapFunc(getDomainStorageMap) -// -// migratedAccounts, err := migrator.MigrateAccounts(s.unmigratedAccounts, migrateAccountPred) -// if err != nil { -// return err -// } -// -// if migratedAccounts == nil { -// return nil -// } -// -// // Update internal state with migrated accounts -// for pair := migratedAccounts.Oldest(); pair != nil; pair = pair.Next() { -// address := pair.Key -// accountStorageMap := pair.Value -// -// // Cache migrated account storage map -// accountStorageKey := interpreter.NewStorageKey(s.memoryGauge, address, AccountStorageKey) -// s.cachedAccountStorageMaps[accountStorageKey] = accountStorageMap -// -// // Remove migrated accounts from unmigratedAccounts -// s.unmigratedAccounts.Delete(address) -// } -// -// return nil -//} - func (s *Storage) CheckHealth() error { // Check slab storage health From f23eca0cb99895d6e5e5d2f868a2cb2590389339 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Thu, 14 Nov 2024 16:06:20 -0800 Subject: [PATCH 27/52] try to run manual migration --- runtime/storage.go | 28 +++++++++++++++++++++------- runtime/storage_test.go | 24 ++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 7 deletions(-) diff --git a/runtime/storage.go b/runtime/storage.go index 8bd8be96ec..c07fff17e7 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -356,7 +356,7 @@ func (s *Storage) Commit(inter *interpreter.Interpreter, commitContractUpdates b return s.commit(inter, commitContractUpdates, true) } -// NondeterministicCommit serializes and commits all values in the deltas storage +// Deprecated: NondeterministicCommit serializes and commits all values in the deltas storage // in nondeterministic order. This function is used when commit ordering isn't // required (e.g. migration programs). 
func (s *Storage) NondeterministicCommit(inter *interpreter.Interpreter, commitContractUpdates bool) error { @@ -381,21 +381,35 @@ func (s *Storage) commit(inter *interpreter.Interpreter, commitContractUpdates b // Commit the underlying slab storage's writes - size := s.PersistentSlabStorage.DeltasSizeWithoutTempAddresses() + slabStorage := s.PersistentSlabStorage + + return CommitSlabStorage( + slabStorage, + inter, + deterministic, + ) +} + +func CommitSlabStorage( + slabStorage *atree.PersistentSlabStorage, + inter *interpreter.Interpreter, + deterministic bool, +) error { + size := slabStorage.DeltasSizeWithoutTempAddresses() if size > 0 { inter.ReportComputation(common.ComputationKindEncodeValue, uint(size)) usage := common.NewBytesMemoryUsage(int(size)) - common.UseMemory(s.memoryGauge, usage) + common.UseMemory(inter, usage) } - deltas := s.PersistentSlabStorage.DeltasWithoutTempAddresses() - common.UseMemory(s.memoryGauge, common.NewAtreeEncodedSlabMemoryUsage(deltas)) + deltas := slabStorage.DeltasWithoutTempAddresses() + common.UseMemory(inter, common.NewAtreeEncodedSlabMemoryUsage(deltas)) // TODO: report encoding metric for all encoded slabs if deterministic { - return s.PersistentSlabStorage.FastCommit(runtime.NumCPU()) + return slabStorage.FastCommit(runtime.NumCPU()) } else { - return s.PersistentSlabStorage.NondeterministicFastCommit(runtime.NumCPU()) + return slabStorage.NondeterministicFastCommit(runtime.NumCPU()) } } diff --git a/runtime/storage_test.go b/runtime/storage_test.go index 3b16220127..066189a086 100644 --- a/runtime/storage_test.go +++ b/runtime/storage_test.go @@ -7140,6 +7140,18 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { err := storage.Commit(inter, commitContractUpdates) require.NoError(t, err) + migration := NewDomainRegisterMigration(ledger, storage, inter, nil) + accountStorageMap, err := migration.MigrateAccount(address) + require.NotNil(t, accountStorageMap) + require.NoError(t, err) + + err = CommitSlabStorage( + storage.PersistentSlabStorage, + inter, + true, + ) + require.NoError(t, err) + // Create a new storage newLedger := NewTestLedgerWithData(onRead, onWrite, ledger.StoredValues, ledger.StorageIndices) @@ -8029,6 +8041,18 @@ func TestDomainRegisterMigrationForLargeAccount(t *testing.T) { // Check there are writes to underlying storage require.True(t, writeCount > 0) + migration := NewDomainRegisterMigration(ledger, storage, inter, nil) + accountStorageMap, err := migration.MigrateAccount(address) + require.NotNil(t, accountStorageMap) + require.NoError(t, err) + + err = CommitSlabStorage( + storage.PersistentSlabStorage, + inter, + true, + ) + require.NoError(t, err) + // Check there isn't any domain registers nonAtreeRegisters := make(map[string][]byte) for k, v := range ledger.StoredValues { From dfca4978aed9e78d7bede0054c571cc92ec5a954 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Thu, 14 Nov 2024 16:35:51 -0800 Subject: [PATCH 28/52] bring back logic to automatically migrate accounts with writes on commit --- runtime/account_storage_v2.go | 17 ++--- runtime/migrate_domain_registers.go | 34 ++++++--- runtime/migrate_domain_registers_test.go | 24 ++++++- runtime/storage.go | 88 +++++++++++++++++++----- runtime/storage_test.go | 24 ------- 5 files changed, 125 insertions(+), 62 deletions(-) diff --git a/runtime/account_storage_v2.go b/runtime/account_storage_v2.go index 4c76d3a4a0..8767056b96 100644 --- a/runtime/account_storage_v2.go +++ b/runtime/account_storage_v2.go @@ -33,8 +33,7 @@ type 
AccountStorageV2 struct { memoryGauge common.MemoryGauge // cachedAccountStorageMaps is a cache of account storage maps. - // Key is StorageKey{address, accountStorageKey} and value is account storage map. - cachedAccountStorageMaps map[interpreter.StorageKey]*interpreter.AccountStorageMap + cachedAccountStorageMaps map[common.Address]*interpreter.AccountStorageMap // newAccountStorageMapSlabIndices contains root slab index of new account storage maps. // The indices are saved using Ledger.SetValue() during Commit(). @@ -94,12 +93,10 @@ func (s *AccountStorageV2) getAccountStorageMap( ) ( accountStorageMap *interpreter.AccountStorageMap, ) { - accountStorageKey := s.accountStorageKey(address) - // Return cached account storage map if it exists. if s.cachedAccountStorageMaps != nil { - accountStorageMap = s.cachedAccountStorageMaps[accountStorageKey] + accountStorageMap = s.cachedAccountStorageMaps[address] if accountStorageMap != nil { return accountStorageMap } @@ -108,7 +105,7 @@ func (s *AccountStorageV2) getAccountStorageMap( defer func() { if accountStorageMap != nil { s.cacheAccountStorageMap( - accountStorageKey, + address, accountStorageMap, ) } @@ -130,13 +127,13 @@ func (s *AccountStorageV2) getAccountStorageMap( } func (s *AccountStorageV2) cacheAccountStorageMap( - accountStorageKey interpreter.StorageKey, + address common.Address, accountStorageMap *interpreter.AccountStorageMap, ) { if s.cachedAccountStorageMaps == nil { - s.cachedAccountStorageMaps = map[interpreter.StorageKey]*interpreter.AccountStorageMap{} + s.cachedAccountStorageMaps = map[common.Address]*interpreter.AccountStorageMap{} } - s.cachedAccountStorageMaps[accountStorageKey] = accountStorageMap + s.cachedAccountStorageMaps[address] = accountStorageMap } func (s *AccountStorageV2) storeNewAccountStorageMap( @@ -156,7 +153,7 @@ func (s *AccountStorageV2) storeNewAccountStorageMap( s.SetNewAccountStorageMapSlabIndex(accountStorageKey, slabIndex) s.cacheAccountStorageMap( - accountStorageKey, + address, accountStorageMap, ) diff --git a/runtime/migrate_domain_registers.go b/runtime/migrate_domain_registers.go index d5b7f7aa0a..08d6afa55d 100644 --- a/runtime/migrate_domain_registers.go +++ b/runtime/migrate_domain_registers.go @@ -26,12 +26,23 @@ import ( "github.com/onflow/cadence/interpreter" ) +type GetDomainStorageMapFunc func( + ledger atree.Ledger, + storage atree.SlabStorage, + address common.Address, + domain common.StorageDomain, +) ( + *interpreter.DomainStorageMap, + error, +) + // DomainRegisterMigration migrates domain registers to account storage maps. 
type DomainRegisterMigration struct { - ledger atree.Ledger - storage atree.SlabStorage - inter *interpreter.Interpreter - memoryGauge common.MemoryGauge + ledger atree.Ledger + storage atree.SlabStorage + inter *interpreter.Interpreter + memoryGauge common.MemoryGauge + getDomainStorageMap GetDomainStorageMapFunc } func NewDomainRegisterMigration( @@ -39,12 +50,17 @@ func NewDomainRegisterMigration( storage atree.SlabStorage, inter *interpreter.Interpreter, memoryGauge common.MemoryGauge, + getDomainStorageMap GetDomainStorageMapFunc, ) *DomainRegisterMigration { + if getDomainStorageMap == nil { + getDomainStorageMap = getDomainStorageMapFromV1DomainRegister + } return &DomainRegisterMigration{ - ledger: ledger, - storage: storage, - inter: inter, - memoryGauge: memoryGauge, + ledger: ledger, + storage: storage, + inter: inter, + memoryGauge: memoryGauge, + getDomainStorageMap: getDomainStorageMap, } } @@ -104,7 +120,7 @@ func (m *DomainRegisterMigration) migrateDomainRegisters( for _, domain := range common.AllStorageDomains { - domainStorageMap, err := getDomainStorageMapFromV1DomainRegister( + domainStorageMap, err := m.getDomainStorageMap( m.ledger, m.storage, address, diff --git a/runtime/migrate_domain_registers_test.go b/runtime/migrate_domain_registers_test.go index db1038f61e..f1d6e4304c 100644 --- a/runtime/migrate_domain_registers_test.go +++ b/runtime/migrate_domain_registers_test.go @@ -75,7 +75,13 @@ func TestMigrateDomainRegisters(t *testing.T) { inter := NewTestInterpreterWithStorage(t, storage) - migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) + migrator := runtime.NewDomainRegisterMigration( + ledger, + storage, + inter, + nil, + nil, + ) for _, address := range addresses { accountStorageMap, err := migrator.MigrateAccount(address) @@ -119,7 +125,13 @@ func TestMigrateDomainRegisters(t *testing.T) { inter := NewTestInterpreterWithStorage(t, storage) - migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) + migrator := runtime.NewDomainRegisterMigration( + ledger, + storage, + inter, + nil, + nil, + ) var accountStorageMaps []*interpreter.AccountStorageMap for _, address := range addresses { @@ -182,7 +194,13 @@ func TestMigrateDomainRegisters(t *testing.T) { inter := NewTestInterpreterWithStorage(t, storage) - migrator := runtime.NewDomainRegisterMigration(ledger, storage, inter, nil) + migrator := runtime.NewDomainRegisterMigration( + ledger, + storage, + inter, + nil, + nil, + ) for _, address := range addresses { accountStorageMap, err := migrator.MigrateAccount(address) diff --git a/runtime/storage.go b/runtime/storage.go index c07fff17e7..506fdf4ff2 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -49,7 +49,7 @@ type Storage struct { // cachedV1Accounts contains the cached result of determining // if the account is in storage format v1 or not. - cachedV1Accounts map[common.Address]bool + cachedV1Accounts *orderedmap.OrderedMap[common.Address, bool] // contractUpdates is a cache of contract updates. // Key is StorageKey{contract_address, contract_name} and value is contract composite value. 
@@ -185,7 +185,7 @@ func (s *Storage) IsV1Account(address common.Address) (isV1 bool) { if s.cachedV1Accounts != nil { var present bool - isV1, present = s.cachedV1Accounts[address] + isV1, present = s.cachedV1Accounts.Get(address) if present { return isV1 } @@ -230,9 +230,9 @@ func (s *Storage) IsV1Account(address common.Address) (isV1 bool) { func (s *Storage) cacheIsV1Account(address common.Address, isV1 bool) { if s.cachedV1Accounts == nil { - s.cachedV1Accounts = map[common.Address]bool{} + s.cachedV1Accounts = &orderedmap.OrderedMap[common.Address, bool]{} } - s.cachedV1Accounts[address] = isV1 + s.cachedV1Accounts.Set(address, isV1) } func (s *Storage) cacheDomainStorageMap( @@ -379,22 +379,17 @@ func (s *Storage) commit(inter *interpreter.Interpreter, commitContractUpdates b return err } + if s.Config.StorageFormatV2Enabled { + err = s.migrateV1AccountsToV2(inter) + if err != nil { + return err + } + } + // Commit the underlying slab storage's writes slabStorage := s.PersistentSlabStorage - return CommitSlabStorage( - slabStorage, - inter, - deterministic, - ) -} - -func CommitSlabStorage( - slabStorage *atree.PersistentSlabStorage, - inter *interpreter.Interpreter, - deterministic bool, -) error { size := slabStorage.DeltasSizeWithoutTempAddresses() if size > 0 { inter.ReportComputation(common.ComputationKindEncodeValue, uint(size)) @@ -413,6 +408,67 @@ func CommitSlabStorage( } } +func (s *Storage) migrateV1AccountsToV2(inter *interpreter.Interpreter) error { + + if s.cachedV1Accounts == nil { + return nil + } + + // getDomainStorageMap function returns cached domain storage map if it is available + // before loading domain storage map from storage. + // This is necessary to migrate uncommitted (new) but cached domain storage map. + getDomainStorageMap := func( + ledger atree.Ledger, + storage atree.SlabStorage, + address common.Address, + domain common.StorageDomain, + ) (*interpreter.DomainStorageMap, error) { + domainStorageKey := interpreter.NewStorageDomainKey(s.memoryGauge, address, domain) + + // Get cached domain storage map if available. + domainStorageMap := s.cachedDomainStorageMaps[domainStorageKey] + + if domainStorageMap != nil { + return domainStorageMap, nil + } + + return getDomainStorageMapFromV1DomainRegister(ledger, storage, address, domain) + } + + migrator := NewDomainRegisterMigration( + s.Ledger, + s.PersistentSlabStorage, + inter, + s.memoryGauge, + getDomainStorageMap, + ) + + for pair := s.cachedV1Accounts.Oldest(); pair != nil; pair = pair.Next() { + address := pair.Key + isV1 := pair.Value + + if !isV1 || !s.PersistentSlabStorage.HasUnsavedChanges(atree.Address(address)) { + continue + } + + accountStorageMap, err := migrator.MigrateAccount(address) + if err != nil { + return err + } + + // TODO: is this all that is needed? 
+ + s.AccountStorageV2.cacheAccountStorageMap( + address, + accountStorageMap, + ) + + s.cacheIsV1Account(address, false) + } + + return nil +} + func (s *Storage) CheckHealth() error { // Check slab storage health diff --git a/runtime/storage_test.go b/runtime/storage_test.go index 066189a086..3b16220127 100644 --- a/runtime/storage_test.go +++ b/runtime/storage_test.go @@ -7140,18 +7140,6 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { err := storage.Commit(inter, commitContractUpdates) require.NoError(t, err) - migration := NewDomainRegisterMigration(ledger, storage, inter, nil) - accountStorageMap, err := migration.MigrateAccount(address) - require.NotNil(t, accountStorageMap) - require.NoError(t, err) - - err = CommitSlabStorage( - storage.PersistentSlabStorage, - inter, - true, - ) - require.NoError(t, err) - // Create a new storage newLedger := NewTestLedgerWithData(onRead, onWrite, ledger.StoredValues, ledger.StorageIndices) @@ -8041,18 +8029,6 @@ func TestDomainRegisterMigrationForLargeAccount(t *testing.T) { // Check there are writes to underlying storage require.True(t, writeCount > 0) - migration := NewDomainRegisterMigration(ledger, storage, inter, nil) - accountStorageMap, err := migration.MigrateAccount(address) - require.NotNil(t, accountStorageMap) - require.NoError(t, err) - - err = CommitSlabStorage( - storage.PersistentSlabStorage, - inter, - true, - ) - require.NoError(t, err) - // Check there isn't any domain registers nonAtreeRegisters := make(map[string][]byte) for k, v := range ledger.StoredValues { From 16130ed936b181f9ea3f14813d95ba8f1daf0022 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Thu, 14 Nov 2024 19:04:07 -0600 Subject: [PATCH 29/52] Fix storage test --- runtime/storage_test.go | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/runtime/storage_test.go b/runtime/storage_test.go index 3b16220127..d01458284a 100644 --- a/runtime/storage_test.go +++ b/runtime/storage_test.go @@ -7102,7 +7102,7 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { ledger, nil, StorageConfig{ - StorageFormatV2Enabled: true, + StorageFormatV2Enabled: false, }, ) @@ -7353,12 +7353,28 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { require.True(t, len(writeEntries) > 1+len(tc.existingDomains)+len(tc.newDomains)) i := 0 + + // Check new domain register committed in V1 format. + for _, domain := range common.AllStorageDomains { + + if slices.Contains(tc.newDomains, domain) { + + // New domains are committed in V1 format (with domain register). + require.Equal(t, address[:], writeEntries[i].Owner) + require.Equal(t, []byte(domain.Identifier()), writeEntries[i].Key) + require.True(t, len(writeEntries[i].Value) > 0) + + i++ + } + } + + // Check modified registers in migration. for _, domain := range common.AllStorageDomains { if slices.Contains(tc.existingDomains, domain) || slices.Contains(tc.newDomains, domain) { - // Existing and new domain registers are removed. + // Existing and new domain registers are removed (migrated). // Removing new (non-existent) domain registers is no-op. 
require.Equal(t, address[:], writeEntries[i].Owner) require.Equal(t, []byte(domain.Identifier()), writeEntries[i].Key) From d3c76d5311e7a78dea6d3637df46d3dc22f8cf7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Fri, 15 Nov 2024 10:21:27 -0800 Subject: [PATCH 30/52] allow scheduling migrations of accounts to V2 --- common/address.go | 5 ++++ runtime/storage.go | 54 +++++++++++++++++++++++++++-------------- runtime/storage_test.go | 14 +++++++++++ 3 files changed, 55 insertions(+), 18 deletions(-) diff --git a/common/address.go b/common/address.go index 5c1a354f59..04c2bade70 100644 --- a/common/address.go +++ b/common/address.go @@ -19,6 +19,7 @@ package common import ( + "bytes" "encoding/hex" goErrors "errors" "fmt" @@ -112,6 +113,10 @@ func (a Address) HexWithPrefix() string { return fmt.Sprintf("0x%x", [AddressLength]byte(a)) } +func (a Address) Compare(other Address) int { + return bytes.Compare(a[:], other[:]) +} + // HexToAddress converts a hex string to an Address after // ensuring that the hex string starts with the prefix 0x. func HexToAddressAssertPrefix(h string) (Address, error) { diff --git a/runtime/storage.go b/runtime/storage.go index 506fdf4ff2..843c3e9e04 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -49,7 +49,7 @@ type Storage struct { // cachedV1Accounts contains the cached result of determining // if the account is in storage format v1 or not. - cachedV1Accounts *orderedmap.OrderedMap[common.Address, bool] + cachedV1Accounts map[common.Address]bool // contractUpdates is a cache of contract updates. // Key is StorageKey{contract_address, contract_name} and value is contract composite value. @@ -61,8 +61,9 @@ type Storage struct { Config StorageConfig - AccountStorageV1 *AccountStorageV1 - AccountStorageV2 *AccountStorageV2 + AccountStorageV1 *AccountStorageV1 + AccountStorageV2 *AccountStorageV2 + scheduledV2Migrations []common.Address } var _ atree.SlabStorage = &Storage{} @@ -183,12 +184,8 @@ func (s *Storage) IsV1Account(address common.Address) (isV1 bool) { // Check cache - if s.cachedV1Accounts != nil { - var present bool - isV1, present = s.cachedV1Accounts.Get(address) - if present { - return isV1 - } + if isV1, present := s.cachedV1Accounts[address]; present { + return isV1 } // Cache result @@ -230,9 +227,9 @@ func (s *Storage) IsV1Account(address common.Address) (isV1 bool) { func (s *Storage) cacheIsV1Account(address common.Address, isV1 bool) { if s.cachedV1Accounts == nil { - s.cachedV1Accounts = &orderedmap.OrderedMap[common.Address, bool]{} + s.cachedV1Accounts = map[common.Address]bool{} } - s.cachedV1Accounts.Set(address, isV1) + s.cachedV1Accounts[address] = isV1 } func (s *Storage) cacheDomainStorageMap( @@ -408,9 +405,23 @@ func (s *Storage) commit(inter *interpreter.Interpreter, commitContractUpdates b } } +func (s *Storage) ScheduleV2Migration(address common.Address) { + s.scheduledV2Migrations = append(s.scheduledV2Migrations, address) +} + +func (s *Storage) ScheduleV2MigrationForModifiedAccounts() { + for address, isV1 := range s.cachedV1Accounts { //nolint:maprange + if !isV1 || !s.PersistentSlabStorage.HasUnsavedChanges(atree.Address(address)) { + continue + } + + s.ScheduleV2Migration(address) + } +} + func (s *Storage) migrateV1AccountsToV2(inter *interpreter.Interpreter) error { - if s.cachedV1Accounts == nil { + if len(s.scheduledV2Migrations) == 0 { return nil } @@ -443,13 +454,18 @@ func (s *Storage) migrateV1AccountsToV2(inter *interpreter.Interpreter) error { getDomainStorageMap, ) - for pair := 
s.cachedV1Accounts.Oldest(); pair != nil; pair = pair.Next() { - address := pair.Key - isV1 := pair.Value + // Ensure the scheduled accounts are migrated in a deterministic order - if !isV1 || !s.PersistentSlabStorage.HasUnsavedChanges(atree.Address(address)) { - continue - } + sort.Slice( + s.scheduledV2Migrations, + func(i, j int) bool { + address1 := s.scheduledV2Migrations[i] + address2 := s.scheduledV2Migrations[j] + return address1.Compare(address2) < 0 + }, + ) + + for _, address := range s.scheduledV2Migrations { accountStorageMap, err := migrator.MigrateAccount(address) if err != nil { @@ -466,6 +482,8 @@ func (s *Storage) migrateV1AccountsToV2(inter *interpreter.Interpreter) error { s.cacheIsV1Account(address, false) } + s.scheduledV2Migrations = nil + return nil } diff --git a/runtime/storage_test.go b/runtime/storage_test.go index d01458284a..3ebe7d88fe 100644 --- a/runtime/storage_test.go +++ b/runtime/storage_test.go @@ -7181,6 +7181,8 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { domainStorageMap := storage.GetDomainStorageMap(inter, address, nonExistingDomain, createIfNotExists) require.Nil(t, domainStorageMap) + storage.ScheduleV2MigrationForModifiedAccounts() + // Commit changes const commitContractUpdates = false err := storage.Commit(inter, commitContractUpdates) @@ -7335,6 +7337,9 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { accountValues[domain] = writeToDomainStorageMap(inter, domainStorageMap, tc.newDomainStorageMapCount, random) } + // TODO: + storage.ScheduleV2MigrationForModifiedAccounts() + // Commit changes const commitContractUpdates = false err := storage.Commit(inter, commitContractUpdates) @@ -7480,6 +7485,9 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { } } + // TODO: + storage.ScheduleV2MigrationForModifiedAccounts() + // Commit changes const commitContractUpdates = false err := storage.Commit(inter, commitContractUpdates) @@ -7610,6 +7618,9 @@ func TestRuntimeStorageForUnmigratedAccount(t *testing.T) { } } + // TODO: + storage.ScheduleV2MigrationForModifiedAccounts() + // Commit changes const commitContractUpdates = false err := storage.Commit(inter, commitContractUpdates) @@ -8037,6 +8048,9 @@ func TestDomainRegisterMigrationForLargeAccount(t *testing.T) { accountValues[domain] = make(domainStorageMapValues) + // TODO: + storage.ScheduleV2MigrationForModifiedAccounts() + // Commit changes const commitContractUpdates = false err := storage.Commit(inter, commitContractUpdates) From 6d954443c88363d658aceaf0305a721581224d97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Fri, 15 Nov 2024 14:12:56 -0800 Subject: [PATCH 31/52] simplify ordered maps to simple maps. 
sort keys on commit --- interpreter/storage.go | 14 +++++++ runtime/account_storage_v1.go | 47 +++++++++++++++++++---- runtime/account_storage_v2.go | 70 +++++++++++++++++++++++------------ runtime/storage_test.go | 7 +--- 4 files changed, 101 insertions(+), 37 deletions(-) diff --git a/interpreter/storage.go b/interpreter/storage.go index 3b0951efce..1927cdfa0d 100644 --- a/interpreter/storage.go +++ b/interpreter/storage.go @@ -20,6 +20,7 @@ package interpreter import ( "bytes" + "cmp" "io" "math" "strings" @@ -106,6 +107,19 @@ type StorageDomainKey struct { Address common.Address } +func (k StorageDomainKey) Compare(o StorageDomainKey) int { + switch bytes.Compare(k.Address[:], o.Address[:]) { + case -1: + return -1 + case 0: + return cmp.Compare(k.Domain, o.Domain) + case 1: + return 1 + default: + panic(errors.NewUnreachableError()) + } +} + func NewStorageDomainKey( memoryGauge common.MemoryGauge, address common.Address, diff --git a/runtime/account_storage_v1.go b/runtime/account_storage_v1.go index 0c52537726..a0ababd3ce 100644 --- a/runtime/account_storage_v1.go +++ b/runtime/account_storage_v1.go @@ -19,10 +19,11 @@ package runtime import ( + "sort" + "github.com/onflow/atree" "github.com/onflow/cadence/common" - "github.com/onflow/cadence/common/orderedmap" "github.com/onflow/cadence/errors" "github.com/onflow/cadence/interpreter" ) @@ -35,7 +36,7 @@ type AccountStorageV1 struct { // newDomainStorageMapSlabIndices contains root slab indices of new domain storage maps. // The indices are saved using Ledger.SetValue() during commit(). // Key is StorageDomainKey{common.StorageDomain, Address} and value is 8-byte slab index. - newDomainStorageMapSlabIndices *orderedmap.OrderedMap[interpreter.StorageDomainKey, atree.SlabIndex] + newDomainStorageMapSlabIndices map[interpreter.StorageDomainKey]atree.SlabIndex } func NewAccountStorageV1( @@ -91,9 +92,9 @@ func (s *AccountStorageV1) storeNewDomainStorageMap( storageKey := interpreter.NewStorageDomainKey(s.memoryGauge, address, domain) if s.newDomainStorageMapSlabIndices == nil { - s.newDomainStorageMapSlabIndices = &orderedmap.OrderedMap[interpreter.StorageDomainKey, atree.SlabIndex]{} + s.newDomainStorageMapSlabIndices = map[interpreter.StorageDomainKey]atree.SlabIndex{} } - s.newDomainStorageMapSlabIndices.Set(storageKey, slabIndex) + s.newDomainStorageMapSlabIndices[storageKey] = slabIndex return domainStorageMap } @@ -103,13 +104,41 @@ func (s *AccountStorageV1) commit() error { return nil } - for pair := s.newDomainStorageMapSlabIndices.Oldest(); pair != nil; pair = pair.Next() { + type domainStorageMapSlabIndex struct { + StorageDomainKey interpreter.StorageDomainKey + SlabIndex atree.SlabIndex + } + + slabIndices := make([]domainStorageMapSlabIndex, 0, len(s.newDomainStorageMapSlabIndices)) + for storageDomainKey, slabIndex := range s.newDomainStorageMapSlabIndices { //nolint:maprange + slabIndices = append( + slabIndices, + domainStorageMapSlabIndex{ + StorageDomainKey: storageDomainKey, + SlabIndex: slabIndex, + }, + ) + } + sort.Slice( + slabIndices, + func(i, j int) bool { + slabIndex1 := slabIndices[i] + slabIndex2 := slabIndices[j] + domainKey1 := slabIndex1.StorageDomainKey + domainKey2 := slabIndex2.StorageDomainKey + return domainKey1.Compare(domainKey2) < 0 + }, + ) + + for _, slabIndex := range slabIndices { + storageDomainKey := slabIndex.StorageDomainKey + var err error errors.WrapPanic(func() { err = s.ledger.SetValue( - pair.Key.Address[:], - []byte(pair.Key.Domain.Identifier()), - pair.Value[:], + 
storageDomainKey.Address[:], + []byte(storageDomainKey.Domain.Identifier()), + slabIndex.SlabIndex[:], ) }) if err != nil { @@ -117,6 +146,8 @@ func (s *AccountStorageV1) commit() error { } } + s.newDomainStorageMapSlabIndices = nil + return nil } diff --git a/runtime/account_storage_v2.go b/runtime/account_storage_v2.go index 8767056b96..9e737053de 100644 --- a/runtime/account_storage_v2.go +++ b/runtime/account_storage_v2.go @@ -19,10 +19,11 @@ package runtime import ( + "sort" + "github.com/onflow/atree" "github.com/onflow/cadence/common" - "github.com/onflow/cadence/common/orderedmap" "github.com/onflow/cadence/errors" "github.com/onflow/cadence/interpreter" ) @@ -35,10 +36,9 @@ type AccountStorageV2 struct { // cachedAccountStorageMaps is a cache of account storage maps. cachedAccountStorageMaps map[common.Address]*interpreter.AccountStorageMap - // newAccountStorageMapSlabIndices contains root slab index of new account storage maps. - // The indices are saved using Ledger.SetValue() during Commit(). - // Key is StorageKey{address, accountStorageKey} and value is 8-byte slab index. - newAccountStorageMapSlabIndices *orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex] + // newAccountStorageMapSlabIndices contains root slab indices of new account storage maps. + // The indices are saved using Ledger.SetValue() during commit(). + newAccountStorageMapSlabIndices map[common.Address]atree.SlabIndex } func NewAccountStorageV2( @@ -53,14 +53,6 @@ func NewAccountStorageV2( } } -func (s *AccountStorageV2) accountStorageKey(address common.Address) interpreter.StorageKey { - return interpreter.NewStorageKey( - s.memoryGauge, - address, - AccountStorageKey, - ) -} - func (s *AccountStorageV2) GetDomainStorageMap( inter *interpreter.Interpreter, address common.Address, @@ -148,9 +140,10 @@ func (s *AccountStorageV2) storeNewAccountStorageMap( slabIndex := accountStorageMap.SlabID().Index() - accountStorageKey := s.accountStorageKey(address) - - s.SetNewAccountStorageMapSlabIndex(accountStorageKey, slabIndex) + s.SetNewAccountStorageMapSlabIndex( + address, + slabIndex, + ) s.cacheAccountStorageMap( address, @@ -161,13 +154,13 @@ func (s *AccountStorageV2) storeNewAccountStorageMap( } func (s *AccountStorageV2) SetNewAccountStorageMapSlabIndex( - accountStorageKey interpreter.StorageKey, + address common.Address, slabIndex atree.SlabIndex, ) { if s.newAccountStorageMapSlabIndices == nil { - s.newAccountStorageMapSlabIndices = &orderedmap.OrderedMap[interpreter.StorageKey, atree.SlabIndex]{} + s.newAccountStorageMapSlabIndices = map[common.Address]atree.SlabIndex{} } - s.newAccountStorageMapSlabIndices.Set(accountStorageKey, slabIndex) + s.newAccountStorageMapSlabIndices[address] = slabIndex } func (s *AccountStorageV2) commit() error { @@ -175,13 +168,42 @@ func (s *AccountStorageV2) commit() error { return nil } - for pair := s.newAccountStorageMapSlabIndices.Oldest(); pair != nil; pair = pair.Next() { + type accountStorageMapSlabIndex struct { + Address common.Address + SlabIndex atree.SlabIndex + } + + slabIndices := make([]accountStorageMapSlabIndex, 0, len(s.newAccountStorageMapSlabIndices)) + for address, slabIndex := range s.newAccountStorageMapSlabIndices { //nolint:maprange + slabIndices = append( + slabIndices, + accountStorageMapSlabIndex{ + Address: address, + SlabIndex: slabIndex, + }, + ) + } + sort.Slice( + slabIndices, + func(i, j int) bool { + slabIndex1 := slabIndices[i] + slabIndex2 := slabIndices[j] + address1 := slabIndex1.Address + address2 := slabIndex2.Address + 
return address1.Compare(address2) < 0 + }, + ) + + storageKey := []byte(AccountStorageKey) + + for _, slabIndex := range slabIndices { + var err error errors.WrapPanic(func() { err = s.ledger.SetValue( - pair.Key.Address[:], - []byte(pair.Key.Key), - pair.Value[:], + slabIndex.Address[:], + storageKey, + slabIndex.SlabIndex[:], ) }) if err != nil { @@ -189,6 +211,8 @@ func (s *AccountStorageV2) commit() error { } } + s.newAccountStorageMapSlabIndices = nil + return nil } diff --git a/runtime/storage_test.go b/runtime/storage_test.go index 3ebe7d88fe..4e4dc261bc 100644 --- a/runtime/storage_test.go +++ b/runtime/storage_test.go @@ -68,15 +68,10 @@ func withWritesToStorage( var address common.Address random.Read(address[:]) - storageKey := interpreter.StorageKey{ - Address: address, - Key: AccountStorageKey, - } - var slabIndex atree.SlabIndex binary.BigEndian.PutUint32(slabIndex[:], randomIndex) - storage.AccountStorageV2.SetNewAccountStorageMapSlabIndex(storageKey, slabIndex) + storage.AccountStorageV2.SetNewAccountStorageMapSlabIndex(address, slabIndex) } handler(storage, inter) From 95933355022d1c1c4402a823135f01523eef6c12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Fri, 15 Nov 2024 16:48:17 -0800 Subject: [PATCH 32/52] optimize commit for single map --- runtime/account_storage_v1.go | 108 +++++++++++++++++++++++----------- runtime/account_storage_v2.go | 108 ++++++++++++++++++++++------------ 2 files changed, 145 insertions(+), 71 deletions(-) diff --git a/runtime/account_storage_v1.go b/runtime/account_storage_v1.go index a0ababd3ce..76ff904d5d 100644 --- a/runtime/account_storage_v1.go +++ b/runtime/account_storage_v1.go @@ -100,49 +100,69 @@ func (s *AccountStorageV1) storeNewDomainStorageMap( } func (s *AccountStorageV1) commit() error { - if s.newDomainStorageMapSlabIndices == nil { + + switch len(s.newDomainStorageMapSlabIndices) { + case 0: + // Nothing to commit. return nil - } - type domainStorageMapSlabIndex struct { - StorageDomainKey interpreter.StorageDomainKey - SlabIndex atree.SlabIndex - } + case 1: + // Optimize for the common case of a single domain storage map. 
+ + var updated int + for storageDomainKey, slabIndex := range s.newDomainStorageMapSlabIndices { //nolint:maprange + if updated > 0 { + panic(errors.NewUnreachableError()) + } + + err := s.writeStorageDomainSlabIndex( + storageDomainKey, + slabIndex, + ) + if err != nil { + return err + } + + updated++ + } + + default: + // Sort the indices to ensure deterministic order + + type domainStorageMapSlabIndex struct { + StorageDomainKey interpreter.StorageDomainKey + SlabIndex atree.SlabIndex + } - slabIndices := make([]domainStorageMapSlabIndex, 0, len(s.newDomainStorageMapSlabIndices)) - for storageDomainKey, slabIndex := range s.newDomainStorageMapSlabIndices { //nolint:maprange - slabIndices = append( + slabIndices := make([]domainStorageMapSlabIndex, 0, len(s.newDomainStorageMapSlabIndices)) + for storageDomainKey, slabIndex := range s.newDomainStorageMapSlabIndices { //nolint:maprange + slabIndices = append( + slabIndices, + domainStorageMapSlabIndex{ + StorageDomainKey: storageDomainKey, + SlabIndex: slabIndex, + }, + ) + } + sort.Slice( slabIndices, - domainStorageMapSlabIndex{ - StorageDomainKey: storageDomainKey, - SlabIndex: slabIndex, + func(i, j int) bool { + slabIndex1 := slabIndices[i] + slabIndex2 := slabIndices[j] + domainKey1 := slabIndex1.StorageDomainKey + domainKey2 := slabIndex2.StorageDomainKey + return domainKey1.Compare(domainKey2) < 0 }, ) - } - sort.Slice( - slabIndices, - func(i, j int) bool { - slabIndex1 := slabIndices[i] - slabIndex2 := slabIndices[j] - domainKey1 := slabIndex1.StorageDomainKey - domainKey2 := slabIndex2.StorageDomainKey - return domainKey1.Compare(domainKey2) < 0 - }, - ) - - for _, slabIndex := range slabIndices { - storageDomainKey := slabIndex.StorageDomainKey - var err error - errors.WrapPanic(func() { - err = s.ledger.SetValue( - storageDomainKey.Address[:], - []byte(storageDomainKey.Domain.Identifier()), - slabIndex.SlabIndex[:], + for _, slabIndex := range slabIndices { + err := s.writeStorageDomainSlabIndex( + slabIndex.StorageDomainKey, + slabIndex.SlabIndex, ) - }) - if err != nil { - return interpreter.WrappedExternalError(err) + if err != nil { + return err + } } } @@ -151,6 +171,24 @@ func (s *AccountStorageV1) commit() error { return nil } +func (s *AccountStorageV1) writeStorageDomainSlabIndex( + storageDomainKey interpreter.StorageDomainKey, + slabIndex atree.SlabIndex, +) error { + var err error + errors.WrapPanic(func() { + err = s.ledger.SetValue( + storageDomainKey.Address[:], + []byte(storageDomainKey.Domain.Identifier()), + slabIndex[:], + ) + }) + if err != nil { + return interpreter.WrappedExternalError(err) + } + return nil +} + // getDomainStorageMapFromV1DomainRegister returns domain storage map from legacy domain register. func getDomainStorageMapFromV1DomainRegister( ledger atree.Ledger, diff --git a/runtime/account_storage_v2.go b/runtime/account_storage_v2.go index 9e737053de..8f2330fe10 100644 --- a/runtime/account_storage_v2.go +++ b/runtime/account_storage_v2.go @@ -164,50 +164,68 @@ func (s *AccountStorageV2) SetNewAccountStorageMapSlabIndex( } func (s *AccountStorageV2) commit() error { - if s.newAccountStorageMapSlabIndices == nil { + switch len(s.newAccountStorageMapSlabIndices) { + case 0: + // Nothing to commit. return nil - } - type accountStorageMapSlabIndex struct { - Address common.Address - SlabIndex atree.SlabIndex - } + case 1: + // Optimize for the common case of a single account storage map. 
+ + var updated int + for address, slabIndex := range s.newAccountStorageMapSlabIndices { //nolint:maprange + if updated > 0 { + panic(errors.NewUnreachableError()) + } + + err := s.writeAccountStorageSlabIndex( + address, + slabIndex, + ) + if err != nil { + return err + } - slabIndices := make([]accountStorageMapSlabIndex, 0, len(s.newAccountStorageMapSlabIndices)) - for address, slabIndex := range s.newAccountStorageMapSlabIndices { //nolint:maprange - slabIndices = append( + updated++ + } + + default: + // Sort the indices to ensure deterministic order + + type accountStorageMapSlabIndex struct { + Address common.Address + SlabIndex atree.SlabIndex + } + + slabIndices := make([]accountStorageMapSlabIndex, 0, len(s.newAccountStorageMapSlabIndices)) + for address, slabIndex := range s.newAccountStorageMapSlabIndices { //nolint:maprange + slabIndices = append( + slabIndices, + accountStorageMapSlabIndex{ + Address: address, + SlabIndex: slabIndex, + }, + ) + } + sort.Slice( slabIndices, - accountStorageMapSlabIndex{ - Address: address, - SlabIndex: slabIndex, + func(i, j int) bool { + slabIndex1 := slabIndices[i] + slabIndex2 := slabIndices[j] + address1 := slabIndex1.Address + address2 := slabIndex2.Address + return address1.Compare(address2) < 0 }, ) - } - sort.Slice( - slabIndices, - func(i, j int) bool { - slabIndex1 := slabIndices[i] - slabIndex2 := slabIndices[j] - address1 := slabIndex1.Address - address2 := slabIndex2.Address - return address1.Compare(address2) < 0 - }, - ) - - storageKey := []byte(AccountStorageKey) - - for _, slabIndex := range slabIndices { - var err error - errors.WrapPanic(func() { - err = s.ledger.SetValue( - slabIndex.Address[:], - storageKey, - slabIndex.SlabIndex[:], + for _, slabIndex := range slabIndices { + err := s.writeAccountStorageSlabIndex( + slabIndex.Address, + slabIndex.SlabIndex, ) - }) - if err != nil { - return interpreter.WrappedExternalError(err) + if err != nil { + return err + } } } @@ -216,6 +234,24 @@ func (s *AccountStorageV2) commit() error { return nil } +func (s *AccountStorageV2) writeAccountStorageSlabIndex( + address common.Address, + slabIndex atree.SlabIndex, +) error { + var err error + errors.WrapPanic(func() { + err = s.ledger.SetValue( + address[:], + []byte(AccountStorageKey), + slabIndex[:], + ) + }) + if err != nil { + return interpreter.WrappedExternalError(err) + } + return nil +} + func getAccountStorageSlabIndex( ledger atree.Ledger, address common.Address, From 0dae0600bef19f3f98a81d63bb6833ce0573ca37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Fri, 15 Nov 2024 16:53:02 -0800 Subject: [PATCH 33/52] gate initializaton and use of account storage V2 behind feature flag --- runtime/storage.go | 40 +++++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/runtime/storage.go b/runtime/storage.go index 843c3e9e04..fb6baabb9f 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -108,11 +108,15 @@ func NewStorage( persistentSlabStorage, memoryGauge, ) - accountStorageV2 := NewAccountStorageV2( - ledger, - persistentSlabStorage, - memoryGauge, - ) + + var accountStorageV2 *AccountStorageV2 + if config.StorageFormatV2Enabled { + accountStorageV2 = NewAccountStorageV2( + ledger, + persistentSlabStorage, + memoryGauge, + ) + } return &Storage{ Ledger: ledger, @@ -371,12 +375,12 @@ func (s *Storage) commit(inter *interpreter.Interpreter, commitContractUpdates b return err } - err = s.AccountStorageV2.commit() - if err != nil { - return err - } - if 
s.Config.StorageFormatV2Enabled { + err = s.AccountStorageV2.commit() + if err != nil { + return err + } + err = s.migrateV1AccountsToV2(inter) if err != nil { return err @@ -421,6 +425,10 @@ func (s *Storage) ScheduleV2MigrationForModifiedAccounts() { func (s *Storage) migrateV1AccountsToV2(inter *interpreter.Interpreter) error { + if !s.Config.StorageFormatV2Enabled { + return errors.NewUnexpectedError("cannot migrate to storage format v2, as it is not enabled") + } + if len(s.scheduledV2Migrations) == 0 { return nil } @@ -513,11 +521,13 @@ func (s *Storage) CheckHealth() error { var storageMapStorageIDs []atree.SlabID - // Get cached account storage map slab IDs. - storageMapStorageIDs = append( - storageMapStorageIDs, - s.AccountStorageV2.cachedRootSlabIDs()..., - ) + if s.Config.StorageFormatV2Enabled { + // Get cached account storage map slab IDs. + storageMapStorageIDs = append( + storageMapStorageIDs, + s.AccountStorageV2.cachedRootSlabIDs()..., + ) + } // Get slab IDs of cached domain storage maps that are in account storage format v1. for storageKey, storageMap := range s.cachedDomainStorageMaps { //nolint:maprange From 3fa8ad1274eed90a40f9c13db09854e3cbba6917 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Fri, 15 Nov 2024 16:54:24 -0800 Subject: [PATCH 34/52] simplify logic Co-authored-by: Faye Amacker <33205765+fxamacker@users.noreply.github.com> --- runtime/storage.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/runtime/storage.go b/runtime/storage.go index fb6baabb9f..cf58e569c6 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -415,8 +415,9 @@ func (s *Storage) ScheduleV2Migration(address common.Address) { func (s *Storage) ScheduleV2MigrationForModifiedAccounts() { for address, isV1 := range s.cachedV1Accounts { //nolint:maprange - if !isV1 || !s.PersistentSlabStorage.HasUnsavedChanges(atree.Address(address)) { - continue + if isV1 && s.PersistentSlabStorage.HasUnsavedChanges(atree.Address(address)) { + s.ScheduleV2Migration(address) + } } s.ScheduleV2Migration(address) From c7b02e311272b5652609f65ffa506e707c008d97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Fri, 15 Nov 2024 16:54:46 -0800 Subject: [PATCH 35/52] remove TODO Co-authored-by: Faye Amacker <33205765+fxamacker@users.noreply.github.com> --- runtime/storage.go | 1 - 1 file changed, 1 deletion(-) diff --git a/runtime/storage.go b/runtime/storage.go index cf58e569c6..40a994d301 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -481,7 +481,6 @@ func (s *Storage) migrateV1AccountsToV2(inter *interpreter.Interpreter) error { return err } - // TODO: is this all that is needed? 
s.AccountStorageV2.cacheAccountStorageMap( address, From 29aea54690005bdeaddbbfd03cdb2179cd7e87e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Mon, 18 Nov 2024 09:22:38 -0800 Subject: [PATCH 36/52] fix suggestion --- runtime/storage.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/runtime/storage.go b/runtime/storage.go index 40a994d301..ef2c5cf14f 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -416,11 +416,8 @@ func (s *Storage) ScheduleV2Migration(address common.Address) { func (s *Storage) ScheduleV2MigrationForModifiedAccounts() { for address, isV1 := range s.cachedV1Accounts { //nolint:maprange if isV1 && s.PersistentSlabStorage.HasUnsavedChanges(atree.Address(address)) { - s.ScheduleV2Migration(address) + s.ScheduleV2Migration(address) } - } - - s.ScheduleV2Migration(address) } } @@ -481,7 +478,6 @@ func (s *Storage) migrateV1AccountsToV2(inter *interpreter.Interpreter) error { return err } - s.AccountStorageV2.cacheAccountStorageMap( address, accountStorageMap, From 4addf32051aa9cf9f39e18a3d4036e6e9af1d315 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Mon, 18 Nov 2024 13:58:11 -0800 Subject: [PATCH 37/52] refactor slab index writing --- runtime/account_storage_v1.go | 18 ++++++------------ runtime/account_storage_v2.go | 18 ++++++------------ runtime/storage.go | 20 ++++++++++++++++++++ 3 files changed, 32 insertions(+), 24 deletions(-) diff --git a/runtime/account_storage_v1.go b/runtime/account_storage_v1.go index 76ff904d5d..fe1419a356 100644 --- a/runtime/account_storage_v1.go +++ b/runtime/account_storage_v1.go @@ -175,18 +175,12 @@ func (s *AccountStorageV1) writeStorageDomainSlabIndex( storageDomainKey interpreter.StorageDomainKey, slabIndex atree.SlabIndex, ) error { - var err error - errors.WrapPanic(func() { - err = s.ledger.SetValue( - storageDomainKey.Address[:], - []byte(storageDomainKey.Domain.Identifier()), - slabIndex[:], - ) - }) - if err != nil { - return interpreter.WrappedExternalError(err) - } - return nil + return writeSlabIndex( + s.ledger, + storageDomainKey.Address, + []byte(storageDomainKey.Domain.Identifier()), + slabIndex, + ) } // getDomainStorageMapFromV1DomainRegister returns domain storage map from legacy domain register. 
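For illustration only (not part of the patch): both account storage formats persist a root slab index as an 8-byte register value and differ only in the register key, which is why the two call sites can share the single writeSlabIndex helper added in runtime/storage.go. A minimal usage sketch, assuming ledger, address, domain, and slabIndex are in scope inside a function returning error:

// v1: one register per domain, keyed by the domain identifier.
if err := writeSlabIndex(ledger, address, []byte(domain.Identifier()), slabIndex); err != nil {
    return err
}

// v2: a single account register, keyed by AccountStorageKey (the "stored" register).
if err := writeSlabIndex(ledger, address, []byte(AccountStorageKey), slabIndex); err != nil {
    return err
}
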
diff --git a/runtime/account_storage_v2.go b/runtime/account_storage_v2.go index 8f2330fe10..32619ac452 100644 --- a/runtime/account_storage_v2.go +++ b/runtime/account_storage_v2.go @@ -238,18 +238,12 @@ func (s *AccountStorageV2) writeAccountStorageSlabIndex( address common.Address, slabIndex atree.SlabIndex, ) error { - var err error - errors.WrapPanic(func() { - err = s.ledger.SetValue( - address[:], - []byte(AccountStorageKey), - slabIndex[:], - ) - }) - if err != nil { - return interpreter.WrappedExternalError(err) - } - return nil + return writeSlabIndex( + s.ledger, + address, + []byte(AccountStorageKey), + slabIndex, + ) } func getAccountStorageSlabIndex( diff --git a/runtime/storage.go b/runtime/storage.go index ef2c5cf14f..1ca3906dc0 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -606,3 +606,23 @@ func (UnreferencedRootSlabsError) IsInternalError() {} func (e UnreferencedRootSlabsError) Error() string { return fmt.Sprintf("slabs not referenced: %s", e.UnreferencedRootSlabIDs) } + +func writeSlabIndex( + ledger atree.Ledger, + address common.Address, + key []byte, + slabIndex atree.SlabIndex, +) error { + var err error + errors.WrapPanic(func() { + err = ledger.SetValue( + address[:], + key, + slabIndex[:], + ) + }) + if err != nil { + return interpreter.WrappedExternalError(err) + } + return nil +} From 4fd610ac529be171c69c20ca024ef7e76af09d1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Mon, 18 Nov 2024 14:01:52 -0800 Subject: [PATCH 38/52] refactor slab index register reading/writing to separate file and improve naming --- runtime/account_storage_v1.go | 4 +- runtime/account_storage_v2.go | 10 ++-- runtime/slabindex.go | 86 +++++++++++++++++++++++++++++++++++ runtime/storage.go | 61 +------------------------ 4 files changed, 94 insertions(+), 67 deletions(-) create mode 100644 runtime/slabindex.go diff --git a/runtime/account_storage_v1.go b/runtime/account_storage_v1.go index fe1419a356..53eec2fef2 100644 --- a/runtime/account_storage_v1.go +++ b/runtime/account_storage_v1.go @@ -175,7 +175,7 @@ func (s *AccountStorageV1) writeStorageDomainSlabIndex( storageDomainKey interpreter.StorageDomainKey, slabIndex atree.SlabIndex, ) error { - return writeSlabIndex( + return writeSlabIndexToRegister( s.ledger, storageDomainKey.Address, []byte(storageDomainKey.Domain.Identifier()), @@ -191,7 +191,7 @@ func getDomainStorageMapFromV1DomainRegister( domain common.StorageDomain, ) (*interpreter.DomainStorageMap, error) { - domainStorageSlabIndex, domainRegisterExists, err := getSlabIndexFromRegisterValue( + domainStorageSlabIndex, domainRegisterExists, err := readSlabIndexFromRegister( ledger, address, []byte(domain.Identifier()), diff --git a/runtime/account_storage_v2.go b/runtime/account_storage_v2.go index 32619ac452..71e19fdaed 100644 --- a/runtime/account_storage_v2.go +++ b/runtime/account_storage_v2.go @@ -238,7 +238,7 @@ func (s *AccountStorageV2) writeAccountStorageSlabIndex( address common.Address, slabIndex atree.SlabIndex, ) error { - return writeSlabIndex( + return writeSlabIndexToRegister( s.ledger, address, []byte(AccountStorageKey), @@ -246,7 +246,7 @@ func (s *AccountStorageV2) writeAccountStorageSlabIndex( ) } -func getAccountStorageSlabIndex( +func readAccountStorageSlabIndexFromRegister( ledger atree.Ledger, address common.Address, ) ( @@ -254,7 +254,7 @@ func getAccountStorageSlabIndex( bool, error, ) { - return getSlabIndexFromRegisterValue( + return readSlabIndexFromRegister( ledger, address, []byte(AccountStorageKey), @@ 
-269,7 +269,7 @@ func getAccountStorageMapFromRegister( *interpreter.AccountStorageMap, error, ) { - slabIndex, registerExists, err := getAccountStorageSlabIndex( + slabIndex, registerExists, err := readAccountStorageSlabIndexFromRegister( ledger, address, ) @@ -293,7 +293,7 @@ func hasAccountStorageMap( address common.Address, ) (bool, error) { - _, registerExists, err := getAccountStorageSlabIndex( + _, registerExists, err := readAccountStorageSlabIndexFromRegister( ledger, address, ) diff --git a/runtime/slabindex.go b/runtime/slabindex.go new file mode 100644 index 0000000000..00178608d0 --- /dev/null +++ b/runtime/slabindex.go @@ -0,0 +1,86 @@ +/* + * Cadence - The resource-oriented smart contract programming language + * + * Copyright Flow Foundation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package runtime + +import ( + "github.com/onflow/atree" + + "github.com/onflow/cadence/common" + "github.com/onflow/cadence/errors" + "github.com/onflow/cadence/interpreter" +) + +// readSlabIndexFromRegister returns register value as atree.SlabIndex. +// This function returns error if +// - underlying ledger panics, or +// - underlying ledger returns error when retrieving ledger value, or +// - retrieved ledger value is invalid (for atree.SlabIndex). +func readSlabIndexFromRegister( + ledger atree.Ledger, + address common.Address, + key []byte, +) (atree.SlabIndex, bool, error) { + var data []byte + var err error + errors.WrapPanic(func() { + data, err = ledger.GetValue(address[:], key) + }) + if err != nil { + return atree.SlabIndex{}, false, interpreter.WrappedExternalError(err) + } + + dataLength := len(data) + + if dataLength == 0 { + return atree.SlabIndex{}, false, nil + } + + isStorageIndex := dataLength == storageIndexLength + if !isStorageIndex { + // Invalid data in register + + // TODO: add dedicated error type? + return atree.SlabIndex{}, false, errors.NewUnexpectedError( + "invalid storage index for storage map of account '%x': expected length %d, got %d", + address[:], storageIndexLength, dataLength, + ) + } + + return atree.SlabIndex(data), true, nil +} + +func writeSlabIndexToRegister( + ledger atree.Ledger, + address common.Address, + key []byte, + slabIndex atree.SlabIndex, +) error { + var err error + errors.WrapPanic(func() { + err = ledger.SetValue( + address[:], + key, + slabIndex[:], + ) + }) + if err != nil { + return interpreter.WrappedExternalError(err) + } + return nil +} diff --git a/runtime/storage.go b/runtime/storage.go index 1ca3906dc0..8198e69fa1 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -213,7 +213,7 @@ func (s *Storage) IsV1Account(address common.Address) (isV1 bool) { // Check if a storage map register exists for any of the domains. // Check the most frequently used domains first, such as storage, public, private. 
for _, domain := range common.AllStorageDomains { - _, domainExists, err := getSlabIndexFromRegisterValue( + _, domainExists, err := readSlabIndexFromRegister( s.Ledger, address, []byte(domain.Identifier()), @@ -247,45 +247,6 @@ func (s *Storage) cacheDomainStorageMap( s.cachedDomainStorageMaps[storageDomainKey] = domainStorageMap } -// getSlabIndexFromRegisterValue returns register value as atree.SlabIndex. -// This function returns error if -// - underlying ledger panics, or -// - underlying ledger returns error when retrieving ledger value, or -// - retrieved ledger value is invalid (for atree.SlabIndex). -func getSlabIndexFromRegisterValue( - ledger atree.Ledger, - address common.Address, - key []byte, -) (atree.SlabIndex, bool, error) { - var data []byte - var err error - errors.WrapPanic(func() { - data, err = ledger.GetValue(address[:], key) - }) - if err != nil { - return atree.SlabIndex{}, false, interpreter.WrappedExternalError(err) - } - - dataLength := len(data) - - if dataLength == 0 { - return atree.SlabIndex{}, false, nil - } - - isStorageIndex := dataLength == storageIndexLength - if !isStorageIndex { - // Invalid data in register - - // TODO: add dedicated error type? - return atree.SlabIndex{}, false, errors.NewUnexpectedError( - "invalid storage index for storage map of account '%x': expected length %d, got %d", - address[:], storageIndexLength, dataLength, - ) - } - - return atree.SlabIndex(data), true, nil -} - func (s *Storage) recordContractUpdate( location common.AddressLocation, contractValue *interpreter.CompositeValue, @@ -606,23 +567,3 @@ func (UnreferencedRootSlabsError) IsInternalError() {} func (e UnreferencedRootSlabsError) Error() string { return fmt.Sprintf("slabs not referenced: %s", e.UnreferencedRootSlabIDs) } - -func writeSlabIndex( - ledger atree.Ledger, - address common.Address, - key []byte, - slabIndex atree.SlabIndex, -) error { - var err error - errors.WrapPanic(func() { - err = ledger.SetValue( - address[:], - key, - slabIndex[:], - ) - }) - if err != nil { - return interpreter.WrappedExternalError(err) - } - return nil -} From c795bfc13fe6db729410b877c45de3a69632a657 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Tue, 19 Nov 2024 11:10:44 -0600 Subject: [PATCH 39/52] Reduce register reads with StorageFormatV2Enabled Currently, all domain registers are being read for a scenario that does not require it. When StorageFormatV2 feature flag is enabled, Storage.GetDomainStorageMap() reads all domain registers to determine if account is new account or in V1 format. These register reads are unnecessary for V1 accounts when: - the given domain register exists, or - the given domain register doesn't exist and createIfNotExists param is false. This commit modifies Storage.GetDomainStorageMap() to read the given domain register first (before iterating all domain registers) to reduce register reads. - If the given domain register exists, then account format is V1 and no more register reading is needed. - If the given domain register doesn't exist and createIfNotExists is false, then nil domain storage map is returned and no more register reading is needed. - Otherwise, account format (V1 account or new account) is determined by iterating all domain registers. 
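The resulting read order can be summarized as the following sketch (illustrative only, with register reads abstracted as booleans; the storageFormat constants and the actual implementation, getAccountStorageFormat, are introduced in runtime/storage.go below):

// Sketch of the decision order, not the actual implementation.
func decideFormat(
    accountRegisterExists bool,          // "stored" register exists => v2 account
    domainRegisterExists bool,           // requested domain register exists => v1 account
    anyDomainRegisterExists func() bool, // scan of all domain registers => v1 account
    createIfNotExists bool,
) storageFormat {
    switch {
    case accountRegisterExists:
        return StorageFormatV2
    case domainRegisterExists:
        return storageFormatV1
    case !createIfNotExists:
        // The requested domain register is absent and nothing will be created,
        // so the remaining domain registers do not need to be read.
        return storageFormatUnknown
    case anyDomainRegisterExists():
        return storageFormatV1
    default:
        return storageFormatNew
    }
}
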
--- runtime/sharedstate_test.go | 8 +- runtime/storage.go | 172 +++++++++++++++++++++++++++++++----- 2 files changed, 157 insertions(+), 23 deletions(-) diff --git a/runtime/sharedstate_test.go b/runtime/sharedstate_test.go index 52dd68f00a..b2cd5cc495 100644 --- a/runtime/sharedstate_test.go +++ b/runtime/sharedstate_test.go @@ -255,12 +255,18 @@ func TestRuntimeSharedState(t *testing.T) { test( true, []ownerKeyPair{ - // Read account domain register to check if it is a migrated account + // Read account register to check if it is a migrated account // Read returns no value. { owner: signerAddress[:], key: []byte(AccountStorageKey), }, + // Read contract domain register. + // Read returns no value. + { + owner: signerAddress[:], + key: []byte(common.StorageDomainContract.Identifier()), + }, // Read all available domain registers to check if it is a new account // Read returns no value. { diff --git a/runtime/storage.go b/runtime/storage.go index 8198e69fa1..aab2f1daba 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -40,6 +40,15 @@ type StorageConfig struct { StorageFormatV2Enabled bool } +type storageFormat uint8 + +const ( + storageFormatUnknown storageFormat = iota + storageFormatNew + storageFormatV1 + StorageFormatV2 +) + type Storage struct { *atree.PersistentSlabStorage @@ -160,7 +169,10 @@ func (s *Storage) GetDomainStorageMap( } }() - if !s.Config.StorageFormatV2Enabled || s.IsV1Account(address) { + if !s.Config.StorageFormatV2Enabled { + + // When StorageFormatV2 is disabled, handle all accounts as v1 accounts. + domainStorageMap = s.AccountStorageV1.GetDomainStorageMap( address, domain, @@ -172,44 +184,160 @@ func (s *Storage) GetDomainStorageMap( } } else { - domainStorageMap = s.AccountStorageV2.GetDomainStorageMap( - inter, - address, - domain, - createIfNotExists, - ) + + // StorageFormatV2 is enabled. + + onlyCheckSpecifiedDomainForV1 := !createIfNotExists + format := s.getAccountStorageFormat(address, domain, onlyCheckSpecifiedDomainForV1) + + switch format { + + case storageFormatUnknown: + // storageFormatUnknown is returned when !createIfNotExists + // and domain register doesn't exist. + + if createIfNotExists { + panic(errors.NewUnreachableError()) + } + + domainStorageMap = nil + + case storageFormatV1: + domainStorageMap = s.AccountStorageV1.GetDomainStorageMap( + address, + domain, + createIfNotExists, + ) + + s.cacheIsV1Account(address, true) + + case StorageFormatV2, storageFormatNew: + domainStorageMap = s.AccountStorageV2.GetDomainStorageMap( + inter, + address, + domain, + createIfNotExists, + ) + + s.cacheIsV1Account(address, false) + + default: + panic(errors.NewUnreachableError()) + } } return domainStorageMap } -// IsV1Account returns true if given account is in account storage format v1. -func (s *Storage) IsV1Account(address common.Address) (isV1 bool) { +// getAccountStorageFormat returns storageFormat for the given account. +// This function determines account format by reading registers. +// If onlyCheckSpecifiedDomainForV1 is true, only the given domain +// register is checked to determine if account format is v1. If +// domain register doesn't exist, StorageFormatUnknown is returned. +// +// When StorageFormatV2 is disabled: +// - No register reading (accounts are assumed to be in v1 format). +// +// When StorageFormatV2 is enabled: +// - For v2 accounts, "stored" register is read. +// +// - For v1 accounts, +// - If onlyCheckSpecifiedDomainForV1 is true, +// "stored" register and given domain register are read. 
+// - If onlyCheckSpecifiedDomainForV1 is false and given domain exists, +// "stored" register and given domain register are read. +// - If onlyCheckSpecifiedDomainForV1 is false and given domain doesn't exist, +// "stored" register, given domain register, and all domain registers are read. +// +// - For new accounts, "stored" register, given domain register, and all domain registers are read. +func (s *Storage) getAccountStorageFormat( + address common.Address, + domain common.StorageDomain, + onlyCheckSpecifiedDomainForV1 bool, +) (format storageFormat) { - // Check cache + // All accounts are assumed to be in v1 format when StorageFormatV2 is disabled. - if isV1, present := s.cachedV1Accounts[address]; present { - return isV1 + if !s.Config.StorageFormatV2Enabled { + return storageFormatV1 } - // Cache result + // Return cached account format (no register reading). - defer func() { - s.cacheIsV1Account(address, isV1) - }() + isCachedV1, isCachedV2 := s.getCachedAccountFormat(address) - // First check if account storage map exists. - // In that case the account was already migrated to account storage format v2, - // and we do not need to check the domain storage map registers. + if isCachedV1 { + return storageFormatV1 + } + + if isCachedV2 { + return StorageFormatV2 + } + + // Check if account is v2 (by reading "stored" register). + + if s.isV2Account(address) { + return StorageFormatV2 + } + + // Check if account is v1 (by reading given domain register). + + if s.hasDomainRegister(address, domain) { + return storageFormatV1 + } + + // Return early if onlyCheckSpecifiedDomainForV1 to prevent more register reading. + + if onlyCheckSpecifiedDomainForV1 { + return storageFormatUnknown + } + // At this point, account is either new account or v1 account without given domain register. + + if s.isV1Account(address) { + return storageFormatV1 + } + + return storageFormatNew +} + +func (s *Storage) getCachedAccountFormat(address common.Address) (isV1 bool, isV2 bool) { + isV1, cached := s.cachedV1Accounts[address] + if !cached { + return false, false + } + return isV1, !isV1 +} + +// isV2Account returns true if given account is in account storage format v2. +func (s *Storage) isV2Account(address common.Address) bool { accountStorageMapExists, err := hasAccountStorageMap(s.Ledger, address) if err != nil { panic(err) } - if accountStorageMapExists { - return false + + return accountStorageMapExists +} + +// hasDomainRegister returns true if given account has given domain register. +// NOTE: account storage format v1 has domain registers. +func (s *Storage) hasDomainRegister(address common.Address, domain common.StorageDomain) (domainExists bool) { + _, domainExists, err := readSlabIndexFromRegister( + s.Ledger, + address, + []byte(domain.Identifier()), + ) + if err != nil { + panic(err) } + return domainExists +} + +// isV1Account returns true if given account is in account storage format v1 +// by checking if any of the domain registers exist. +func (s *Storage) isV1Account(address common.Address) (isV1 bool) { + // Check if a storage map register exists for any of the domains. // Check the most frequently used domains first, such as storage, public, private. 
for _, domain := range common.AllStorageDomains { @@ -492,7 +620,7 @@ func (s *Storage) CheckHealth() error { // Only accounts in storage format v1 store domain storage maps // directly at the root of the account - if !s.IsV1Account(address) { + if !s.isV1Account(address) { continue } From f56272754ac0c867f15059476bdacab38c65ab3b Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Wed, 20 Nov 2024 10:13:18 -0600 Subject: [PATCH 40/52] Add tests for register reads for GetDomainStorageMap() This commit adds tests for register reads from GetStorageMap() for account storage format v1 and v2. --- runtime/storage_test.go | 829 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 829 insertions(+) diff --git a/runtime/storage_test.go b/runtime/storage_test.go index 4e4dc261bc..ab5640fe81 100644 --- a/runtime/storage_test.go +++ b/runtime/storage_test.go @@ -23,12 +23,14 @@ import ( "encoding/hex" "fmt" "math/rand" + "runtime" "slices" "sort" "strconv" "strings" "testing" + "github.com/fxamacker/cbor/v2" "github.com/onflow/atree" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -8081,6 +8083,801 @@ func TestDomainRegisterMigrationForLargeAccount(t *testing.T) { checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) } +func TestGetDomainStorageMapRegisterReadsForNewAccount(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + testCases := []struct { + name string + storageFormatV2Enabled bool + domain common.StorageDomain + createIfNotExists bool + expectedDomainStorageMapIsNil bool + expectedReadsFor1stGetDomainStorageMapCall []ownerKeyPair + expectedReadsFor2ndGetDomainStorageMapCall []ownerKeyPair + }{ + // Test cases with storageFormatV2Enabled = false + { + name: "storageFormatV2Enabled = false, domain storage map does not exist, createIfNotExists = false", + storageFormatV2Enabled: false, + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: true, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + }, + { + name: "storageFormatV2Enabled = false, domain storage map does not exist, createIfNotExists = true", + storageFormatV2Enabled: false, + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reads from the second GetDomainStorageMap() because + // domain storage map is created and cached in the first GetDomainStorageMap(). 
+ }, + }, + // Test cases with storageFormatV2Enabled = true + { + name: "storageFormatV2Enabled = true, domain storage map does not exist, createIfNotExists = false", + storageFormatV2Enabled: true, + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: true, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // Second GetDomainStorageMap() has the same register reading as the first GetDomainStorageMap() + // because account status can't be cached in previous call. + + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + }, + { + name: "storageFormatV2Enabled = true, domain storage map does not exist, createIfNotExists = true", + storageFormatV2Enabled: true, + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Check all domain registers + { + owner: address[:], + key: []byte(common.PathDomainStorage.Identifier()), + }, + { + owner: address[:], + key: []byte(common.PathDomainPrivate.Identifier()), + }, + { + owner: address[:], + key: []byte(common.PathDomainPublic.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainContract.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainInbox.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainCapabilityController.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainCapabilityControllerTag.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainPathCapability.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainAccountCapability.Identifier()), + }, + // Read account register to load account storage map + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reads from the second GetDomainStorageMap() because + // domain storage map is created and cached in the first GetDomainStorageMap(). 
+ }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + var ledgerReads []ownerKeyPair + + // Create empty storage + ledger := NewTestLedger( + func(owner, key, _ []byte) { + ledgerReads = append( + ledgerReads, + ownerKeyPair{ + owner: owner, + key: key, + }, + ) + }, + nil) + + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: tc.storageFormatV2Enabled, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + domainStorageMap := storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) + require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) + require.Equal(t, tc.expectedReadsFor1stGetDomainStorageMapCall, ledgerReads) + + ledgerReads = ledgerReads[:0] + + // Call GetDomainStorageMap() again to test account status is cached and no register reading is needed. + + domainStorageMap = storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) + require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) + require.Equal(t, tc.expectedReadsFor2ndGetDomainStorageMapCall, ledgerReads) + }) + } +} + +func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { + + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + type getStorageDataFunc func() (storedValues map[string][]byte, StorageIndices map[string]uint64) + + createV1AccountWithDomain := func( + address common.Address, + domain common.StorageDomain, + ) getStorageDataFunc { + return func() (storedValues map[string][]byte, StorageIndices map[string]uint64) { + ledger := NewTestLedger(nil, nil) + + persistentSlabStorage := newSlabStorage(ledger) + + orderedMap, err := atree.NewMap( + persistentSlabStorage, + atree.Address(address), + atree.NewDefaultDigesterBuilder(), + interpreter.EmptyTypeInfo{}, + ) + require.NoError(t, err) + + slabIndex := orderedMap.SlabID().Index() + + for i := range 3 { + + key := interpreter.StringStorageMapKey(strconv.Itoa(i)) + + value := interpreter.NewUnmeteredIntValueFromInt64(int64(i)) + + existingStorable, err := orderedMap.Set( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + value, + ) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Commit domain storage map + err = persistentSlabStorage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + // Create domain register + err = ledger.SetValue(address[:], []byte(domain.Identifier()), slabIndex[:]) + require.NoError(t, err) + + return ledger.StoredValues, ledger.StorageIndices + } + } + + testCases := []struct { + name string + getStorageData getStorageDataFunc + storageFormatV2Enabled bool + domain common.StorageDomain + createIfNotExists bool + expectedDomainStorageMapIsNil bool + expectedReadsFor1stGetDomainStorageMapCall []ownerKeyPair + expectedReadsFor2ndGetDomainStorageMapCall []ownerKeyPair + }{ + // Test cases with storageFormatV2Enabled = false + { + name: "storageFormatV2Enabled = false, domain storage map does not exist, createIfNotExists = false", + storageFormatV2Enabled: false, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathPublic), + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: true, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // 
Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + }, + { + name: "storageFormatV2Enabled = false, domain storage map does not exist, createIfNotExists = true", + storageFormatV2Enabled: false, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathPublic), + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading in second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(0). + }, + }, + { + name: "storageFormatV2Enabled = false, domain storage map exists, createIfNotExists = false", + storageFormatV2Enabled: false, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathStorage), + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Read domain storage map register + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading in second GetDomainStorageMap() because + // domain storage map is loaded and cached in the first + // GetDomainStorageMap(0). + }, + }, + { + name: "storageFormatV2Enabled = false, domain storage map exists, createIfNotExists = true", + storageFormatV2Enabled: false, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathStorage), + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Read domain storage map register + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading in second GetDomainStorageMap() because + // domain storage map is loaded and cached in the first + // GetDomainStorageMap(0). 
+ }, + }, + // Test cases with storageFormatV2Enabled = true + { + name: "storageFormatV2Enabled = true, domain storage map does not exist, createIfNotExists = false", + storageFormatV2Enabled: true, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathPublic), + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: true, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + }, + { + name: "storageFormatV2Enabled = true, domain storage map does not exist, createIfNotExists = true", + storageFormatV2Enabled: true, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathPublic), + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Check all domain registers until any existing domain is checked + { + owner: address[:], + key: []byte(common.PathDomainStorage.Identifier()), + }, + { + owner: address[:], + key: []byte(common.PathDomainPrivate.Identifier()), + }, + { + owner: address[:], + key: []byte(common.PathDomainPublic.Identifier()), + }, + // Check domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(). + }, + }, + { + name: "storageFormatV2Enabled = true, domain storage map exists, createIfNotExists = false", + storageFormatV2Enabled: true, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathStorage), + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Read domain storage map register + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(). 
+ }, + }, + { + name: "storageFormatV2Enabled = true, domain storage map exists, createIfNotExists = true", + storageFormatV2Enabled: true, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathStorage), + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check given domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Read given domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Read domain storage map register + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(). + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + storedValues, storedIndices := tc.getStorageData() + + var ledgerReads []ownerKeyPair + + ledger := NewTestLedgerWithData( + func(owner, key, _ []byte) { + ledgerReads = append( + ledgerReads, + ownerKeyPair{ + owner: owner, + key: key, + }, + ) + }, + nil, + storedValues, + storedIndices, + ) + + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: tc.storageFormatV2Enabled, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + domainStorageMap := storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) + require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) + require.Equal(t, tc.expectedReadsFor1stGetDomainStorageMapCall, ledgerReads) + + ledgerReads = ledgerReads[:0] + + domainStorageMap = storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) + require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) + require.Equal(t, tc.expectedReadsFor2ndGetDomainStorageMapCall, ledgerReads) + }) + } +} + +func TestGetDomainStorageMapRegisterReadsForV2Account(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + type getStorageDataFunc func() (storedValues map[string][]byte, StorageIndices map[string]uint64) + + createV2AccountWithDomain := func( + address common.Address, + domain common.StorageDomain, + ) getStorageDataFunc { + return func() (storedValues map[string][]byte, StorageIndices map[string]uint64) { + ledger := NewTestLedger(nil, nil) + + persistentSlabStorage := newSlabStorage(ledger) + + accountOrderedMap, err := atree.NewMap( + persistentSlabStorage, + atree.Address(address), + atree.NewDefaultDigesterBuilder(), + interpreter.EmptyTypeInfo{}, + ) + require.NoError(t, err) + + slabIndex := accountOrderedMap.SlabID().Index() + + domainOrderedMap, err := atree.NewMap( + persistentSlabStorage, + atree.Address(address), + atree.NewDefaultDigesterBuilder(), + interpreter.EmptyTypeInfo{}, + ) + require.NoError(t, err) + + domainKey := interpreter.Uint64StorageMapKey(domain) + + existingDomain, err := accountOrderedMap.Set( + domainKey.AtreeValueCompare, + domainKey.AtreeValueHashInput, + domainKey.AtreeValue(), + domainOrderedMap, + ) + require.NoError(t, err) + require.Nil(t, existingDomain) + + for i := range 3 { + + key := interpreter.StringStorageMapKey(strconv.Itoa(i)) + + value := 
interpreter.NewUnmeteredIntValueFromInt64(int64(i)) + + existingStorable, err := domainOrderedMap.Set( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + value, + ) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Commit domain storage map + err = persistentSlabStorage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + // Create account register + err = ledger.SetValue(address[:], []byte(AccountStorageKey), slabIndex[:]) + require.NoError(t, err) + + return ledger.StoredValues, ledger.StorageIndices + } + } + + testCases := []struct { + name string + getStorageData getStorageDataFunc + domain common.StorageDomain + createIfNotExists bool + expectedDomainStorageMapIsNil bool + expectedReadsFor1stGetDomainStorageMapCall []ownerKeyPair + expectedReadsFor2ndGetDomainStorageMapCall []ownerKeyPair + }{ + { + name: "domain storage map does not exist, createIfNotExists = false", + getStorageData: createV2AccountWithDomain(address, common.StorageDomainPathPublic), + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: true, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account register + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account storage map + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap because + // account storage map is loaded and cached from first + // GetDomainStorageMap(). + }, + }, + { + name: "domain storage map does not exist, createIfNotExists = true", + getStorageData: createV2AccountWithDomain(address, common.StorageDomainPathPublic), + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account register + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account storage map + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(). + }, + }, + { + name: "domain storage map exists, createIfNotExists = false", + getStorageData: createV2AccountWithDomain(address, common.StorageDomainPathStorage), + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account register + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account storage map + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(). 
+ }, + }, + { + name: "domain storage map exists, createIfNotExists = true", + getStorageData: createV2AccountWithDomain(address, common.StorageDomainPathStorage), + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account register + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account storage map + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(). + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + storedValues, storedIndices := tc.getStorageData() + + var ledgerReads []ownerKeyPair + + ledger := NewTestLedgerWithData( + func(owner, key, _ []byte) { + ledgerReads = append( + ledgerReads, + ownerKeyPair{ + owner: owner, + key: key, + }, + ) + }, + nil, + storedValues, + storedIndices, + ) + + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + domainStorageMap := storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) + require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) + require.Equal(t, tc.expectedReadsFor1stGetDomainStorageMapCall, ledgerReads) + + ledgerReads = ledgerReads[:0] + + domainStorageMap = storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) + require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) + require.Equal(t, tc.expectedReadsFor2ndGetDomainStorageMapCall, ledgerReads) + }) + } +} + // createAndWriteAccountStorageMap creates account storage map with given domains and writes random values to domain storage map. func createAndWriteAccountStorageMap( t testing.TB, @@ -8207,3 +9004,35 @@ func checkAccountStorageMapData( require.Equal(tb, 1, len(rootSlabIDs)) require.Contains(tb, rootSlabIDs, accountSlabID) } + +func newSlabStorage(ledger atree.Ledger) *atree.PersistentSlabStorage { + decodeStorable := func( + decoder *cbor.StreamDecoder, + slabID atree.SlabID, + inlinedExtraData []atree.ExtraData, + ) ( + atree.Storable, + error, + ) { + return interpreter.DecodeStorable( + decoder, + slabID, + inlinedExtraData, + nil, + ) + } + + decodeTypeInfo := func(decoder *cbor.StreamDecoder) (atree.TypeInfo, error) { + return interpreter.DecodeTypeInfo(decoder, nil) + } + + ledgerStorage := atree.NewLedgerBaseStorage(ledger) + + return atree.NewPersistentSlabStorage( + ledgerStorage, + interpreter.CBOREncMode, + interpreter.CBORDecMode, + decodeStorable, + decodeTypeInfo, + ) +} From 3a71bf0e8e00ede8ebaf2e339159a272bd8b2814 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 25 Nov 2024 10:19:36 -0600 Subject: [PATCH 41/52] Add tests for register reads for GetDomainStorageMap() This commit adds tests for register reads from GetStorageMap() for account storage format v1 and v2. 
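For readers skimming the diff, the new tests all follow one register-read
tracking pattern. Here is a minimal sketch (using the NewTestLedger,
ownerKeyPair, NewStorage, and NewTestInterpreterWithStorage helpers from this
patch series; `t` and `address` come from the enclosing test):

    // Record every ledger read as an (owner, key) pair so the test can assert
    // exactly which registers each GetDomainStorageMap() call touches.
    var ledgerReads []ownerKeyPair

    ledger := NewTestLedger(
        func(owner, key, _ []byte) {
            ledgerReads = append(ledgerReads, ownerKeyPair{owner: owner, key: key})
        },
        nil, // no write callback needed
    )

    storage := NewStorage(ledger, nil, StorageConfig{StorageFormatV2Enabled: true})
    inter := NewTestInterpreterWithStorage(t, storage)

    // The first call may read registers to detect the account's storage format;
    // a second identical call is expected to be served from the cache, so the
    // tests reset ledgerReads and assert the second call's reads separately.
    _ = storage.GetDomainStorageMap(inter, address, common.StorageDomainPathStorage, false)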
--- runtime/storage_test.go | 968 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 968 insertions(+) diff --git a/runtime/storage_test.go b/runtime/storage_test.go index 4e4dc261bc..c0c8c0d3fb 100644 --- a/runtime/storage_test.go +++ b/runtime/storage_test.go @@ -23,12 +23,14 @@ import ( "encoding/hex" "fmt" "math/rand" + "runtime" "slices" "sort" "strconv" "strings" "testing" + "github.com/fxamacker/cbor/v2" "github.com/onflow/atree" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -8081,6 +8083,940 @@ func TestDomainRegisterMigrationForLargeAccount(t *testing.T) { checkAccountStorageMapData(t, ledger.StoredValues, ledger.StorageIndices, address, accountValues) } +func TestGetDomainStorageMapRegisterReadsForNewAccount(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + testCases := []struct { + name string + storageFormatV2Enabled bool + domain common.StorageDomain + createIfNotExists bool + expectedDomainStorageMapIsNil bool + expectedReadsFor1stGetDomainStorageMapCall []ownerKeyPair + expectedReadsFor2ndGetDomainStorageMapCall []ownerKeyPair + expectedReadsSet map[string]struct{} + }{ + // Test cases with storageFormatV2Enabled = false + { + name: "storageFormatV2Enabled = false, domain storage map does not exist, createIfNotExists = false", + storageFormatV2Enabled: false, + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: true, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + }, + }, + { + name: "storageFormatV2Enabled = false, domain storage map does not exist, createIfNotExists = true", + storageFormatV2Enabled: false, + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reads from the second GetDomainStorageMap() because + // domain storage map is created and cached in the first GetDomainStorageMap(). + }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + }, + }, + // Test cases with storageFormatV2Enabled = true + { + name: "storageFormatV2Enabled = true, domain storage map does not exist, createIfNotExists = false", + storageFormatV2Enabled: true, + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: true, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read all available domain registers to check if it is a new account + // Read returns no value. 
+ { + owner: address[:], + key: []byte(common.PathDomainStorage.Identifier()), + }, + { + owner: address[:], + key: []byte(common.PathDomainPrivate.Identifier()), + }, + { + owner: address[:], + key: []byte(common.PathDomainPublic.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainContract.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainInbox.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainCapabilityController.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainCapabilityControllerTag.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainPathCapability.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainAccountCapability.Identifier()), + }, + // Try to read account register to create account storage map + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // Second GetDomainStorageMap() get cached account format v2 (cached during first GetDomainStorageMap()). + + // Try to read account register to create account storage map + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainPathPrivate.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainPathPublic.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainContract.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainInbox.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainCapabilityController.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainCapabilityControllerTag.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainPathCapability.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainAccountCapability.Identifier(): {}, + }, + }, + { + name: "storageFormatV2Enabled = true, domain storage map does not exist, createIfNotExists = true", + storageFormatV2Enabled: true, + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check all domain registers + { + owner: address[:], + key: []byte(common.PathDomainStorage.Identifier()), + }, + { + owner: address[:], + key: []byte(common.PathDomainPrivate.Identifier()), + }, + { + owner: address[:], + key: []byte(common.PathDomainPublic.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainContract.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainInbox.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainCapabilityController.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainCapabilityControllerTag.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainPathCapability.Identifier()), + }, + { + owner: address[:], + key: []byte(common.StorageDomainAccountCapability.Identifier()), + }, + // Read account register to load account storage map + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reads 
from the second GetDomainStorageMap() because + // domain storage map is created and cached in the first GetDomainStorageMap(). + }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainPathPrivate.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainPathPublic.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainContract.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainInbox.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainCapabilityController.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainCapabilityControllerTag.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainPathCapability.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainAccountCapability.Identifier(): {}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + var ledgerReads []ownerKeyPair + ledgerReadsSet := make(map[string]struct{}) + + // Create empty storage + ledger := NewTestLedger( + func(owner, key, _ []byte) { + ledgerReads = append( + ledgerReads, + ownerKeyPair{ + owner: owner, + key: key, + }, + ) + ledgerReadsSet[string(owner)+"|"+string(key)] = struct{}{} + }, + nil) + + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: tc.storageFormatV2Enabled, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + domainStorageMap := storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) + require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) + require.Equal(t, tc.expectedReadsFor1stGetDomainStorageMapCall, ledgerReads) + + ledgerReads = ledgerReads[:0] + + // Call GetDomainStorageMap() again to test account status is cached and no register reading is needed. 
+ + domainStorageMap = storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) + require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) + require.Equal(t, tc.expectedReadsFor2ndGetDomainStorageMapCall, ledgerReads) + + // Check underlying ledger reads + require.Equal(t, len(ledgerReadsSet), len(tc.expectedReadsSet)) + for k := range ledgerReadsSet { + require.Contains(t, tc.expectedReadsSet, k) + } + }) + } +} + +func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { + + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + type getStorageDataFunc func() (storedValues map[string][]byte, StorageIndices map[string]uint64) + + createV1AccountWithDomain := func( + address common.Address, + domain common.StorageDomain, + ) getStorageDataFunc { + return func() (storedValues map[string][]byte, StorageIndices map[string]uint64) { + ledger := NewTestLedger(nil, nil) + + persistentSlabStorage := newSlabStorage(ledger) + + orderedMap, err := atree.NewMap( + persistentSlabStorage, + atree.Address(address), + atree.NewDefaultDigesterBuilder(), + interpreter.EmptyTypeInfo{}, + ) + require.NoError(t, err) + + slabIndex := orderedMap.SlabID().Index() + + for i := range 3 { + + key := interpreter.StringStorageMapKey(strconv.Itoa(i)) + + value := interpreter.NewUnmeteredIntValueFromInt64(int64(i)) + + existingStorable, err := orderedMap.Set( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + value, + ) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Commit domain storage map + err = persistentSlabStorage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + // Create domain register + err = ledger.SetValue(address[:], []byte(domain.Identifier()), slabIndex[:]) + require.NoError(t, err) + + return ledger.StoredValues, ledger.StorageIndices + } + } + + testCases := []struct { + name string + getStorageData getStorageDataFunc + storageFormatV2Enabled bool + domain common.StorageDomain + createIfNotExists bool + expectedDomainStorageMapIsNil bool + expectedReadsFor1stGetDomainStorageMapCall []ownerKeyPair + expectedReadsFor2ndGetDomainStorageMapCall []ownerKeyPair + expectedReadsSet map[string]struct{} + }{ + // Test cases with storageFormatV2Enabled = false + { + name: "storageFormatV2Enabled = false, domain storage map does not exist, createIfNotExists = false", + storageFormatV2Enabled: false, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathPublic), + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: true, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + }, + }, + { + name: "storageFormatV2Enabled = false, domain storage map does not exist, createIfNotExists = true", + storageFormatV2Enabled: false, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathPublic), + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain 
register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading in second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(0). + }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + }, + }, + { + name: "storageFormatV2Enabled = false, domain storage map exists, createIfNotExists = false", + storageFormatV2Enabled: false, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathStorage), + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Read domain storage map register + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading in second GetDomainStorageMap() because + // domain storage map is loaded and cached in the first + // GetDomainStorageMap(0). + }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, + }, + { + name: "storageFormatV2Enabled = false, domain storage map exists, createIfNotExists = true", + storageFormatV2Enabled: false, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathStorage), + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Read domain storage map register + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading in second GetDomainStorageMap() because + // domain storage map is loaded and cached in the first + // GetDomainStorageMap(0). 
+ }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, + }, + // Test cases with storageFormatV2Enabled = true + { + name: "storageFormatV2Enabled = true, domain storage map does not exist, createIfNotExists = false", + storageFormatV2Enabled: true, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathPublic), + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: true, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check all domain registers until existing domain register is read + { + owner: address[:], + key: []byte(common.PathDomainStorage.Identifier()), + }, + { + owner: address[:], + key: []byte(common.PathDomainPrivate.Identifier()), + }, + { + owner: address[:], + key: []byte(common.PathDomainPublic.Identifier()), + }, + // Read requested domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // Read requested domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainPathPrivate.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainPathPublic.Identifier(): {}, + }, + }, + { + name: "storageFormatV2Enabled = true, domain storage map does not exist, createIfNotExists = true", + storageFormatV2Enabled: true, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathPublic), + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check all domain registers until existing domain register is read + { + owner: address[:], + key: []byte(common.PathDomainStorage.Identifier()), + }, + { + owner: address[:], + key: []byte(common.PathDomainPrivate.Identifier()), + }, + { + owner: address[:], + key: []byte(common.PathDomainPublic.Identifier()), + }, + // Read requested domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(). 
+ }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainPathPrivate.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainPathPublic.Identifier(): {}, + }, + }, + { + name: "storageFormatV2Enabled = true, domain storage map exists, createIfNotExists = false", + storageFormatV2Enabled: true, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathStorage), + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Read domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Read domain storage map register + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(). + }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, + }, + { + name: "storageFormatV2Enabled = true, domain storage map exists, createIfNotExists = true", + storageFormatV2Enabled: true, + getStorageData: createV1AccountWithDomain(address, common.StorageDomainPathStorage), + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Check given domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Read given domain register + { + owner: address[:], + key: []byte(common.StorageDomainPathStorage.Identifier()), + }, + // Read domain storage map register + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(). 
+ }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + storedValues, storedIndices := tc.getStorageData() + + var ledgerReads []ownerKeyPair + ledgerReadsSet := make(map[string]struct{}) + + ledger := NewTestLedgerWithData( + func(owner, key, _ []byte) { + ledgerReads = append( + ledgerReads, + ownerKeyPair{ + owner: owner, + key: key, + }, + ) + ledgerReadsSet[string(owner)+"|"+string(key)] = struct{}{} + }, + nil, + storedValues, + storedIndices, + ) + + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: tc.storageFormatV2Enabled, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + domainStorageMap := storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) + require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) + require.Equal(t, tc.expectedReadsFor1stGetDomainStorageMapCall, ledgerReads) + + ledgerReads = ledgerReads[:0] + + domainStorageMap = storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) + require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) + require.Equal(t, tc.expectedReadsFor2ndGetDomainStorageMapCall, ledgerReads) + + // Check underlying ledger reads + require.Equal(t, len(ledgerReadsSet), len(tc.expectedReadsSet)) + for k := range ledgerReadsSet { + require.Contains(t, tc.expectedReadsSet, k) + } + }) + } +} + +func TestGetDomainStorageMapRegisterReadsForV2Account(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + type getStorageDataFunc func() (storedValues map[string][]byte, StorageIndices map[string]uint64) + + createV2AccountWithDomain := func( + address common.Address, + domain common.StorageDomain, + ) getStorageDataFunc { + return func() (storedValues map[string][]byte, StorageIndices map[string]uint64) { + ledger := NewTestLedger(nil, nil) + + persistentSlabStorage := newSlabStorage(ledger) + + accountOrderedMap, err := atree.NewMap( + persistentSlabStorage, + atree.Address(address), + atree.NewDefaultDigesterBuilder(), + interpreter.EmptyTypeInfo{}, + ) + require.NoError(t, err) + + slabIndex := accountOrderedMap.SlabID().Index() + + domainOrderedMap, err := atree.NewMap( + persistentSlabStorage, + atree.Address(address), + atree.NewDefaultDigesterBuilder(), + interpreter.EmptyTypeInfo{}, + ) + require.NoError(t, err) + + domainKey := interpreter.Uint64StorageMapKey(domain) + + existingDomain, err := accountOrderedMap.Set( + domainKey.AtreeValueCompare, + domainKey.AtreeValueHashInput, + domainKey.AtreeValue(), + domainOrderedMap, + ) + require.NoError(t, err) + require.Nil(t, existingDomain) + + for i := range 3 { + + key := interpreter.StringStorageMapKey(strconv.Itoa(i)) + + value := interpreter.NewUnmeteredIntValueFromInt64(int64(i)) + + existingStorable, err := domainOrderedMap.Set( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + value, + ) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Commit domain storage map + err = persistentSlabStorage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + // Create account register + err = ledger.SetValue(address[:], []byte(AccountStorageKey), slabIndex[:]) + require.NoError(t, err) + + return 
ledger.StoredValues, ledger.StorageIndices + } + } + + testCases := []struct { + name string + getStorageData getStorageDataFunc + domain common.StorageDomain + createIfNotExists bool + expectedDomainStorageMapIsNil bool + expectedReadsFor1stGetDomainStorageMapCall []ownerKeyPair + expectedReadsFor2ndGetDomainStorageMapCall []ownerKeyPair + expectedReadsSet map[string]struct{} + }{ + { + name: "domain storage map does not exist, createIfNotExists = false", + getStorageData: createV2AccountWithDomain(address, common.StorageDomainPathPublic), + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: true, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account register + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account storage map + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap because + // account storage map is loaded and cached from first + // GetDomainStorageMap(). + }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, + }, + { + name: "domain storage map does not exist, createIfNotExists = true", + getStorageData: createV2AccountWithDomain(address, common.StorageDomainPathPublic), + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account register + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account storage map + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(). + }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, + }, + { + name: "domain storage map exists, createIfNotExists = false", + getStorageData: createV2AccountWithDomain(address, common.StorageDomainPathStorage), + domain: common.StorageDomainPathStorage, + createIfNotExists: false, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account register + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account storage map + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(). 
+ }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, + }, + { + name: "domain storage map exists, createIfNotExists = true", + getStorageData: createV2AccountWithDomain(address, common.StorageDomainPathStorage), + domain: common.StorageDomainPathStorage, + createIfNotExists: true, + expectedDomainStorageMapIsNil: false, + expectedReadsFor1stGetDomainStorageMapCall: []ownerKeyPair{ + // Check if account is v2 + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account register + { + owner: address[:], + key: []byte(AccountStorageKey), + }, + // Read account storage map + { + owner: address[:], + key: []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + expectedReadsFor2ndGetDomainStorageMapCall: []ownerKeyPair{ + // No register reading from second GetDomainStorageMap() because + // domain storage map is created and cached in the first + // GetDomainStorageMap(). + }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + storedValues, storedIndices := tc.getStorageData() + + var ledgerReads []ownerKeyPair + ledgerReadsSet := make(map[string]struct{}) + + ledger := NewTestLedgerWithData( + func(owner, key, _ []byte) { + ledgerReads = append( + ledgerReads, + ownerKeyPair{ + owner: owner, + key: key, + }, + ) + ledgerReadsSet[string(owner)+"|"+string(key)] = struct{}{} + }, + nil, + storedValues, + storedIndices, + ) + + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: true, + }, + ) + + inter := NewTestInterpreterWithStorage(t, storage) + + domainStorageMap := storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) + require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) + require.Equal(t, tc.expectedReadsFor1stGetDomainStorageMapCall, ledgerReads) + + ledgerReads = ledgerReads[:0] + + domainStorageMap = storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) + require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) + require.Equal(t, tc.expectedReadsFor2ndGetDomainStorageMapCall, ledgerReads) + + // Check underlying ledger reads + require.Equal(t, len(ledgerReadsSet), len(tc.expectedReadsSet)) + for k := range ledgerReadsSet { + require.Contains(t, tc.expectedReadsSet, k) + } + }) + } +} + // createAndWriteAccountStorageMap creates account storage map with given domains and writes random values to domain storage map. 
func createAndWriteAccountStorageMap( t testing.TB, @@ -8207,3 +9143,35 @@ func checkAccountStorageMapData( require.Equal(tb, 1, len(rootSlabIDs)) require.Contains(tb, rootSlabIDs, accountSlabID) } + +func newSlabStorage(ledger atree.Ledger) *atree.PersistentSlabStorage { + decodeStorable := func( + decoder *cbor.StreamDecoder, + slabID atree.SlabID, + inlinedExtraData []atree.ExtraData, + ) ( + atree.Storable, + error, + ) { + return interpreter.DecodeStorable( + decoder, + slabID, + inlinedExtraData, + nil, + ) + } + + decodeTypeInfo := func(decoder *cbor.StreamDecoder) (atree.TypeInfo, error) { + return interpreter.DecodeTypeInfo(decoder, nil) + } + + ledgerStorage := atree.NewLedgerBaseStorage(ledger) + + return atree.NewPersistentSlabStorage( + ledgerStorage, + interpreter.CBOREncMode, + interpreter.CBORDecMode, + decodeStorable, + decodeTypeInfo, + ) +} From 9af92f66fac1926eea058c06c2f9bcc7cd3bd7a3 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 25 Nov 2024 10:38:15 -0600 Subject: [PATCH 42/52] Update unit tests to check unique register reads Currently, tests for register reads only check sequence of register reading. However, same register can be read multiple times and all register reads are included in the sequence. This commit updates tests to explicitly checks unique registers being read in addition to sequence of registers being read. --- runtime/storage_test.go | 99 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) diff --git a/runtime/storage_test.go b/runtime/storage_test.go index ab5640fe81..9b00223849 100644 --- a/runtime/storage_test.go +++ b/runtime/storage_test.go @@ -8096,6 +8096,7 @@ func TestGetDomainStorageMapRegisterReadsForNewAccount(t *testing.T) { expectedDomainStorageMapIsNil bool expectedReadsFor1stGetDomainStorageMapCall []ownerKeyPair expectedReadsFor2ndGetDomainStorageMapCall []ownerKeyPair + expectedReadsSet map[string]struct{} }{ // Test cases with storageFormatV2Enabled = false { @@ -8118,6 +8119,9 @@ func TestGetDomainStorageMapRegisterReadsForNewAccount(t *testing.T) { key: []byte(common.StorageDomainPathStorage.Identifier()), }, }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + }, }, { name: "storageFormatV2Enabled = false, domain storage map does not exist, createIfNotExists = true", @@ -8136,6 +8140,9 @@ func TestGetDomainStorageMapRegisterReadsForNewAccount(t *testing.T) { // No register reads from the second GetDomainStorageMap() because // domain storage map is created and cached in the first GetDomainStorageMap(). }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + }, }, // Test cases with storageFormatV2Enabled = true { @@ -8171,6 +8178,10 @@ func TestGetDomainStorageMapRegisterReadsForNewAccount(t *testing.T) { key: []byte(common.StorageDomainPathStorage.Identifier()), }, }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + }, }, { name: "storageFormatV2Enabled = true, domain storage map does not exist, createIfNotExists = true", @@ -8236,6 +8247,18 @@ func TestGetDomainStorageMapRegisterReadsForNewAccount(t *testing.T) { // No register reads from the second GetDomainStorageMap() because // domain storage map is created and cached in the first GetDomainStorageMap(). 
}, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainPathPrivate.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainPathPublic.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainContract.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainInbox.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainCapabilityController.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainCapabilityControllerTag.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainPathCapability.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainAccountCapability.Identifier(): {}, + }, }, } @@ -8243,6 +8266,7 @@ func TestGetDomainStorageMapRegisterReadsForNewAccount(t *testing.T) { t.Run(tc.name, func(t *testing.T) { var ledgerReads []ownerKeyPair + ledgerReadsSet := make(map[string]struct{}) // Create empty storage ledger := NewTestLedger( @@ -8254,6 +8278,7 @@ func TestGetDomainStorageMapRegisterReadsForNewAccount(t *testing.T) { key: key, }, ) + ledgerReadsSet[string(owner)+"|"+string(key)] = struct{}{} }, nil) @@ -8278,6 +8303,12 @@ func TestGetDomainStorageMapRegisterReadsForNewAccount(t *testing.T) { domainStorageMap = storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) require.Equal(t, tc.expectedReadsFor2ndGetDomainStorageMapCall, ledgerReads) + + // Check underlying ledger reads + require.Equal(t, len(ledgerReadsSet), len(tc.expectedReadsSet)) + for k := range ledgerReadsSet { + require.Contains(t, tc.expectedReadsSet, k) + } }) } } @@ -8346,6 +8377,7 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { expectedDomainStorageMapIsNil bool expectedReadsFor1stGetDomainStorageMapCall []ownerKeyPair expectedReadsFor2ndGetDomainStorageMapCall []ownerKeyPair + expectedReadsSet map[string]struct{} }{ // Test cases with storageFormatV2Enabled = false { @@ -8369,6 +8401,9 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { key: []byte(common.StorageDomainPathStorage.Identifier()), }, }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + }, }, { name: "storageFormatV2Enabled = false, domain storage map does not exist, createIfNotExists = true", @@ -8389,6 +8424,9 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { // domain storage map is created and cached in the first // GetDomainStorageMap(0). }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + }, }, { name: "storageFormatV2Enabled = false, domain storage map exists, createIfNotExists = false", @@ -8414,6 +8452,10 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { // domain storage map is loaded and cached in the first // GetDomainStorageMap(0). 
}, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, }, { name: "storageFormatV2Enabled = false, domain storage map exists, createIfNotExists = true", @@ -8439,6 +8481,10 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { // domain storage map is loaded and cached in the first // GetDomainStorageMap(0). }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, }, // Test cases with storageFormatV2Enabled = true { @@ -8472,6 +8518,10 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { key: []byte(common.StorageDomainPathStorage.Identifier()), }, }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + }, }, { name: "storageFormatV2Enabled = true, domain storage map does not exist, createIfNotExists = true", @@ -8515,6 +8565,12 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { // domain storage map is created and cached in the first // GetDomainStorageMap(). }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainPathPrivate.Identifier(): {}, + string(address[:]) + "|" + common.StorageDomainPathPublic.Identifier(): {}, + }, }, { name: "storageFormatV2Enabled = true, domain storage map exists, createIfNotExists = false", @@ -8550,6 +8606,11 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { // domain storage map is created and cached in the first // GetDomainStorageMap(). }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, }, { name: "storageFormatV2Enabled = true, domain storage map exists, createIfNotExists = true", @@ -8585,6 +8646,11 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { // domain storage map is created and cached in the first // GetDomainStorageMap(). 
}, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, }, } @@ -8594,6 +8660,7 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { storedValues, storedIndices := tc.getStorageData() var ledgerReads []ownerKeyPair + ledgerReadsSet := make(map[string]struct{}) ledger := NewTestLedgerWithData( func(owner, key, _ []byte) { @@ -8604,6 +8671,7 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { key: key, }, ) + ledgerReadsSet[string(owner)+"|"+string(key)] = struct{}{} }, nil, storedValues, @@ -8629,6 +8697,12 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { domainStorageMap = storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) require.Equal(t, tc.expectedReadsFor2ndGetDomainStorageMapCall, ledgerReads) + + // Check underlying ledger reads + require.Equal(t, len(ledgerReadsSet), len(tc.expectedReadsSet)) + for k := range ledgerReadsSet { + require.Contains(t, tc.expectedReadsSet, k) + } }) } } @@ -8714,6 +8788,7 @@ func TestGetDomainStorageMapRegisterReadsForV2Account(t *testing.T) { expectedDomainStorageMapIsNil bool expectedReadsFor1stGetDomainStorageMapCall []ownerKeyPair expectedReadsFor2ndGetDomainStorageMapCall []ownerKeyPair + expectedReadsSet map[string]struct{} }{ { name: "domain storage map does not exist, createIfNotExists = false", @@ -8743,6 +8818,10 @@ func TestGetDomainStorageMapRegisterReadsForV2Account(t *testing.T) { // account storage map is loaded and cached from first // GetDomainStorageMap(). }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, }, { name: "domain storage map does not exist, createIfNotExists = true", @@ -8772,6 +8851,10 @@ func TestGetDomainStorageMapRegisterReadsForV2Account(t *testing.T) { // domain storage map is created and cached in the first // GetDomainStorageMap(). }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, }, { name: "domain storage map exists, createIfNotExists = false", @@ -8801,6 +8884,10 @@ func TestGetDomainStorageMapRegisterReadsForV2Account(t *testing.T) { // domain storage map is created and cached in the first // GetDomainStorageMap(). }, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, }, { name: "domain storage map exists, createIfNotExists = true", @@ -8830,6 +8917,10 @@ func TestGetDomainStorageMapRegisterReadsForV2Account(t *testing.T) { // domain storage map is created and cached in the first // GetDomainStorageMap(). 
}, + expectedReadsSet: map[string]struct{}{ + string(address[:]) + "|" + AccountStorageKey: {}, + string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + }, }, } @@ -8839,6 +8930,7 @@ func TestGetDomainStorageMapRegisterReadsForV2Account(t *testing.T) { storedValues, storedIndices := tc.getStorageData() var ledgerReads []ownerKeyPair + ledgerReadsSet := make(map[string]struct{}) ledger := NewTestLedgerWithData( func(owner, key, _ []byte) { @@ -8849,6 +8941,7 @@ func TestGetDomainStorageMapRegisterReadsForV2Account(t *testing.T) { key: key, }, ) + ledgerReadsSet[string(owner)+"|"+string(key)] = struct{}{} }, nil, storedValues, @@ -8874,6 +8967,12 @@ func TestGetDomainStorageMapRegisterReadsForV2Account(t *testing.T) { domainStorageMap = storage.GetDomainStorageMap(inter, address, tc.domain, tc.createIfNotExists) require.Equal(t, tc.expectedDomainStorageMapIsNil, domainStorageMap == nil) require.Equal(t, tc.expectedReadsFor2ndGetDomainStorageMapCall, ledgerReads) + + // Check underlying ledger reads + require.Equal(t, len(ledgerReadsSet), len(tc.expectedReadsSet)) + for k := range ledgerReadsSet { + require.Contains(t, tc.expectedReadsSet, k) + } }) } } From c7cf0127ec09a18d10715ac456248855a4f4a82a Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 25 Nov 2024 13:11:36 -0600 Subject: [PATCH 43/52] Refactor getAccountStorageFormat() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Bastian Müller --- runtime/storage.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/runtime/storage.go b/runtime/storage.go index aab2f1daba..9bef9d5e72 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -301,12 +301,16 @@ func (s *Storage) getAccountStorageFormat( return storageFormatNew } -func (s *Storage) getCachedAccountFormat(address common.Address) (isV1 bool, isV2 bool) { +func (s *Storage) getCachedAccountFormat(address common.Address) (format storageFormat, known bool) { isV1, cached := s.cachedV1Accounts[address] if !cached { - return false, false + return storageFormatUnknown, false + } + if isV1 { + return storageFormatV1, true + } else { + return StorageFormatV2, true } - return isV1, !isV1 } // isV2Account returns true if given account is in account storage format v2. From 426a5489e36305d86763e55641144cf7611027f6 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 25 Nov 2024 13:14:48 -0600 Subject: [PATCH 44/52] Use refactored getCachedAccountFormat() --- runtime/storage.go | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/runtime/storage.go b/runtime/storage.go index 9bef9d5e72..6f811f2e68 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -264,14 +264,9 @@ func (s *Storage) getAccountStorageFormat( // Return cached account format (no register reading). - isCachedV1, isCachedV2 := s.getCachedAccountFormat(address) - - if isCachedV1 { - return storageFormatV1 - } - - if isCachedV2 { - return StorageFormatV2 + cachedFormat, known := s.getCachedAccountFormat(address) + if known { + return cachedFormat } // Check if account is v2 (by reading "stored" register). 
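For context, the two commits above (PATCH 43/52 and 44/52) replace the pair of booleans returned by getCachedAccountFormat() with a single (format, known) pair that callers can branch on directly. The following self-contained Go sketch is illustrative only and not part of any patch: the formatCache type, the address strings, and main are made-up stand-ins for Storage.cachedV1Accounts, used purely to show the lookup pattern.

package main

import "fmt"

type storageFormat uint8

const (
	storageFormatUnknown storageFormat = iota
	storageFormatV1
	storageFormatV2
)

func (f storageFormat) String() string {
	switch f {
	case storageFormatV1:
		return "v1"
	case storageFormatV2:
		return "v2"
	default:
		return "unknown"
	}
}

// formatCache mimics the per-address isV1 cache: a missing entry means the
// account's storage format has not been determined yet.
type formatCache map[string]bool

// lookup converts the cached boolean into a (format, known) pair,
// mirroring the refactored getCachedAccountFormat.
func (c formatCache) lookup(address string) (storageFormat, bool) {
	isV1, cached := c[address]
	if !cached {
		return storageFormatUnknown, false
	}
	if isV1 {
		return storageFormatV1, true
	}
	return storageFormatV2, true
}

func main() {
	cache := formatCache{"0x01": true, "0x02": false}

	for _, addr := range []string{"0x01", "0x02", "0x03"} {
		format, known := cache.lookup(addr)
		fmt.Printf("%s: format=%s known=%t\n", addr, format, known)
	}
}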
From 42d6b261f0e0048c66059a1f5a28b4ec95b51183 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 25 Nov 2024 15:44:50 -0600 Subject: [PATCH 45/52] Rename storageFormat constant --- runtime/storage.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/runtime/storage.go b/runtime/storage.go index 6f811f2e68..0b8c8ec493 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -46,7 +46,7 @@ const ( storageFormatUnknown storageFormat = iota storageFormatNew storageFormatV1 - StorageFormatV2 + storageFormatV2 ) type Storage struct { @@ -211,7 +211,7 @@ func (s *Storage) GetDomainStorageMap( s.cacheIsV1Account(address, true) - case StorageFormatV2, storageFormatNew: + case storageFormatV2, storageFormatNew: domainStorageMap = s.AccountStorageV2.GetDomainStorageMap( inter, address, @@ -272,7 +272,7 @@ func (s *Storage) getAccountStorageFormat( // Check if account is v2 (by reading "stored" register). if s.isV2Account(address) { - return StorageFormatV2 + return storageFormatV2 } // Check if account is v1 (by reading given domain register). @@ -304,7 +304,7 @@ func (s *Storage) getCachedAccountFormat(address common.Address) (format storage if isV1 { return storageFormatV1, true } else { - return StorageFormatV2, true + return storageFormatV2, true } } From 1568414631ed1926dce972de443e981f70cd809d Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 25 Nov 2024 16:23:06 -0600 Subject: [PATCH 46/52] Refactor Storage.GetDomainStorageMap() --- runtime/storage.go | 194 ++++++++++++++++++++++++--------------------- 1 file changed, 105 insertions(+), 89 deletions(-) diff --git a/runtime/storage.go b/runtime/storage.go index 0b8c8ec493..9f6e301327 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -44,7 +44,6 @@ type storageFormat uint8 const ( storageFormatUnknown storageFormat = iota - storageFormatNew storageFormatV1 storageFormatV2 ) @@ -173,127 +172,144 @@ func (s *Storage) GetDomainStorageMap( // When StorageFormatV2 is disabled, handle all accounts as v1 accounts. - domainStorageMap = s.AccountStorageV1.GetDomainStorageMap( + // Only read requested domain register. + + domainStorageMap = s.getDomainStorageMapForV1Account( address, domain, createIfNotExists, ) - if domainStorageMap != nil { - s.cacheIsV1Account(address, true) - } + return + } - } else { + // StorageFormatV2 is enabled. - // StorageFormatV2 is enabled. + // Check if cached account format is available. - onlyCheckSpecifiedDomainForV1 := !createIfNotExists - format := s.getAccountStorageFormat(address, domain, onlyCheckSpecifiedDomainForV1) + cachedFormat, known := s.getCachedAccountFormat(address) + if known { + return s.getDomainStorageMap( + cachedFormat, + inter, + address, + domain, + createIfNotExists, + ) + } - switch format { + // Check if account is v2 (by reading "stored" register). - case storageFormatUnknown: - // storageFormatUnknown is returned when !createIfNotExists - // and domain register doesn't exist. + if s.isV2Account(address) { + return s.getDomainStorageMapForV2Account( + inter, + address, + domain, + createIfNotExists, + ) + } - if createIfNotExists { - panic(errors.NewUnreachableError()) - } + // Check if account is v1 (by reading requested domain register). 
- domainStorageMap = nil + if s.hasDomainRegister(address, domain) { + return s.getDomainStorageMapForV1Account( + address, + domain, + createIfNotExists, + ) + } - case storageFormatV1: - domainStorageMap = s.AccountStorageV1.GetDomainStorageMap( - address, - domain, - createIfNotExists, - ) + // Domain register doesn't exist. - s.cacheIsV1Account(address, true) + // Return early if !createIfNotExists to avoid more register reading. - case storageFormatV2, storageFormatNew: - domainStorageMap = s.AccountStorageV2.GetDomainStorageMap( - inter, - address, - domain, - createIfNotExists, - ) + if !createIfNotExists { + return nil + } - s.cacheIsV1Account(address, false) + // At this point, account is either new account or v1 account without requested domain register. - default: - panic(errors.NewUnreachableError()) - } + // Check if account is v1 (by reading more domain registers) + + if s.isV1Account(address) { + return s.getDomainStorageMapForV1Account( + address, + domain, + createIfNotExists, + ) } - return domainStorageMap + // New account is treated as v2 account when feature flag is enabled. + + return s.getDomainStorageMapForV2Account( + inter, + address, + domain, + createIfNotExists, + ) } -// getAccountStorageFormat returns storageFormat for the given account. -// This function determines account format by reading registers. -// If onlyCheckSpecifiedDomainForV1 is true, only the given domain -// register is checked to determine if account format is v1. If -// domain register doesn't exist, StorageFormatUnknown is returned. -// -// When StorageFormatV2 is disabled: -// - No register reading (accounts are assumed to be in v1 format). -// -// When StorageFormatV2 is enabled: -// - For v2 accounts, "stored" register is read. -// -// - For v1 accounts, -// - If onlyCheckSpecifiedDomainForV1 is true, -// "stored" register and given domain register are read. -// - If onlyCheckSpecifiedDomainForV1 is false and given domain exists, -// "stored" register and given domain register are read. -// - If onlyCheckSpecifiedDomainForV1 is false and given domain doesn't exist, -// "stored" register, given domain register, and all domain registers are read. -// -// - For new accounts, "stored" register, given domain register, and all domain registers are read. -func (s *Storage) getAccountStorageFormat( +func (s *Storage) getDomainStorageMapForV1Account( address common.Address, domain common.StorageDomain, - onlyCheckSpecifiedDomainForV1 bool, -) (format storageFormat) { - - // All accounts are assumed to be in v1 format when StorageFormatV2 is disabled. - - if !s.Config.StorageFormatV2Enabled { - return storageFormatV1 - } - - // Return cached account format (no register reading). + createIfNotExists bool, +) *interpreter.DomainStorageMap { + domainStorageMap := s.AccountStorageV1.GetDomainStorageMap( + address, + domain, + createIfNotExists, + ) - cachedFormat, known := s.getCachedAccountFormat(address) - if known { - return cachedFormat - } + s.cacheIsV1Account(address, true) - // Check if account is v2 (by reading "stored" register). + return domainStorageMap +} - if s.isV2Account(address) { - return storageFormatV2 - } +func (s *Storage) getDomainStorageMapForV2Account( + inter *interpreter.Interpreter, + address common.Address, + domain common.StorageDomain, + createIfNotExists bool, +) *interpreter.DomainStorageMap { + domainStorageMap := s.AccountStorageV2.GetDomainStorageMap( + inter, + address, + domain, + createIfNotExists, + ) - // Check if account is v1 (by reading given domain register). 
+ s.cacheIsV1Account(address, false) - if s.hasDomainRegister(address, domain) { - return storageFormatV1 - } + return domainStorageMap +} - // Return early if onlyCheckSpecifiedDomainForV1 to prevent more register reading. +func (s *Storage) getDomainStorageMap( + format storageFormat, + inter *interpreter.Interpreter, + address common.Address, + domain common.StorageDomain, + createIfNotExists bool, +) *interpreter.DomainStorageMap { + switch format { - if onlyCheckSpecifiedDomainForV1 { - return storageFormatUnknown - } + case storageFormatV1: + return s.getDomainStorageMapForV1Account( + address, + domain, + createIfNotExists, + ) - // At this point, account is either new account or v1 account without given domain register. + case storageFormatV2: + return s.getDomainStorageMapForV2Account( + inter, + address, + domain, + createIfNotExists, + ) - if s.isV1Account(address) { - return storageFormatV1 + default: + panic(errors.NewUnreachableError()) } - - return storageFormatNew } func (s *Storage) getCachedAccountFormat(address common.Address) (format storageFormat, known bool) { From c8a61764cba5f2456b738adfd10fad5673b90579 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20M=C3=BCller?= Date: Mon, 25 Nov 2024 14:50:33 -0800 Subject: [PATCH 47/52] add return value to migration scheduling functions --- runtime/storage.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/runtime/storage.go b/runtime/storage.go index 8198e69fa1..d22c1cd62c 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -370,16 +370,24 @@ func (s *Storage) commit(inter *interpreter.Interpreter, commitContractUpdates b } } -func (s *Storage) ScheduleV2Migration(address common.Address) { +func (s *Storage) ScheduleV2Migration(address common.Address) bool { + if !s.Config.StorageFormatV2Enabled { + return false + } s.scheduledV2Migrations = append(s.scheduledV2Migrations, address) + return true } -func (s *Storage) ScheduleV2MigrationForModifiedAccounts() { +func (s *Storage) ScheduleV2MigrationForModifiedAccounts() bool { for address, isV1 := range s.cachedV1Accounts { //nolint:maprange if isV1 && s.PersistentSlabStorage.HasUnsavedChanges(atree.Address(address)) { - s.ScheduleV2Migration(address) + if !s.ScheduleV2Migration(address) { + return false + } } } + + return true } func (s *Storage) migrateV1AccountsToV2(inter *interpreter.Interpreter) error { From 7772eb085ca44dd60c194b21762b126939356fde Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 25 Nov 2024 16:51:27 -0600 Subject: [PATCH 48/52] Add test helper function --- runtime/storage_test.go | 114 ++++++++++++++++++++++------------------ 1 file changed, 64 insertions(+), 50 deletions(-) diff --git a/runtime/storage_test.go b/runtime/storage_test.go index c0c8c0d3fb..d1e24cfd9f 100644 --- a/runtime/storage_test.go +++ b/runtime/storage_test.go @@ -8120,7 +8120,7 @@ func TestGetDomainStorageMapRegisterReadsForNewAccount(t *testing.T) { }, }, expectedReadsSet: map[string]struct{}{ - string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, }, }, { @@ -8141,7 +8141,7 @@ func TestGetDomainStorageMapRegisterReadsForNewAccount(t *testing.T) { // domain storage map is created and cached in the first GetDomainStorageMap(). 
}, expectedReadsSet: map[string]struct{}{ - string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, }, }, // Test cases with storageFormatV2Enabled = true @@ -8211,16 +8211,16 @@ func TestGetDomainStorageMapRegisterReadsForNewAccount(t *testing.T) { }, }, expectedReadsSet: map[string]struct{}{ - string(address[:]) + "|" + AccountStorageKey: {}, - string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainPathPrivate.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainPathPublic.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainContract.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainInbox.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainCapabilityController.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainCapabilityControllerTag.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainPathCapability.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainAccountCapability.Identifier(): {}, + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathPrivate): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathPublic): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainContract): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainInbox): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainCapabilityController): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainCapabilityControllerTag): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathCapability): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainAccountCapability): {}, }, }, { @@ -8283,16 +8283,16 @@ func TestGetDomainStorageMapRegisterReadsForNewAccount(t *testing.T) { // domain storage map is created and cached in the first GetDomainStorageMap(). 
}, expectedReadsSet: map[string]struct{}{ - string(address[:]) + "|" + AccountStorageKey: {}, - string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainPathPrivate.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainPathPublic.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainContract.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainInbox.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainCapabilityController.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainCapabilityControllerTag.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainPathCapability.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainAccountCapability.Identifier(): {}, + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathPrivate): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathPublic): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainContract): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainInbox): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainCapabilityController): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainCapabilityControllerTag): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathCapability): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainAccountCapability): {}, }, }, } @@ -8437,7 +8437,7 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { }, }, expectedReadsSet: map[string]struct{}{ - string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, }, }, { @@ -8460,7 +8460,7 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { // GetDomainStorageMap(0). }, expectedReadsSet: map[string]struct{}{ - string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, }, }, { @@ -8488,8 +8488,8 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { // GetDomainStorageMap(0). }, expectedReadsSet: map[string]struct{}{ - string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, - string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, }, }, { @@ -8517,8 +8517,8 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { // GetDomainStorageMap(0). 
}, expectedReadsSet: map[string]struct{}{ - string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, - string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, }, }, // Test cases with storageFormatV2Enabled = true @@ -8562,10 +8562,10 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { }, }, expectedReadsSet: map[string]struct{}{ - string(address[:]) + "|" + AccountStorageKey: {}, - string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainPathPrivate.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainPathPublic.Identifier(): {}, + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathPrivate): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathPublic): {}, }, }, { @@ -8606,10 +8606,10 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { // GetDomainStorageMap(). }, expectedReadsSet: map[string]struct{}{ - string(address[:]) + "|" + AccountStorageKey: {}, - string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainPathPrivate.Identifier(): {}, - string(address[:]) + "|" + common.StorageDomainPathPublic.Identifier(): {}, + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathPrivate): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathPublic): {}, }, }, { @@ -8647,9 +8647,9 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { // GetDomainStorageMap(). }, expectedReadsSet: map[string]struct{}{ - string(address[:]) + "|" + AccountStorageKey: {}, - string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, - string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, }, }, { @@ -8687,9 +8687,9 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { // GetDomainStorageMap(). }, expectedReadsSet: map[string]struct{}{ - string(address[:]) + "|" + AccountStorageKey: {}, - string(address[:]) + "|" + common.StorageDomainPathStorage.Identifier(): {}, - string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndDomain(address, common.StorageDomainPathStorage): {}, + concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, }, }, } @@ -8859,8 +8859,8 @@ func TestGetDomainStorageMapRegisterReadsForV2Account(t *testing.T) { // GetDomainStorageMap(). 
}, expectedReadsSet: map[string]struct{}{ - string(address[:]) + "|" + AccountStorageKey: {}, - string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, }, }, { @@ -8892,8 +8892,8 @@ func TestGetDomainStorageMapRegisterReadsForV2Account(t *testing.T) { // GetDomainStorageMap(). }, expectedReadsSet: map[string]struct{}{ - string(address[:]) + "|" + AccountStorageKey: {}, - string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, }, }, { @@ -8925,8 +8925,8 @@ func TestGetDomainStorageMapRegisterReadsForV2Account(t *testing.T) { // GetDomainStorageMap(). }, expectedReadsSet: map[string]struct{}{ - string(address[:]) + "|" + AccountStorageKey: {}, - string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, }, }, { @@ -8958,8 +8958,8 @@ func TestGetDomainStorageMapRegisterReadsForV2Account(t *testing.T) { // GetDomainStorageMap(). }, expectedReadsSet: map[string]struct{}{ - string(address[:]) + "|" + AccountStorageKey: {}, - string(address[:]) + "|" + string([]byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, + concatRegisterAddressAndKey(address, []byte(AccountStorageKey)): {}, + concatRegisterAddressAndKey(address, []byte{'$', 0, 0, 0, 0, 0, 0, 0, 1}): {}, }, }, } @@ -9175,3 +9175,17 @@ func newSlabStorage(ledger atree.Ledger) *atree.PersistentSlabStorage { decodeTypeInfo, ) } + +func concatRegisterAddressAndKey( + address common.Address, + key []byte, +) string { + return string(address[:]) + "|" + string(key) +} + +func concatRegisterAddressAndDomain( + address common.Address, + domain common.StorageDomain, +) string { + return string(address[:]) + "|" + domain.Identifier() +} From ba76c1111ddc6f985953c2d56f969a528abdf905 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 25 Nov 2024 16:59:00 -0600 Subject: [PATCH 49/52] Extract new function NewPersistentSlabStorage() --- runtime/storage.go | 16 ++++++++++++---- runtime/storage_test.go | 37 ++----------------------------------- 2 files changed, 14 insertions(+), 39 deletions(-) diff --git a/runtime/storage.go b/runtime/storage.go index 8198e69fa1..49aaa92b4a 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -69,11 +69,10 @@ type Storage struct { var _ atree.SlabStorage = &Storage{} var _ interpreter.Storage = &Storage{} -func NewStorage( +func NewPersistentSlabStorage( ledger atree.Ledger, memoryGauge common.MemoryGauge, - config StorageConfig, -) *Storage { +) *atree.PersistentSlabStorage { decodeStorable := func( decoder *cbor.StreamDecoder, slabID atree.SlabID, @@ -95,13 +94,22 @@ func NewStorage( } ledgerStorage := atree.NewLedgerBaseStorage(ledger) - persistentSlabStorage := atree.NewPersistentSlabStorage( + + return atree.NewPersistentSlabStorage( ledgerStorage, interpreter.CBOREncMode, interpreter.CBORDecMode, decodeStorable, decodeTypeInfo, ) +} + +func NewStorage( + ledger atree.Ledger, + memoryGauge common.MemoryGauge, + config StorageConfig, +) *Storage { + persistentSlabStorage := NewPersistentSlabStorage(ledger, memoryGauge) accountStorageV1 := NewAccountStorageV1( ledger, 
diff --git a/runtime/storage_test.go b/runtime/storage_test.go index d1e24cfd9f..f0bbaef75b 100644 --- a/runtime/storage_test.go +++ b/runtime/storage_test.go @@ -30,7 +30,6 @@ import ( "strings" "testing" - "github.com/fxamacker/cbor/v2" "github.com/onflow/atree" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -8363,7 +8362,7 @@ func TestGetDomainStorageMapRegisterReadsForV1Account(t *testing.T) { return func() (storedValues map[string][]byte, StorageIndices map[string]uint64) { ledger := NewTestLedger(nil, nil) - persistentSlabStorage := newSlabStorage(ledger) + persistentSlabStorage := NewPersistentSlabStorage(ledger, nil) orderedMap, err := atree.NewMap( persistentSlabStorage, @@ -8761,7 +8760,7 @@ func TestGetDomainStorageMapRegisterReadsForV2Account(t *testing.T) { return func() (storedValues map[string][]byte, StorageIndices map[string]uint64) { ledger := NewTestLedger(nil, nil) - persistentSlabStorage := newSlabStorage(ledger) + persistentSlabStorage := NewPersistentSlabStorage(ledger, nil) accountOrderedMap, err := atree.NewMap( persistentSlabStorage, @@ -9144,38 +9143,6 @@ func checkAccountStorageMapData( require.Contains(tb, rootSlabIDs, accountSlabID) } -func newSlabStorage(ledger atree.Ledger) *atree.PersistentSlabStorage { - decodeStorable := func( - decoder *cbor.StreamDecoder, - slabID atree.SlabID, - inlinedExtraData []atree.ExtraData, - ) ( - atree.Storable, - error, - ) { - return interpreter.DecodeStorable( - decoder, - slabID, - inlinedExtraData, - nil, - ) - } - - decodeTypeInfo := func(decoder *cbor.StreamDecoder) (atree.TypeInfo, error) { - return interpreter.DecodeTypeInfo(decoder, nil) - } - - ledgerStorage := atree.NewLedgerBaseStorage(ledger) - - return atree.NewPersistentSlabStorage( - ledgerStorage, - interpreter.CBOREncMode, - interpreter.CBORDecMode, - decodeStorable, - decodeTypeInfo, - ) -} - func concatRegisterAddressAndKey( address common.Address, key []byte, From b51053d112dea40d7e176793fa84534a404cf1ba Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Mon, 25 Nov 2024 17:09:52 -0600 Subject: [PATCH 50/52] Refactor Storage.isV1Account() --- runtime/storage.go | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/runtime/storage.go b/runtime/storage.go index 9f6e301327..d6eb222f0f 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -356,14 +356,7 @@ func (s *Storage) isV1Account(address common.Address) (isV1 bool) { // Check if a storage map register exists for any of the domains. // Check the most frequently used domains first, such as storage, public, private. 
for _, domain := range common.AllStorageDomains { - _, domainExists, err := readSlabIndexFromRegister( - s.Ledger, - address, - []byte(domain.Identifier()), - ) - if err != nil { - panic(err) - } + domainExists := s.hasDomainRegister(address, domain) if domainExists { return true } From 1b79b41cb3f1636b8c47dd36b5e9f2dee85b68c3 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Wed, 27 Nov 2024 12:13:03 -0600 Subject: [PATCH 51/52] Bump atree version to v0.8.1 --- go.mod | 4 ++-- go.sum | 8 ++++---- tools/compatibility-check/go.mod | 4 ++-- tools/compatibility-check/go.sum | 8 ++++---- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index ac296d2e54..3412ef864a 100644 --- a/go.mod +++ b/go.mod @@ -13,10 +13,10 @@ require ( github.com/kr/pretty v0.3.1 github.com/leanovate/gopter v0.2.9 github.com/logrusorgru/aurora/v4 v4.0.0 - github.com/onflow/atree v0.8.1-0.20241028213850-07c884e4abcf + github.com/onflow/atree v0.8.1 github.com/rivo/uniseg v0.4.4 github.com/schollz/progressbar/v3 v3.13.1 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c github.com/tidwall/pretty v1.2.1 github.com/turbolent/prettier v0.0.0-20220320183459-661cc755135d diff --git a/go.sum b/go.sum index 417d758195..4cbe6e714d 100644 --- a/go.sum +++ b/go.sum @@ -74,8 +74,8 @@ github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2Em github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/onflow/atree v0.8.1-0.20241028213850-07c884e4abcf h1:MDB/hdwr5GMsZHIIrAw3gBnmWBy5XjsZ4/6kftv9d5c= -github.com/onflow/atree v0.8.1-0.20241028213850-07c884e4abcf/go.mod h1:U8PGG42VrSJqjdfJ9NGQ2fenkyFRYlgtfHsZM61H4zY= +github.com/onflow/atree v0.8.1 h1:DAnPnL9/Ks3LaAnkQVokokTBG/znTW0DJfovDtJDhLI= +github.com/onflow/atree v0.8.1/go.mod h1:FT6udJF9Q7VQTu3wknDhFX+VV4D44ZGdqtTAE5iztck= github.com/onflow/crypto v0.25.0 h1:BeWbLsh3ZD13Ej+Uky6kg1PL1ZIVBDVX+2MVBNwqddg= github.com/onflow/crypto v0.25.0/go.mod h1:C8FbaX0x8y+FxWjbkHy0Q4EASCDR9bSPWZqlpCLYyVI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -97,8 +97,8 @@ github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c h1:HelZ2kAFadG0La9d+4htN4HzQ68Bm2iM9qKMSMES6xg= github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c/go.mod h1:JlzghshsemAMDGZLytTFY8C1JQxQPhnatWqNwUXjggo= 
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= diff --git a/tools/compatibility-check/go.mod b/tools/compatibility-check/go.mod index 0a4813d95b..ca326637cd 100644 --- a/tools/compatibility-check/go.mod +++ b/tools/compatibility-check/go.mod @@ -7,7 +7,7 @@ require ( github.com/onflow/flow-core-contracts/lib/go/contracts v1.4.0 github.com/onflow/flow-go v0.38.0-preview.0.0.20241018215103-774056466e36 github.com/rs/zerolog v1.29.0 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 ) require ( @@ -43,7 +43,7 @@ require ( github.com/multiformats/go-multibase v0.2.0 // indirect github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-varint v0.0.7 // indirect - github.com/onflow/atree v0.8.1-0.20241028213850-07c884e4abcf // indirect + github.com/onflow/atree v0.8.1 // indirect github.com/onflow/crypto v0.25.2 // indirect github.com/onflow/flow-core-contracts/lib/go/templates v1.3.3-0.20241017220455-79fdc6c8ba53 // indirect github.com/onflow/flow-ft/lib/go/contracts v1.0.1 // indirect diff --git a/tools/compatibility-check/go.sum b/tools/compatibility-check/go.sum index 4ab363961b..39d0ed9012 100644 --- a/tools/compatibility-check/go.sum +++ b/tools/compatibility-check/go.sum @@ -342,8 +342,8 @@ github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/n github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onflow/atree v0.8.1-0.20241028213850-07c884e4abcf h1:MDB/hdwr5GMsZHIIrAw3gBnmWBy5XjsZ4/6kftv9d5c= -github.com/onflow/atree v0.8.1-0.20241028213850-07c884e4abcf/go.mod h1:U8PGG42VrSJqjdfJ9NGQ2fenkyFRYlgtfHsZM61H4zY= +github.com/onflow/atree v0.8.1 h1:DAnPnL9/Ks3LaAnkQVokokTBG/znTW0DJfovDtJDhLI= +github.com/onflow/atree v0.8.1/go.mod h1:FT6udJF9Q7VQTu3wknDhFX+VV4D44ZGdqtTAE5iztck= github.com/onflow/crypto v0.25.2 h1:GjHunqVt+vPcdqhxxhAXiMIF3YiLX7gTuTR5O+VG2ns= github.com/onflow/crypto v0.25.2/go.mod h1:fY7eLqUdMKV8EGOw301unP8h7PvLVy8/6gVR++/g0BY= github.com/onflow/flow-core-contracts/lib/go/contracts v1.4.0 h1:R86HaOuk6vpuECZnriEUE7bw9inC2AtdSn8lL/iwQLQ= @@ -453,8 +453,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c h1:HelZ2kAFadG0La9d+4htN4HzQ68Bm2iM9qKMSMES6xg= From d0026257ee4aff0d5e881287c1e122c17e39d867 Mon Sep 17 00:00:00 2001 From: Faye Amacker <33205765+fxamacker@users.noreply.github.com> Date: Wed, 27 Nov 2024 15:53:08 -0600 
Subject: [PATCH 52/52] Add Storage.AccountStorageFormat() --- runtime/storage.go | 53 +++++++-- runtime/storage_test.go | 243 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 285 insertions(+), 11 deletions(-) diff --git a/runtime/storage.go b/runtime/storage.go index 4a2c79136c..87f3cf094d 100644 --- a/runtime/storage.go +++ b/runtime/storage.go @@ -40,12 +40,12 @@ type StorageConfig struct { StorageFormatV2Enabled bool } -type storageFormat uint8 +type StorageFormat uint8 const ( - storageFormatUnknown storageFormat = iota - storageFormatV1 - storageFormatV2 + StorageFormatUnknown StorageFormat = iota + StorageFormatV1 + StorageFormatV2 ) type Storage struct { @@ -292,7 +292,7 @@ func (s *Storage) getDomainStorageMapForV2Account( } func (s *Storage) getDomainStorageMap( - format storageFormat, + format StorageFormat, inter *interpreter.Interpreter, address common.Address, domain common.StorageDomain, @@ -300,14 +300,14 @@ func (s *Storage) getDomainStorageMap( ) *interpreter.DomainStorageMap { switch format { - case storageFormatV1: + case StorageFormatV1: return s.getDomainStorageMapForV1Account( address, domain, createIfNotExists, ) - case storageFormatV2: + case StorageFormatV2: return s.getDomainStorageMapForV2Account( inter, address, @@ -320,15 +320,15 @@ func (s *Storage) getDomainStorageMap( } } -func (s *Storage) getCachedAccountFormat(address common.Address) (format storageFormat, known bool) { +func (s *Storage) getCachedAccountFormat(address common.Address) (format StorageFormat, known bool) { isV1, cached := s.cachedV1Accounts[address] if !cached { - return storageFormatUnknown, false + return StorageFormatUnknown, false } if isV1 { - return storageFormatV1, true + return StorageFormatV1, true } else { - return storageFormatV2, true + return StorageFormatV2, true } } @@ -708,6 +708,37 @@ func (s *Storage) CheckHealth() error { return nil } +// AccountStorageFormat returns either StorageFormatV1 or StorageFormatV2 for existing accounts, +// and StorageFormatUnknown for non-existing accounts. 
+func (s *Storage) AccountStorageFormat(address common.Address) (format StorageFormat) { + cachedFormat, known := s.getCachedAccountFormat(address) + if known { + return cachedFormat + } + + defer func() { + // Cache account fomat + switch format { + case StorageFormatV1: + s.cacheIsV1Account(address, true) + case StorageFormatV2: + s.cacheIsV1Account(address, false) + } + }() + + if s.Config.StorageFormatV2Enabled { + if s.isV2Account(address) { + return StorageFormatV2 + } + } + + if s.isV1Account(address) { + return StorageFormatV1 + } + + return StorageFormatUnknown +} + type UnreferencedRootSlabsError struct { UnreferencedRootSlabIDs []atree.SlabID } diff --git a/runtime/storage_test.go b/runtime/storage_test.go index 4705101493..c9efbfdc5f 100644 --- a/runtime/storage_test.go +++ b/runtime/storage_test.go @@ -8976,6 +8976,249 @@ func TestGetDomainStorageMapRegisterReadsForV2Account(t *testing.T) { } } +func TestAccountStorageFormatForNonExistingAccount(t *testing.T) { + + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + testCases := []struct { + name string + storageFormatV2Enabled bool + format StorageFormat + }{ + { + name: "non-existing account, storageFormatV2Enabled = false", + storageFormatV2Enabled: false, + format: StorageFormatUnknown, + }, + { + name: "non-existing account, storageFormatV2Enabled = true", + storageFormatV2Enabled: true, + format: StorageFormatUnknown, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ledger := NewTestLedger(nil, nil) + + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: tc.storageFormatV2Enabled, + }, + ) + + for range 2 { + format := storage.AccountStorageFormat(address) + require.Equal(t, tc.format, format) + } + }) + } +} + +func TestAccountStorageFormatForV1Account(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + createV1AccountWithDomain := func( + address common.Address, + domain common.StorageDomain, + ) (storedValues map[string][]byte, StorageIndices map[string]uint64) { + ledger := NewTestLedger(nil, nil) + + persistentSlabStorage := NewPersistentSlabStorage(ledger, nil) + + orderedMap, err := atree.NewMap( + persistentSlabStorage, + atree.Address(address), + atree.NewDefaultDigesterBuilder(), + interpreter.EmptyTypeInfo{}, + ) + require.NoError(t, err) + + slabIndex := orderedMap.SlabID().Index() + + for i := range 3 { + + key := interpreter.StringStorageMapKey(strconv.Itoa(i)) + + value := interpreter.NewUnmeteredIntValueFromInt64(int64(i)) + + existingStorable, err := orderedMap.Set( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + value, + ) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Commit domain storage map + err = persistentSlabStorage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + // Create domain register + err = ledger.SetValue(address[:], []byte(domain.Identifier()), slabIndex[:]) + require.NoError(t, err) + + return ledger.StoredValues, ledger.StorageIndices + } + + testCases := []struct { + name string + storageFormatV2Enabled bool + format StorageFormat + }{ + { + name: "v1 account, storageFormatV2Enabled = false", + storageFormatV2Enabled: false, + format: StorageFormatV1, + }, + { + name: "v1 account, storageFormatV2Enabled = true", + storageFormatV2Enabled: true, + format: StorageFormatV1, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + storedValues, storedIndices := 
createV1AccountWithDomain( + address, + common.StorageDomainPathStorage, + ) + + ledger := NewTestLedgerWithData(nil, nil, storedValues, storedIndices) + + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: tc.storageFormatV2Enabled, + }, + ) + + for range 2 { + format := storage.AccountStorageFormat(address) + require.Equal(t, tc.format, format) + } + }) + } +} + +func TestAccountStorageFormatForV2Account(t *testing.T) { + t.Parallel() + + address := common.MustBytesToAddress([]byte{0x1}) + + createV2AccountWithDomain := func( + address common.Address, + domain common.StorageDomain, + ) (storedValues map[string][]byte, StorageIndices map[string]uint64) { + ledger := NewTestLedger(nil, nil) + + persistentSlabStorage := NewPersistentSlabStorage(ledger, nil) + + accountOrderedMap, err := atree.NewMap( + persistentSlabStorage, + atree.Address(address), + atree.NewDefaultDigesterBuilder(), + interpreter.EmptyTypeInfo{}, + ) + require.NoError(t, err) + + slabIndex := accountOrderedMap.SlabID().Index() + + domainOrderedMap, err := atree.NewMap( + persistentSlabStorage, + atree.Address(address), + atree.NewDefaultDigesterBuilder(), + interpreter.EmptyTypeInfo{}, + ) + require.NoError(t, err) + + domainKey := interpreter.Uint64StorageMapKey(domain) + + existingDomain, err := accountOrderedMap.Set( + domainKey.AtreeValueCompare, + domainKey.AtreeValueHashInput, + domainKey.AtreeValue(), + domainOrderedMap, + ) + require.NoError(t, err) + require.Nil(t, existingDomain) + + for i := range 3 { + + key := interpreter.StringStorageMapKey(strconv.Itoa(i)) + + value := interpreter.NewUnmeteredIntValueFromInt64(int64(i)) + + existingStorable, err := domainOrderedMap.Set( + key.AtreeValueCompare, + key.AtreeValueHashInput, + key.AtreeValue(), + value, + ) + require.NoError(t, err) + require.Nil(t, existingStorable) + } + + // Commit domain storage map + err = persistentSlabStorage.FastCommit(runtime.NumCPU()) + require.NoError(t, err) + + // Create account register + err = ledger.SetValue(address[:], []byte(AccountStorageKey), slabIndex[:]) + require.NoError(t, err) + + return ledger.StoredValues, ledger.StorageIndices + } + + testCases := []struct { + name string + storageFormatV2Enabled bool + format StorageFormat + }{ + { + name: "v2 account, storageFormatV2Enabled = true", + storageFormatV2Enabled: true, + format: StorageFormatV2, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + storedValues, storedIndices := createV2AccountWithDomain( + address, + common.StorageDomainPathStorage, + ) + + ledger := NewTestLedgerWithData(nil, nil, storedValues, storedIndices) + + storage := NewStorage( + ledger, + nil, + StorageConfig{ + StorageFormatV2Enabled: tc.storageFormatV2Enabled, + }, + ) + + for range 2 { + format := storage.AccountStorageFormat(address) + require.Equal(t, tc.format, format) + } + }) + } +} + // createAndWriteAccountStorageMap creates account storage map with given domains and writes random values to domain storage map. func createAndWriteAccountStorageMap( t testing.TB,