diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 527ee9fd..167f2ea7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -14,11 +14,11 @@ jobs: test: strategy: matrix: - go-version: [1.18.x, 1.19.x] + go-version: [1.19.x, 1.20.x] os: [ubuntu-latest] make-cmd: [test, test-replayhistory] exclude: - - go-version: 1.18.x + - go-version: 1.19.x make-cmd: test-replayhistory runs-on: ${{ matrix.os }} steps: diff --git a/.golangci.yml b/.golangci.yml index d7331330..13030fb4 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -59,5 +59,5 @@ run: timeout: 30m skip-dirs: - - "pkg/sqlstore/impl/system/internal/db" + - "pkg/sqlstore/impl/system/db" - "internal/router/controllers/apiv1" diff --git a/Makefile b/Makefile index 698f7333..bb14e68a 100644 --- a/Makefile +++ b/Makefile @@ -81,7 +81,7 @@ lint: .PHONY: lint # OpenAPI -SPEC_URL=https://raw.githubusercontent.com/tablelandnetwork/docs/main/specs/validator/tableland-openapi-spec.yaml +SPEC_URL=https://raw.githubusercontent.com/tablelandnetwork/docs/bcalza/merkle/specs/validator/tableland-openapi-spec.yaml APIV1=${PWD}/internal/router/controllers/apiv1 gen-api-v1: mkdir -p ${APIV1} diff --git a/cmd/api/config.go b/cmd/api/config.go index 1ace3365..bd1b4e75 100644 --- a/cmd/api/config.go +++ b/cmd/api/config.go @@ -122,6 +122,16 @@ type ChainConfig struct { MinBlockDepth int `default:"5"` } HashCalculationStep int64 `default:"1000"` + MerkleTree struct { + Enabled bool `default:"false"` + // We aim to have a new root calculated every 30 min. + // That means that the step should be configured to LeavesSnapshottingStep = 30*60/chain_avg_block_time_in_seconds. + // e.g. In Ethereum, chain_avg_block_time_in_seconds = 12s, so LeavesSnapshottingStep for Ethereum is 30*60/12 = 150. + LeavesSnapshottingStep int64 `default:"1000"` + // We aim to have a new root calculated every 30 min. Setting the default to half of that. 
+ PublishingInterval string `default:"15m"` + RootRegistryContract string `default:""` + } } func setupConfig() (*config, string) { diff --git a/cmd/api/main.go b/cmd/api/main.go index a5e1ddb1..b6f954f9 100644 --- a/cmd/api/main.go +++ b/cmd/api/main.go @@ -31,6 +31,7 @@ import ( epimpl "github.com/textileio/go-tableland/pkg/eventprocessor/impl" executor "github.com/textileio/go-tableland/pkg/eventprocessor/impl/executor/impl" "github.com/textileio/go-tableland/pkg/logging" + "github.com/textileio/go-tableland/pkg/metrics" nonceimpl "github.com/textileio/go-tableland/pkg/nonce/impl" "github.com/textileio/go-tableland/pkg/parsing" @@ -47,6 +48,9 @@ import ( "github.com/textileio/go-tableland/pkg/telemetry/storage" "github.com/textileio/go-tableland/pkg/wallet" "go.opentelemetry.io/otel/attribute" + + merklepublisher "github.com/textileio/go-tableland/pkg/merkletree/publisher" + merklepublisherimpl "github.com/textileio/go-tableland/pkg/merkletree/publisher/impl" ) type moduleCloser func(ctx context.Context) error @@ -83,10 +87,18 @@ func main() { log.Fatal().Err(err).Msg("creating parser") } + // Wiring + treeDatabaseURL := path.Join(dirPath, "trees.db") + treeStore, err := merklepublisherimpl.NewMerkleTreeStore(treeDatabaseURL) + if err != nil { + log.Fatal().Err(err).Msg("creating new merkle tree store") + } + // Chain stacks. chainStacks, closeChainStacks, err := createChainStacks( databaseURL, parser, + treeStore, config.Chains, config.TableConstraints, config.Analytics.FetchExtraBlockInfo) @@ -105,7 +117,7 @@ func main() { } // HTTP API server. 
- closeHTTPServer, err := createAPIServer(config.HTTP, config.Gateway, parser, userStore, chainStacks) + closeHTTPServer, err := createAPIServer(config.HTTP, config.Gateway, parser, userStore, treeStore, chainStacks) if err != nil { log.Fatal().Err(err).Msg("creating HTTP server") } @@ -164,6 +176,7 @@ func createChainIDStack( dbURI string, executorsDB *sql.DB, parser parsing.SQLValidator, + treeStore *merklepublisherimpl.MerkleTreeStore, tableConstraints TableConstraints, fetchExtraBlockInfo bool, ) (chains.ChainStack, error) { @@ -268,6 +281,30 @@ func createChainIDStack( if err := ep.Start(); err != nil { return chains.ChainStack{}, fmt.Errorf("starting event processor: %s", err) } + + // starts Merkle Tree Publisher + var merkleRootPublisher *merklepublisher.MerkleRootPublisher + if config.MerkleTree.Enabled { + scAddress := common.HexToAddress(config.MerkleTree.RootRegistryContract) + rootRegistry, err := merklepublisherimpl.NewMerkleRootRegistryEthereum(conn, scAddress, wallet, tracker) + if err != nil { + return chains.ChainStack{}, fmt.Errorf("creating merkle root registry: %s", err) + } + + merkleTreePublishingInterval, err := time.ParseDuration(config.MerkleTree.PublishingInterval) + if err != nil { + return chains.ChainStack{}, fmt.Errorf("parsing merkle tree publishing interval: %s", err) + } + + merkleRootPublisher = merklepublisher.NewMerkleRootPublisher( + merklepublisherimpl.NewLeavesStore(systemStore), + treeStore, + rootRegistry, + merkleTreePublishingInterval, + ) + merkleRootPublisher.Start() + } + return chains.ChainStack{ Store: systemStore, Registry: registry, @@ -284,6 +321,11 @@ func createChainIDStack( if err := systemStore.Close(); err != nil { return fmt.Errorf("closing system store for chain_id %d: %s", config.ChainID, err) } + + if config.MerkleTree.Enabled { + merkleRootPublisher.Close() + } + return nil }, }, nil @@ -414,6 +456,7 @@ func createParser(queryConstraints QueryConstraints) (parsing.SQLValidator, erro func 
createChainStacks( databaseURL string, parser parsing.SQLValidator, + treeStore *merklepublisherimpl.MerkleTreeStore, chainsConfig []ChainConfig, tableConstraintsConfig TableConstraints, fetchExtraBlockInfo bool, @@ -440,6 +483,7 @@ func createChainStacks( databaseURL, executorsDB, parser, + treeStore, tableConstraintsConfig, fetchExtraBlockInfo) if err != nil { @@ -480,6 +524,7 @@ func createAPIServer( gatewayConfig GatewayConfig, parser parsing.SQLValidator, userStore *user.UserStore, + treeStore *merklepublisherimpl.MerkleTreeStore, chainStacks map[tableland.ChainID]chains.ChainStack, ) (moduleCloser, error) { instrUserStore, err := sqlstoreimpl.NewInstrumentedUserStore(userStore) @@ -519,6 +564,7 @@ func createAPIServer( router, err := router.ConfiguredRouter( mesaService, systemService, + treeStore, httpConfig.MaxRequestPerInterval, rateLimInterval, supportedChainIDs, diff --git a/docker/deployed/staging/api/config.json b/docker/deployed/staging/api/config.json index 404ecff3..20807ec5 100644 --- a/docker/deployed/staging/api/config.json +++ b/docker/deployed/staging/api/config.json @@ -76,7 +76,13 @@ "StuckInterval": "10m", "MinBlockDepth": 0 }, - "HashCalculationStep": 100 + "HashCalculationStep": 100, + "MerkleTree": { + "Enabled" : true, + "LeavesSnapshottingStep" : 5, + "PublishingInterval" : "5m", + "RootRegistryContract" : "0x8065b18fDF17E6180614308bCFb798E877A4c291" + } } ] } \ No newline at end of file diff --git a/go.mod b/go.mod index 62a32993..60e93a79 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/textileio/go-tableland -go 1.18 +go 1.19 require ( cloud.google.com/go/bigquery v1.45.0 @@ -24,12 +24,14 @@ require ( github.com/stretchr/testify v1.8.2 github.com/tablelandnetwork/sqlparser v0.0.0-20221230162331-b318f234cefd github.com/textileio/cli v1.0.2 + go.etcd.io/bbolt v1.3.6 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 go.opentelemetry.io/otel v1.14.0 go.opentelemetry.io/otel/exporters/prometheus 
v0.37.0 go.opentelemetry.io/otel/metric v0.37.0 go.opentelemetry.io/otel/sdk/metric v0.37.0 go.uber.org/atomic v1.10.0 + golang.org/x/crypto v0.6.0 golang.org/x/sync v0.1.0 ) @@ -103,7 +105,6 @@ require ( go.opentelemetry.io/otel/trace v1.14.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.19.0 // indirect - golang.org/x/crypto v0.6.0 // indirect golang.org/x/net v0.7.0 // indirect golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 // indirect golang.org/x/sys v0.5.0 // indirect diff --git a/go.sum b/go.sum index b6c26aea..a2b207c9 100644 --- a/go.sum +++ b/go.sum @@ -1244,6 +1244,7 @@ gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= diff --git a/internal/router/controllers/apiv1/api_proof.go b/internal/router/controllers/apiv1/api_proof.go new file mode 100644 index 00000000..dbefe49a --- /dev/null +++ b/internal/router/controllers/apiv1/api_proof.go @@ -0,0 +1,19 @@ +/* + * Tableland Validator - OpenAPI 3.0 + * + * In Tableland, Validators are the execution unit/actors of the protocol. They have the following responsibilities: - Listen to on-chain events to materialize Tableland-compliant SQL queries in a database engine (currently, SQLite by default). - Serve read-queries (e.g: SELECT * FROM foo_69_1) to the external world. - Serve state queries (e.g. list tables, get receipts, etc) to the external world. 
In the 1.0.0 release of the Tableland Validator API, we've switched to a design first approach! You can now help us improve the API whether it's by making changes to the definition itself or to the code. That way, with time, we can improve the API in general, and expose some of the new features in OAS3. + * + * API version: 1.0.0 + * Contact: carson@textile.io + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ +package apiv1 + +import ( + "net/http" +) + +func QueryProof(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=UTF-8") + w.WriteHeader(http.StatusOK) +} diff --git a/internal/router/controllers/apiv1/model_proof.go b/internal/router/controllers/apiv1/model_proof.go new file mode 100644 index 00000000..36596471 --- /dev/null +++ b/internal/router/controllers/apiv1/model_proof.go @@ -0,0 +1,15 @@ +/* + * Tableland Validator - OpenAPI 3.0 + * + * In Tableland, Validators are the execution unit/actors of the protocol. They have the following responsibilities: - Listen to on-chain events to materialize Tableland-compliant SQL queries in a database engine (currently, SQLite by default). - Serve read-queries (e.g: SELECT * FROM foo_69_1) to the external world. - Serve state queries (e.g. list tables, get receipts, etc) to the external world. In the 1.0.0 release of the Tableland Validator API, we've switched to a design first approach! You can now help us improve the API whether it's by making changes to the definition itself or to the code. That way, with time, we can improve the API in general, and expose some of the new features in OAS3. 
+ * + * API version: 1.0.0 + * Contact: carson@textile.io + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ +package apiv1 + +type Proof struct { + + Proof []string `json:"proof,omitempty"` +} diff --git a/internal/router/controllers/apiv1/routers.go b/internal/router/controllers/apiv1/routers.go index f61e6894..a54761e7 100644 --- a/internal/router/controllers/apiv1/routers.go +++ b/internal/router/controllers/apiv1/routers.go @@ -62,6 +62,13 @@ var routes = Routes{ Health, }, + Route{ + "QueryProof", + strings.ToUpper("Get"), + "/api/v1/proof/{chainId}/{tableId}/{row}", + QueryProof, + }, + Route{ "QueryByStatement", strings.ToUpper("Get"), diff --git a/internal/router/controllers/controller.go b/internal/router/controllers/controller.go index 2999831b..05a9b9ca 100644 --- a/internal/router/controllers/controller.go +++ b/internal/router/controllers/controller.go @@ -2,8 +2,10 @@ package controllers import ( "context" + "encoding/hex" "encoding/json" "fmt" + "math/big" "net/http" "strconv" "strings" @@ -20,6 +22,7 @@ import ( "github.com/textileio/go-tableland/internal/system" "github.com/textileio/go-tableland/internal/tableland" "github.com/textileio/go-tableland/pkg/errors" + "github.com/textileio/go-tableland/pkg/merkletree" "github.com/textileio/go-tableland/pkg/tables" "github.com/textileio/go-tableland/pkg/telemetry" ) @@ -29,15 +32,22 @@ type SQLRunner interface { RunReadQuery(ctx context.Context, stmt string) (*tableland.TableData, error) } +// MerkleTreeGetter defines the API for fetching a merkle tree. +type MerkleTreeGetter interface { + Get(chainID int64, tableID *big.Int, blockNumber int64) (*merkletree.MerkleTree, error) +} + // Controller defines the HTTP handlers for interacting with user tables. type Controller struct { + treeStore MerkleTreeGetter runner SQLRunner systemService system.SystemService } // NewController creates a new Controller. 
-func NewController(runner SQLRunner, svc system.SystemService) *Controller { +func NewController(runner SQLRunner, svc system.SystemService, treeStore MerkleTreeGetter) *Controller { return &Controller{ + treeStore: treeStore, runner: runner, systemService: svc, } @@ -253,8 +263,15 @@ func (c *Controller) GetReceiptByTransactionHash(rw http.ResponseWriter, r *http receipt, exists, err := c.systemService.GetReceiptByTransactionHash(ctx, txnHash) if err != nil { rw.Header().Set("Content-Type", "application/json") - rw.WriteHeader(http.StatusBadRequest) log.Ctx(ctx).Error().Err(err).Msg("get receipt by transaction hash") + + if strings.Contains(err.Error(), "database table is locked") || + strings.Contains(err.Error(), "database schema is locked") { + rw.WriteHeader(http.StatusLocked) + } else { + rw.WriteHeader(http.StatusBadRequest) + } + _ = json.NewEncoder(rw).Encode(errors.ServiceError{Message: "Get receipt by transaction hash failed"}) return } @@ -551,6 +568,42 @@ func (c *Controller) GetTableQuery(rw http.ResponseWriter, r *http.Request) { _, _ = rw.Write(formatted) } +// GetProof handles the GET /proof/{chainId}/{tableId}/{row} call. 
+func (c *Controller) GetProof(rw http.ResponseWriter, r *http.Request) { + rw.Header().Set("Content-Type", "application/json") + + vars := mux.Vars(r) + chainID, tableID, encodedRow := vars["chainId"], vars["tableId"], vars["row"] + + row, err := hex.DecodeString(encodedRow) + if err != nil { + return + } + + chainIDInt, err := strconv.ParseInt(chainID, 10, 0) + if err != nil { + return + } + + tableIDInt, err := strconv.ParseInt(tableID, 10, 0) + if err != nil { + return + } + + tree, err := c.treeStore.Get(chainIDInt, big.NewInt(tableIDInt), 0) + if err != nil { + return + } + + found, proof := tree.GetProof(row) + if !found { + rw.WriteHeader(http.StatusNotFound) + return + } + + _ = json.NewEncoder(rw).Encode(apiv1.Proof{Proof: proof.Hex()}) +} + func (c *Controller) runReadRequest( ctx context.Context, stm string, diff --git a/internal/router/controllers/controller_test.go b/internal/router/controllers/controller_test.go index 7fb8ecd0..3e1734dd 100644 --- a/internal/router/controllers/controller_test.go +++ b/internal/router/controllers/controller_test.go @@ -22,7 +22,7 @@ func TestGetTableRow(t *testing.T) { req, err := http.NewRequest("GET", "/chain/69/tables/100/id/1", nil) require.NoError(t, err) - ctrl := NewController(newTableRowRunnerMock(t), nil) + ctrl := NewController(newTableRowRunnerMock(t), nil, nil) router := mux.NewRouter() router.HandleFunc("/chain/{chainID}/tables/{id}/{key}/{value}", ctrl.GetTableRow) @@ -41,7 +41,7 @@ func TestERC721Metadata(t *testing.T) { req, err := http.NewRequest("GET", 
"/chain/69/tables/100/id/1?format=erc721&name=id&image=image&description=description&external_url=external_url&attributes[0][column]=base&attributes[0][trait_type]=Base&attributes[1][column]=eyes&attributes[1][trait_type]=Eyes&attributes[2][column]=mouth&attributes[2][trait_type]=Mouth&attributes[3][column]=level&attributes[3][trait_type]=Level&attributes[4][column]=stamina&attributes[4][trait_type]=Stamina&attributes[5][column]=personality&attributes[5][trait_type]=Personality&attributes[6][column]=aqua_power&attributes[6][display_type]=boost_number&attributes[6][trait_type]=Aqua%20Power&attributes[7][column]=stamina_increase&attributes[7][display_type]=boost_percentage&attributes[7][trait_type]=Stamina%20Increase&attributes[8][column]=generation&attributes[8][display_type]=number&attributes[8][trait_type]=Generation", nil) // nolint require.NoError(t, err) - ctrl := NewController(newTableRowRunnerMock(t), nil) + ctrl := NewController(newTableRowRunnerMock(t), nil, nil) router := mux.NewRouter() router.HandleFunc("/chain/{chainID}/tables/{id}/{key}/{value}", ctrl.GetTableRow) @@ -63,7 +63,7 @@ func TestBadQuery(t *testing.T) { req, err := http.NewRequest("GET", "/chain/69/tables/100/invalid_column/0", nil) require.NoError(t, err) - ctrl := NewController(r, nil) + ctrl := NewController(r, nil, nil) router := mux.NewRouter() router.HandleFunc("/chain/{chainID}/tables/{id}/{key}/{value}", ctrl.GetTableRow) @@ -106,7 +106,7 @@ func TestRowNotFound(t *testing.T) { req, err := http.NewRequest("GET", "/chain/69/tables/100/id/1", nil) require.NoError(t, err) - ctrl := NewController(r, nil) + ctrl := NewController(r, nil, nil) router := mux.NewRouter() router.HandleFunc("/chain/{chainID}/tables/{id}/{key}/{value}", ctrl.GetTableRow) @@ -150,7 +150,7 @@ func TestQuery(t *testing.T) { nil, ) - ctrl := NewController(r, nil) + ctrl := NewController(r, nil, nil) router := mux.NewRouter() router.HandleFunc("/query", ctrl.GetTableQuery) @@ -210,7 +210,7 @@ func TestLegacyQuery(t 
*testing.T) { nil, ) - ctrl := NewController(r, nil) + ctrl := NewController(r, nil, nil) router := mux.NewRouter() router.HandleFunc("/query", ctrl.GetTableQuery) @@ -253,7 +253,7 @@ func TestQueryExtracted(t *testing.T) { nil, ) - ctrl := NewController(r, nil) + ctrl := NewController(r, nil, nil) router := mux.NewRouter() router.HandleFunc("/query", ctrl.GetTableQuery) @@ -286,7 +286,7 @@ func TestGetTablesByMocked(t *testing.T) { t.Parallel() systemService := systemimpl.NewSystemMockService() - ctrl := NewController(nil, systemService) + ctrl := NewController(nil, systemService, nil) t.Run("get table metadata", func(t *testing.T) { t.Parallel() @@ -406,7 +406,7 @@ func TestGetTableWithInvalidID(t *testing.T) { require.NoError(t, err) systemService := systemimpl.NewSystemMockService() - systemController := NewController(nil, systemService) + systemController := NewController(nil, systemService, nil) router := mux.NewRouter() router.HandleFunc("/tables/{id}", systemController.GetTable) @@ -427,7 +427,7 @@ func TestTableNotFoundMock(t *testing.T) { require.NoError(t, err) systemService := systemimpl.NewSystemMockErrService() - systemController := NewController(nil, systemService) + systemController := NewController(nil, systemService, nil) router := mux.NewRouter() router.HandleFunc("/tables/{tableId}", systemController.GetTable) diff --git a/internal/router/controllers/legacy/rpcservice.go b/internal/router/controllers/legacy/rpcservice.go index f3a44e8d..3afeb4fd 100644 --- a/internal/router/controllers/legacy/rpcservice.go +++ b/internal/router/controllers/legacy/rpcservice.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "strings" "time" "github.com/ethereum/go-ethereum/common" @@ -228,6 +229,11 @@ func (rs *RPCService) GetReceipt( } ok, receipt, err := rs.tbl.GetReceipt(ctx, chainID, req.TxnHash) if err != nil { + if strings.Contains(err.Error(), "database table is locked") || + strings.Contains(err.Error(), "database schema is locked") { + ret := 
GetReceiptResponse{Ok: ok} + return ret, nil + } return GetReceiptResponse{}, fmt.Errorf("calling GetReceipt: %v", err) } ret := GetReceiptResponse{Ok: ok} diff --git a/internal/router/router.go b/internal/router/router.go index d139c77e..c8560b3b 100644 --- a/internal/router/router.go +++ b/internal/router/router.go @@ -13,12 +13,14 @@ import ( "github.com/textileio/go-tableland/internal/router/middlewares" "github.com/textileio/go-tableland/internal/system" "github.com/textileio/go-tableland/internal/tableland" + "github.com/textileio/go-tableland/pkg/merkletree/publisher/impl" ) // ConfiguredRouter returns a fully configured Router that can be used as an http handler. func ConfiguredRouter( tableland tableland.Tableland, systemService system.SystemService, + treeStore *impl.MerkleTreeStore, maxRPI uint64, rateLimInterval time.Duration, supportedChainIDs []tableland.ChainID, @@ -45,7 +47,7 @@ func ConfiguredRouter( return nil, fmt.Errorf("creating rate limit controller middleware: %s", err) } - ctrl := controllers.NewController(tableland, systemService) + ctrl := controllers.NewController(tableland, systemService, treeStore) // TODO(json-rpc): remove this when dropping support. 
// APIs Legacy (REST + JSON-RPC) @@ -99,6 +101,10 @@ func configureAPIV1Routes( userCtrl.GetTableQuery, []mux.MiddlewareFunc{middlewares.WithLogging, rateLim}, }, + "QueryProof": { + userCtrl.GetProof, + []mux.MiddlewareFunc{middlewares.WithLogging, rateLim}, + }, "ReceiptByTransactionHash": { userCtrl.GetReceiptByTransactionHash, []mux.MiddlewareFunc{middlewares.WithLogging, middlewares.RESTChainID(supportedChainIDs), rateLim}, diff --git a/internal/tableland/impl/mesa_test.go b/internal/tableland/impl/mesa_test.go index 68c8174a..83c154bc 100644 --- a/internal/tableland/impl/mesa_test.go +++ b/internal/tableland/impl/mesa_test.go @@ -37,7 +37,6 @@ import ( "github.com/textileio/go-tableland/pkg/sqlstore/impl/user" "github.com/textileio/go-tableland/pkg/tables" "github.com/textileio/go-tableland/pkg/tables/impl/ethereum" - "github.com/textileio/go-tableland/pkg/tables/impl/testutil" "github.com/textileio/go-tableland/pkg/wallet" "github.com/textileio/go-tableland/tests" ) @@ -940,14 +939,17 @@ func (b *tablelandSetupBuilder) build(t *testing.T) *tablelandSetup { ex, err := executor.NewExecutor(1337, db, parser, 0, &aclHalfMock{store}) require.NoError(t, err) - backend, addr, sc, auth, sk := testutil.Setup(t) + // Spin up the EVM chain with the contract. + simulatedChain := tests.NewSimulatedChain(t) + contract, err := simulatedChain.DeployContract(t, ethereum.Deploy) + require.NoError(t, err) // Spin up dependencies needed for the EventProcessor. 
// i.e: Executor, Parser, and EventFeed (connected to the EVM chain) ef, err := efimpl.New(store, 1337, - backend, - addr, + simulatedChain.Backend, + contract.ContractAddr, eventfeed.WithNewHeadPollFreq(time.Millisecond), eventfeed.WithMinBlockDepth(0)) require.NoError(t, err) @@ -970,15 +972,15 @@ func (b *tablelandSetupBuilder) build(t *testing.T) *tablelandSetup { chainID: 1337, // ethereum client - ethClient: backend, + ethClient: simulatedChain.Backend, // contract bindings - contract: sc, - contractAddr: addr, + contract: contract.Contract.(*ethereum.Contract), + contractAddr: contract.ContractAddr, // contract deployer - deployerPrivateKey: sk, - deployerTxOpts: auth, + deployerPrivateKey: simulatedChain.DeployerPrivateKey, + deployerTxOpts: simulatedChain.DeployerTransactOpts, // common dependencies among mesa clients parser: parser, diff --git a/pkg/client/v1/client_test.go b/pkg/client/v1/client_test.go index df93cf4c..d1edf0b6 100644 --- a/pkg/client/v1/client_test.go +++ b/pkg/client/v1/client_test.go @@ -2,10 +2,13 @@ package v1 import ( "context" + "encoding/hex" "fmt" + "hash/fnv" "testing" "time" + "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/require" "github.com/textileio/go-tableland/internal/router/controllers/apiv1" "github.com/textileio/go-tableland/pkg/client" @@ -74,20 +77,65 @@ func TestGetReceipt(t *testing.T) { t.Run("status 400", func(t *testing.T) { calls := setup(t) - _ = requireCreate(t, calls) - _, _, err := calls.client.Receipt(context.Background(), "0xINVALIDHASH") - require.Error(t, err) + requireCreate(t, calls) + + require.Eventually(t, func() bool { + _, _, err := calls.client.Receipt(context.Background(), "0xINVALIDHASH") + return err != nil + }, 10*time.Second, time.Second) }) t.Run("status 404", func(t *testing.T) { calls := setup(t) - _ = requireCreate(t, calls) - _, exists, err := calls.client.Receipt(context.Background(), "0x5c6f90e52284726a7276d6a20a3df94a4532a8fa4c921233a301e95673ad0255") 
//nolint - require.NoError(t, err) - require.False(t, exists) + requireCreate(t, calls) + + require.Eventually(t, func() bool { + _, exists, err := calls.client.Receipt(context.Background(), "0x5c6f90e52284726a7276d6a20a3df94a4532a8fa4c921233a301e95673ad0255") //nolint + require.NoError(t, err) + return exists == false + }, 10*time.Second, time.Second) }) } +func TestProof(t *testing.T) { + calls := setup(t) + + id, tableName := calls.create("(foo text, bar int, baz blob)", WithPrefix("foo"), WithReceiptTimeout(time.Second*10)) + require.Equal(t, "foo_1337_1", tableName) + + hash := calls.write( + fmt.Sprintf("insert into %s (foo, bar, baz) values ('qux', 1, X'53514C697465'), (null, 2, null)", tableName), + ) + require.NotEmpty(t, hash) + requireReceipt(t, calls, hash, WaitFor(time.Second*10)) + + h := fnv.New128a() + h.Write([]byte("bar")) + h.Write([]byte("1")) + h.Write([]byte("foo")) + h.Write([]byte("qux")) + h.Write([]byte("baz")) + byts, _ := hex.DecodeString("53514C697465") + h.Write(byts) + leaf1 := crypto.Keccak256(h.Sum(nil)) + + h = fnv.New128a() + h.Write([]byte("foo")) + h.Write([]byte{}) + h.Write([]byte("baz")) + h.Write([]byte{}) + h.Write([]byte("bar")) + h.Write([]byte("2")) + leaf2 := crypto.Keccak256(h.Sum(nil)) + + require.Eventually(t, func() bool { + proof1, found1, _ := calls.client.Proof(context.Background(), id, leaf1) + proof2, found2, _ := calls.client.Proof(context.Background(), id, leaf2) + return found1 && proof1[0] == hex.EncodeToString(leaf2) && + found2 && proof2[0] == hex.EncodeToString(leaf1) + }, time.Second*10, time.Second) +} + func TestGetTableByID(t *testing.T) { t.Run("status 200", func(t *testing.T) { calls := setup(t) diff --git a/pkg/client/v1/proof.go b/pkg/client/v1/proof.go new file mode 100644 index 00000000..8cf4f3ef --- /dev/null +++ b/pkg/client/v1/proof.go @@ -0,0 +1,49 @@ +package v1 + +import ( + "context" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "net/http" + + 
"github.com/textileio/go-tableland/internal/router/controllers/apiv1" +) + +// Proof gets a proof for a given row. +func (c *Client) Proof( + ctx context.Context, + tableID TableID, + row []byte, +) ([]string, bool, error) { + return c.getProof(ctx, tableID, row) +} + +func (c *Client) getProof(ctx context.Context, tableID TableID, row []byte) ([]string, bool, error) { + url := fmt.Sprintf( + "%s/api/v1/proof/%d/%d/%s", c.baseURL, c.chain.ID, tableID.ToBigInt().Int64(), hex.EncodeToString(row), + ) + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return nil, false, fmt.Errorf("creating request: %s", err) + } + response, err := c.tblHTTP.Do(req) + if err != nil { + return nil, false, fmt.Errorf("calling get receipt by transaction hash: %s", err) + } + defer func() { _ = response.Body.Close() }() + if response.StatusCode == http.StatusNotFound { + return nil, false, nil + } + if response.StatusCode != http.StatusOK { + msg, _ := io.ReadAll(response.Body) + return nil, false, fmt.Errorf("failed call (status: %d, body: %s)", response.StatusCode, msg) + } + + var proof apiv1.Proof + if err := json.NewDecoder(response.Body).Decode(&proof); err != nil { + return nil, false, fmt.Errorf("unmarshaling result: %s", err) + } + return proof.Proof, true, nil +} diff --git a/pkg/client/v1/receipt.go b/pkg/client/v1/receipt.go index 541d2ce8..4dba2cab 100644 --- a/pkg/client/v1/receipt.go +++ b/pkg/client/v1/receipt.go @@ -55,6 +55,11 @@ func (c *Client) getReceipt(ctx context.Context, txnHash string) (*apiv1.Transac if response.StatusCode == http.StatusNotFound { return nil, false, nil } + + if response.StatusCode == http.StatusLocked { + return nil, false, nil + } + if response.StatusCode != http.StatusOK { msg, _ := io.ReadAll(response.Body) return nil, false, fmt.Errorf("failed call (status: %d, body: %s)", response.StatusCode, msg) diff --git a/pkg/eventprocessor/eventfeed/impl/eventfeed_test.go 
b/pkg/eventprocessor/eventfeed/impl/eventfeed_test.go index 74cdb5fb..95241b8c 100644 --- a/pkg/eventprocessor/eventfeed/impl/eventfeed_test.go +++ b/pkg/eventprocessor/eventfeed/impl/eventfeed_test.go @@ -16,7 +16,6 @@ import ( "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed" "github.com/textileio/go-tableland/pkg/sqlstore/impl/system" "github.com/textileio/go-tableland/pkg/tables/impl/ethereum" - "github.com/textileio/go-tableland/pkg/tables/impl/testutil" "github.com/textileio/go-tableland/tests" ) @@ -29,31 +28,37 @@ func TestRunSQLEvents(t *testing.T) { systemStore, err := system.New(dbURI, tableland.ChainID(1337)) require.NoError(t, err) - backend, addr, sc, authOpts, _ := testutil.Setup(t) + // Spin up the EVM chain with the contract. + simulatedChain := tests.NewSimulatedChain(t) + contract, err := simulatedChain.DeployContract(t, ethereum.Deploy) + require.NoError(t, err) + ef, err := New( systemStore, 1337, - backend, - addr, + simulatedChain.Backend, + contract.ContractAddr, eventfeed.WithNewHeadPollFreq(time.Millisecond), eventfeed.WithMinBlockDepth(0)) require.NoError(t, err) // Create the table - ctrl := authOpts.From - _, err = sc.CreateTable( - authOpts, + ctrl := simulatedChain.DeployerTransactOpts.From + _, err = contract.Contract.(*ethereum.Contract).CreateTable( + simulatedChain.DeployerTransactOpts, ctrl, "CREATE TABLE foo (bar int)") require.NoError(t, err) // Make one call before start listening. - _, err = sc.RunSQL(authOpts, ctrl, big.NewInt(1), "stmt-1") + _, err = contract.Contract.(*ethereum.Contract).RunSQL( + simulatedChain.DeployerTransactOpts, ctrl, big.NewInt(1), "stmt-1", + ) require.NoError(t, err) - backend.Commit() + simulatedChain.Backend.Commit() // Start listening to Logs for the contract from the next block. 
- currBlockNumber := backend.Blockchain().CurrentHeader().Number.Int64() + currBlockNumber := simulatedChain.Backend.Blockchain().CurrentHeader().Number.Int64() ch := make(chan eventfeed.BlockEvents) go func() { err := ef.Start(context.Background(), currBlockNumber+1, ch, []eventfeed.EventType{eventfeed.RunSQL}) @@ -71,9 +76,11 @@ func TestRunSQLEvents(t *testing.T) { } // Make a second call, that should be detected as a new event next. - _, err = sc.RunSQL(authOpts, ctrl, big.NewInt(1), "stmt-2") + _, err = contract.Contract.(*ethereum.Contract).RunSQL( + simulatedChain.DeployerTransactOpts, ctrl, big.NewInt(1), "stmt-2", + ) require.NoError(t, err) - backend.Commit() + simulatedChain.Backend.Commit() select { case bes := <-ch: require.Len(t, bes.Txns, 1) @@ -84,12 +91,16 @@ func TestRunSQLEvents(t *testing.T) { } // Try making two calls in a single block now, and assert we receive things correctly. - _, err = sc.RunSQL(authOpts, ctrl, big.NewInt(1), "stmt-3") + _, err = contract.Contract.(*ethereum.Contract).RunSQL( + simulatedChain.DeployerTransactOpts, ctrl, big.NewInt(1), "stmt-3", + ) require.NoError(t, err) - _, err = sc.RunSQL(authOpts, ctrl, big.NewInt(1), "stmt-4") + _, err = contract.Contract.(*ethereum.Contract).RunSQL( + simulatedChain.DeployerTransactOpts, ctrl, big.NewInt(1), "stmt-4", + ) require.NoError(t, err) - backend.Commit() + simulatedChain.Backend.Commit() select { case bes := <-ch: require.Len(t, bes.Txns, 2) @@ -109,13 +120,17 @@ func TestAllEvents(t *testing.T) { systemStore, err := system.New(dbURI, tableland.ChainID(1337)) require.NoError(t, err) - backend, addr, sc, authOpts, _ := testutil.Setup(t) + // Spin up the EVM chain with the contract. 
+ simulatedChain := tests.NewSimulatedChain(t) + contract, err := simulatedChain.DeployContract(t, ethereum.Deploy) + require.NoError(t, err) + fetchBlockExtraInfoDelay = time.Millisecond ef, err := New( systemStore, 1337, - backend, - addr, + simulatedChain.Backend, + contract.ContractAddr, eventfeed.WithNewHeadPollFreq(time.Millisecond), eventfeed.WithMinBlockDepth(0), eventfeed.WithEventPersistence(true), @@ -145,33 +160,35 @@ func TestAllEvents(t *testing.T) { require.Error(t, err) } - ctrl := authOpts.From + ctrl := simulatedChain.DeployerTransactOpts.From // Make four calls to different functions emitting different events - txn1, err := sc.CreateTable( - authOpts, + txn1, err := contract.Contract.(*ethereum.Contract).CreateTable( + simulatedChain.DeployerTransactOpts, ctrl, "CREATE TABLE foo (bar int)") require.NoError(t, err) - txn2, err := sc.RunSQL(authOpts, ctrl, big.NewInt(1), "stmt-2") + txn2, err := contract.Contract.(*ethereum.Contract).RunSQL( + simulatedChain.DeployerTransactOpts, ctrl, big.NewInt(1), "stmt-2", + ) require.NoError(t, err) - txn3, err := sc.SetController( - authOpts, + txn3, err := contract.Contract.(*ethereum.Contract).SetController( + simulatedChain.DeployerTransactOpts, ctrl, big.NewInt(1), common.HexToAddress("0xB0Cf943Cf94E7B6A2657D15af41c5E06c2BFEA3E"), ) require.NoError(t, err) - txn4, err := sc.TransferFrom( - authOpts, + txn4, err := contract.Contract.(*ethereum.Contract).TransferFrom( + simulatedChain.DeployerTransactOpts, ctrl, common.HexToAddress("0xB0Cf943Cf94E7B6A2657D15af41c5E06c2BFEA3E"), big.NewInt(1), ) require.NoError(t, err) - backend.Commit() + simulatedChain.Backend.Commit() select { case bes := <-ch: diff --git a/pkg/eventprocessor/impl/eventprocessor.go b/pkg/eventprocessor/impl/eventprocessor.go index 0cf9e90a..7dbf2950 100644 --- a/pkg/eventprocessor/impl/eventprocessor.go +++ b/pkg/eventprocessor/impl/eventprocessor.go @@ -46,13 +46,14 @@ type EventProcessor struct { daemonCanceled chan struct{} // Metrics 
- mBaseLabels []attribute.KeyValue - mExecutionRound atomic.Int64 - mLastProcessedHeight atomic.Int64 - mBlockExecutionLatency instrument.Int64Histogram - mEventExecutionCounter instrument.Int64Counter - mTxnExecutionLatency instrument.Int64Histogram - mHashCalculationElapsedTime atomic.Int64 + mBaseLabels []attribute.KeyValue + mExecutionRound atomic.Int64 + mLastProcessedHeight atomic.Int64 + mBlockExecutionLatency instrument.Int64Histogram + mEventExecutionCounter instrument.Int64Counter + mTxnExecutionLatency instrument.Int64Histogram + mHashCalculationElapsedTime atomic.Int64 + mTreeLeavesCalculationElapsedTime atomic.Int64 } // New returns a new EventProcessor. @@ -216,13 +217,6 @@ func (ep *EventProcessor) executeBlock(ctx context.Context, block eventfeed.Bloc } }() - if block.BlockNumber >= ep.nextHashCalcBlockNumber { - if err := ep.calculateHash(ctx, bs); err != nil { - return fmt.Errorf("calculate hash: %s", err) - } - ep.nextHashCalcBlockNumber = nextMultipleOf(block.BlockNumber, ep.config.HashCalcStep) - } - receipts := make([]eventprocessor.Receipt, 0, len(block.Txns)) for idxInBlock, txnEvents := range block.Txns { if ep.config.DedupExecutedTxns { @@ -280,6 +274,18 @@ func (ep *EventProcessor) executeBlock(ctx context.Context, block eventfeed.Bloc return fmt.Errorf("set new processed height %d: %s", block.BlockNumber, err) } + if block.BlockNumber >= ep.nextHashCalcBlockNumber { + if err := ep.calculateHash(ctx, bs); err != nil { + return fmt.Errorf("calculate hash: %s", err) + } + + if err := ep.snapshotTreeLeaves(ctx, bs, block.BlockNumber); err != nil { + return fmt.Errorf("calculate tree leaves: %s", err) + } + + ep.nextHashCalcBlockNumber = nextMultipleOf(block.BlockNumber, ep.config.HashCalcStep) + } + if err := bs.Commit(); err != nil { return fmt.Errorf("committing changes: %s", err) } @@ -322,6 +328,22 @@ func (ep *EventProcessor) calculateHash(ctx context.Context, bs executor.BlockSc return nil } +func (ep *EventProcessor) 
snapshotTreeLeaves(ctx context.Context, bs executor.BlockScope, blockNumber int64) error { + startTime := time.Now() + if err := bs.SnapshotTableLeaves(ctx); err != nil { + return fmt.Errorf("snapshot tree leaves: %s", err) + } + elapsedTime := time.Since(startTime).Milliseconds() + ep.log.Info(). + Int64("block_number", blockNumber). + Int64("elapsed_time", elapsedTime). + Msg("tree leaves snapshotting") + + ep.mTreeLeavesCalculationElapsedTime.Store(elapsedTime) + + return nil +} + func nextMultipleOf(x, y int64) int64 { return y * ((x + y) / y) } diff --git a/pkg/eventprocessor/impl/eventprocessor_test.go b/pkg/eventprocessor/impl/eventprocessor_test.go index 5385d765..cf8a2322 100644 --- a/pkg/eventprocessor/impl/eventprocessor_test.go +++ b/pkg/eventprocessor/impl/eventprocessor_test.go @@ -20,7 +20,7 @@ import ( "github.com/textileio/go-tableland/pkg/sqlstore/impl/system" "github.com/textileio/go-tableland/pkg/sqlstore/impl/user" "github.com/textileio/go-tableland/pkg/tables" - "github.com/textileio/go-tableland/pkg/tables/impl/testutil" + "github.com/textileio/go-tableland/pkg/tables/impl/ethereum" "github.com/textileio/go-tableland/tests" ) @@ -309,7 +309,9 @@ func setup(t *testing.T) ( t.Helper() // Spin up the EVM chain with the contract. - backend, addr, sc, authOpts, _ := testutil.Setup(t) + simulatedChain := tests.NewSimulatedChain(t) + contract, err := simulatedChain.DeployContract(t, ethereum.Deploy) + require.NoError(t, err) // Spin up dependencies needed for the EventProcessor. 
// i.e: Executor, Parser, and EventFeed (connected to the EVM chain) @@ -328,8 +330,8 @@ func setup(t *testing.T) ( ef, err := efimpl.New( systemStore, chainID, - backend, - addr, + simulatedChain.Backend, + contract.ContractAddr, eventfeed.WithNewHeadPollFreq(time.Millisecond), eventfeed.WithMinBlockDepth(0)) require.NoError(t, err) @@ -342,33 +344,41 @@ func setup(t *testing.T) ( contractSendRunSQL := func(queries []string) []common.Hash { var txnHashes []common.Hash for _, q := range queries { - txn, err := sc.RunSQL(authOpts, authOpts.From, big.NewInt(1), q) + txn, err := contract.Contract.(*ethereum.Contract).RunSQL( + simulatedChain.DeployerTransactOpts, simulatedChain.DeployerTransactOpts.From, big.NewInt(1), q, + ) require.NoError(t, err) txnHashes = append(txnHashes, txn.Hash()) } - backend.Commit() + simulatedChain.Backend.Commit() return txnHashes } contractSendSetController := func(controller common.Address) common.Hash { - txn, err := sc.SetController(authOpts, authOpts.From, big.NewInt(1), controller) + txn, err := contract.Contract.(*ethereum.Contract).SetController( + simulatedChain.DeployerTransactOpts, simulatedChain.DeployerTransactOpts.From, big.NewInt(1), controller, + ) require.NoError(t, err) - backend.Commit() + simulatedChain.Backend.Commit() return txn.Hash() } mintTable := func(query string) common.Hash { - txn, err := sc.CreateTable(authOpts, authOpts.From, query) + txn, err := contract.Contract.(*ethereum.Contract).CreateTable( + simulatedChain.DeployerTransactOpts, simulatedChain.DeployerTransactOpts.From, query, + ) require.NoError(t, err) - backend.Commit() + simulatedChain.Backend.Commit() return txn.Hash() } transferFrom := func(controller common.Address) common.Hash { - txn, err := sc.TransferFrom(authOpts, authOpts.From, controller, big.NewInt(1)) + txn, err := contract.Contract.(*ethereum.Contract).TransferFrom( + simulatedChain.DeployerTransactOpts, simulatedChain.DeployerTransactOpts.From, controller, big.NewInt(1), + ) 
require.NoError(t, err) - backend.Commit() + simulatedChain.Backend.Commit() return txn.Hash() } diff --git a/pkg/eventprocessor/impl/executor/executor.go b/pkg/eventprocessor/impl/executor/executor.go index 313fe4b4..073b5bc6 100644 --- a/pkg/eventprocessor/impl/executor/executor.go +++ b/pkg/eventprocessor/impl/executor/executor.go @@ -45,6 +45,9 @@ type BlockScope interface { // StateHash calculates the hash of some state of the database. StateHash(ctx context.Context, chainID tableland.ChainID) (StateHash, error) + // SnapshotTableLeaves takes a snapshot of the leaves of each table for future Merkle Tree building. + SnapshotTableLeaves(ctx context.Context) error + // Commit commits all the changes that happened in previously successful ExecuteTxnEvents(...) calls. Commit() error diff --git a/pkg/eventprocessor/impl/executor/impl/blockscope.go b/pkg/eventprocessor/impl/executor/impl/blockscope.go index 1bab319e..3a3814be 100644 --- a/pkg/eventprocessor/impl/executor/impl/blockscope.go +++ b/pkg/eventprocessor/impl/executor/impl/blockscope.go @@ -1,9 +1,13 @@ package impl import ( + "bytes" "context" "database/sql" "fmt" + "hash" + "hash/fnv" + "sort" "strings" "github.com/ethereum/go-ethereum/common" @@ -215,6 +219,102 @@ func (bs *blockScope) StateHash(ctx context.Context, chainID tableland.ChainID) return executor.NewStateHash(chainID, bs.scopeVars.BlockNumber, hash), nil } +func (bs *blockScope) SnapshotTableLeaves(ctx context.Context) error { + rows, err := bs.txn.QueryContext(ctx, "select prefix, id from registry where chain_id = ?1", bs.scopeVars.ChainID) + if err != nil { + return fmt.Errorf("fetching tables from registry: %s", err) + } + defer func() { + if err := rows.Close(); err != nil { + bs.log.Error().Err(err).Msg("closing the rows") + } + }() + + for rows.Next() { + var tablePrefix string + var tableID int64 + if err := rows.Scan(&tablePrefix, &tableID); err != nil { + return fmt.Errorf("scanning table name: %s", err) + } + + if err := 
bs.snapshotTreeLeavesForTable(ctx, bs.scopeVars.ChainID, tablePrefix, tableID); err != nil { + return fmt.Errorf("snapshot leaves for table: %s", err) + } + } + + if err := rows.Err(); err != nil { + return fmt.Errorf("encountered error during iteration: %s", err) + } + + return nil +} + +func (bs *blockScope) snapshotTreeLeavesForTable( + ctx context.Context, + chainID tableland.ChainID, + tablePrefix string, + tableID int64, +) error { + tableName := fmt.Sprintf("%s_%d_%d", tablePrefix, chainID, tableID) + + // we don't need to sort the rows here because they will be sorted later inside the Merkle Tree library + tableRows, err := bs.txn.QueryContext(ctx, fmt.Sprintf("SELECT * FROM %s", tableName)) + if err != nil { + return fmt.Errorf("fetching rows from %s: %s", tableName, err) + } + defer func() { + if err := tableRows.Close(); err != nil { + bs.log.Error().Err(err).Msg("closing the rows") + } + }() + + columns, err := tableRows.Columns() + if err != nil { + return fmt.Errorf("getting the columns of row: %s", err) + } + + columnValues := make([]sql.RawBytes, len(columns)) + args := make([]interface{}, len(columnValues)) + for i := range columnValues { + args[i] = &columnValues[i] + } + + leaves, cols := []byte{}, make([]string, len(columns)) + // using a non-cryptographic hash that outputs a hash of 16 bytes + h := fnv.New128a() + for tableRows.Next() { + if err := tableRows.Scan(args...); err != nil { + return fmt.Errorf("table row scan: %s", err) + } + copy(cols, columns) + row := rowLeaf{columns: cols, columnValues: columnValues} + sort.Stable(row) + leaves = append(leaves, row.Encode(h)...) 
+	}
+
+	if err := tableRows.Err(); err != nil {
+		return fmt.Errorf("encountered error during iteration: %s", err)
+	}
+
+	if len(leaves) == 0 {
+		bs.log.Warn().Int64("chain_id", int64(bs.scopeVars.ChainID)).Int64("table_id", tableID).Msg("empty row")
+		return nil
+	}
+
+	if _, err := bs.txn.ExecContext(ctx,
+		"INSERT INTO system_tree_leaves (prefix, chain_id, table_id, block_number, leaves) VALUES (?1, ?2, ?3, ?4, ?5)",
+		tablePrefix,
+		bs.scopeVars.ChainID,
+		tableID,
+		bs.scopeVars.BlockNumber,
+		leaves,
+	); err != nil {
+		return fmt.Errorf("inserting tree leaves %s: %s", tableName, err)
+	}
+
+	return nil
+}
+
 // Close closes gracefully the block scope.
 // Clients should *always* `defer Close()` when opening block scopes.
 func (bs *blockScope) Close() error {
@@ -255,3 +355,51 @@ func (wqr *writeStatmentResolver) GetTxnHash() string {
 func (wqr *writeStatmentResolver) GetBlockNumber() int64 {
 	return wqr.blockNumber
 }
+
+// rowLeaf is a sortable data structure that holds the columns and column values.
+//
+// We sort the column values to have a deterministic order of columns,
+// because we cannot trust the order of 'SELECT *'.
+//
+// The downside of sorting is that different rows can be considered equal.
+// For example, consider the table below:
+// a | b
+// ------
+// 0 | 1
+// 1 | 0
+//
+// After sorting and hashing we would have the same hash for both rows,
+// but they are different and should have different hashes.
+//
+// To avoid that we have to sort the columns with respect to column values sorting.
+// And encode the column together with the column value.
+type rowLeaf struct { + columnValues []sql.RawBytes + columns []string +} + +func (rl rowLeaf) Len() int { + return len(rl.columnValues) +} + +func (rl rowLeaf) Swap(i, j int) { + rl.columnValues[i], rl.columnValues[j] = rl.columnValues[j], rl.columnValues[i] + rl.columns[i], rl.columns[j] = rl.columns[j], rl.columns[i] +} + +func (rl rowLeaf) Less(i, j int) bool { + if len(rl.columnValues[i]) == len(rl.columnValues[j]) { + return bytes.Compare(rl.columnValues[i], rl.columnValues[j]) < 0 + } + + return len(rl.columnValues[i]) < len(rl.columnValues[j]) +} + +func (rl rowLeaf) Encode(h hash.Hash) []byte { + h.Reset() + for i := range rl.columnValues { + h.Write([]byte(rl.columns[i])) + h.Write(rl.columnValues[i]) + } + return h.Sum(nil) +} diff --git a/pkg/eventprocessor/impl/metrics.go b/pkg/eventprocessor/impl/metrics.go index 25f3c9b5..a7db688c 100644 --- a/pkg/eventprocessor/impl/metrics.go +++ b/pkg/eventprocessor/impl/metrics.go @@ -29,14 +29,19 @@ func (ep *EventProcessor) initMetrics(chainID tableland.ChainID) error { if err != nil { return fmt.Errorf("creating hash calculation elapsed time gauge: %s", err) } + mTreeLeavesCalculationElapsedTime, err := meter.Int64ObservableGauge("tableland.eventprocessor.leaves.calculation.elapsed.time") // nolint + if err != nil { + return fmt.Errorf("creating leaves calculation elapsed time gauge: %s", err) + } _, err = meter.RegisterCallback( func(ctx context.Context, o metric.Observer) error { o.ObserveInt64(mExecutionRound, ep.mExecutionRound.Load(), ep.mBaseLabels...) o.ObserveInt64(mLastProcessedHeight, ep.mLastProcessedHeight.Load(), ep.mBaseLabels...) o.ObserveInt64(mHashCalculationElapsedTime, ep.mHashCalculationElapsedTime.Load(), ep.mBaseLabels...) + o.ObserveInt64(mTreeLeavesCalculationElapsedTime, ep.mTreeLeavesCalculationElapsedTime.Load(), ep.mBaseLabels...) 
 			return nil
 		},
 		[]instrument.Asynchronous{
-			mExecutionRound, mLastProcessedHeight, mHashCalculationElapsedTime,
+			mExecutionRound, mLastProcessedHeight, mHashCalculationElapsedTime, mTreeLeavesCalculationElapsedTime,
 		}...)
 	if err != nil {
 		return fmt.Errorf("registering async metric callback: %s", err)
diff --git a/pkg/merkletree/README.md b/pkg/merkletree/README.md
new file mode 100644
index 00000000..0bb1881a
--- /dev/null
+++ b/pkg/merkletree/README.md
@@ -0,0 +1,55 @@
+# Merkle Tree
+
+This package implements a Merkle Tree. It is used to calculate the Merkle Root of a table and get Membership Proof of rows.
+
+## Design characteristics
+
+- It was designed to work with [OpenZeppelin MerkleProof verifier](https://github.com/OpenZeppelin/openzeppelin-contracts/blob/260e082ed10e86e5870c4e5859750a8271eeb2b9/contracts/utils/cryptography/MerkleProof.sol#L27-L29). That requires leaves and hash pairs to be sorted. The sorting simplifies the proof verification by removing the need to include information about the order of the proof pieces.
+- It duplicates the last leaf node in case the number of leaves is odd. This is a common approach but vulnerable to a forgery attack because two trees can produce the same Merkle Root, e.g. `MerkleRoot(a, b, c) = MerkleRoot(a, b, c, c)`. But that is not a problem in our use case.
+
+## Usage
+
+```go
+	leaves := [][]byte{}
+	leaves = append(leaves, []byte("A"))
+	leaves = append(leaves, []byte("B"))
+	leaves = append(leaves, []byte("C"))
+	leaves = append(leaves, []byte("D"))
+	leaves = append(leaves, []byte("E"))
+	leaves = append(leaves, []byte("F"))
+	tree, _ := merkletree.NewTree(leaves, crypto.Keccak256)
+
+	// Getting the root
+	root := tree.MerkleRoot()
+	fmt.Printf("ROOT: %s\n\n", hex.EncodeToString(root))
+
+	// Getting the proof for a given leaf
+	leaf := crypto.Keccak256([]byte("D"))
+	proof := tree.GetProof(leaf)
+	for i, part := range proof {
+		fmt.Printf("PROOF (%d): 0x%s\n", i, hex.EncodeToString(part))
+	}
+
+	// Verifying the proof
+	ok := merkletree.VerifyProof(root, proof, leaf, crypto.Keccak256)
+	fmt.Printf("\nVERIFICATION RESULT: %t\n", ok)
+```
+
+```bash
+ROOT: 5b4f920caf9a50816be944fd3626945ebaed5fcd1f041fa864027d4eaad29cf6
+
+PROOF (0): 0xe61d9a3d3848fb2cdd9a2ab61e2f21a10ea431275aed628a0557f9dee697c37a
+PROOF (1): 0x324d51074ba12c3b56f59e6a9dd606351316426b7f7d924b1fc9efa7f261b476
+PROOF (2): 0xd8c26fda8cf7503459d00730efe60ff9ec19bf97b7a26b6aa42fa8d8337efe78
+
+VERIFICATION RESULT: true
+```
+
+## Things to explore
+
+- Multi proofs
+- Non-membership proofs
+- Consistency proofs
+- Serialization/Deserialization of the entire tree
+- Possible Vulnerabilities
+- Performance
diff --git a/pkg/merkletree/publisher/impl/contract.go b/pkg/merkletree/publisher/impl/contract.go
new file mode 100644
index 00000000..a51562d7
--- /dev/null
+++ b/pkg/merkletree/publisher/impl/contract.go
@@ -0,0 +1,254 @@
+// Code generated - DO NOT EDIT.
+// This file is a generated binding and any manual changes will be lost.
+ +package impl + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +// ContractMetaData contains all meta data concerning the Contract contract. +var ContractMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"uint256[]\",\"name\":\"tableIds\",\"type\":\"uint256[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"roots\",\"type\":\"bytes32[]\"}],\"name\":\"setRoots\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"tableId\",\"type\":\"uint256\"},{\"internalType\":\"bytes32\",\"name\":\"row\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32[]\",\"name\":\"proof\",\"type\":\"bytes32[]\"}],\"name\":\"verifyRowInclusion\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + Bin: 
"0x608060405234801561001057600080fd5b50610613806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c8063719f98111461003b578063d88fd1f41461006b575b600080fd5b61005560048036038101906100509190610339565b610087565b60405161006291906103c8565b60405180910390f35b61008560048036038101906100809190610439565b6100f6565b005b6000806000808781526020019081526020016000205490506100eb848480806020026020016040519081016040528093929190818152602001838360200280828437600081840152601f19601f8201169050808301925050505050505082876101af565b915050949350505050565b81819050848490501461013e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161013590610517565b60405180910390fd5b60005b848490508110156101a85782828281811061015f5761015e610537565b5b9050602002013560008087878581811061017c5761017b610537565b5b9050602002013581526020019081526020016000208190555080806101a090610595565b915050610141565b5050505050565b6000826101bc85846101c6565b1490509392505050565b60008082905060005b8451811015610211576101fc828683815181106101ef576101ee610537565b5b602002602001015161021c565b9150808061020990610595565b9150506101cf565b508091505092915050565b60008183106102345761022f8284610247565b61023f565b61023e8383610247565b5b905092915050565b600082600052816020526040600020905092915050565b600080fd5b600080fd5b6000819050919050565b61027b81610268565b811461028657600080fd5b50565b60008135905061029881610272565b92915050565b6000819050919050565b6102b18161029e565b81146102bc57600080fd5b50565b6000813590506102ce816102a8565b92915050565b600080fd5b600080fd5b600080fd5b60008083601f8401126102f9576102f86102d4565b5b8235905067ffffffffffffffff811115610316576103156102d9565b5b602083019150836020820283011115610332576103316102de565b5b9250929050565b600080600080606085870312156103535761035261025e565b5b600061036187828801610289565b9450506020610372878288016102bf565b935050604085013567ffffffffffffffff81111561039357610392610263565b5b61039f878288016102e3565b925092505092959194509250565b60008115159050919050565b6103c2816103ad565b8252505
0565b60006020820190506103dd60008301846103b9565b92915050565b60008083601f8401126103f9576103f86102d4565b5b8235905067ffffffffffffffff811115610416576104156102d9565b5b602083019150836020820283011115610432576104316102de565b5b9250929050565b600080600080604085870312156104535761045261025e565b5b600085013567ffffffffffffffff81111561047157610470610263565b5b61047d878288016103e3565b9450945050602085013567ffffffffffffffff8111156104a05761049f610263565b5b6104ac878288016102e3565b925092505092959194509250565b600082825260208201905092915050565b7f6c656e6774687320646f6e2774206d6174636800000000000000000000000000600082015250565b60006105016013836104ba565b915061050c826104cb565b602082019050919050565b60006020820190508181036000830152610530816104f4565b9050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60006105a082610268565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036105d2576105d1610566565b5b60018201905091905056fea2646970667358221220db32a5f9744ccb3526456fbb36b8152b9bed84903b501d1c160ed16fbb41600964736f6c63430008120033", +} + +// ContractABI is the input ABI used to generate the binding from. +// Deprecated: Use ContractMetaData.ABI instead. +var ContractABI = ContractMetaData.ABI + +// ContractBin is the compiled bytecode used for deploying new contracts. +// Deprecated: Use ContractMetaData.Bin instead. +var ContractBin = ContractMetaData.Bin + +// DeployContract deploys a new Ethereum contract, binding an instance of Contract to it. 
+func DeployContract(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *Contract, error) { + parsed, err := ContractMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(ContractBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &Contract{ContractCaller: ContractCaller{contract: contract}, ContractTransactor: ContractTransactor{contract: contract}, ContractFilterer: ContractFilterer{contract: contract}}, nil +} + +// Contract is an auto generated Go binding around an Ethereum contract. +type Contract struct { + ContractCaller // Read-only binding to the contract + ContractTransactor // Write-only binding to the contract + ContractFilterer // Log filterer for contract events +} + +// ContractCaller is an auto generated read-only Go binding around an Ethereum contract. +type ContractCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ContractTransactor is an auto generated write-only Go binding around an Ethereum contract. +type ContractTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ContractFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type ContractFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ContractSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. 
+type ContractSession struct { + Contract *Contract // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// ContractCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type ContractCallerSession struct { + Contract *ContractCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// ContractTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type ContractTransactorSession struct { + Contract *ContractTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// ContractRaw is an auto generated low-level Go binding around an Ethereum contract. +type ContractRaw struct { + Contract *Contract // Generic contract binding to access the raw methods on +} + +// ContractCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type ContractCallerRaw struct { + Contract *ContractCaller // Generic read-only contract binding to access the raw methods on +} + +// ContractTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type ContractTransactorRaw struct { + Contract *ContractTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewContract creates a new instance of Contract, bound to a specific deployed contract. 
+func NewContract(address common.Address, backend bind.ContractBackend) (*Contract, error) { + contract, err := bindContract(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Contract{ContractCaller: ContractCaller{contract: contract}, ContractTransactor: ContractTransactor{contract: contract}, ContractFilterer: ContractFilterer{contract: contract}}, nil +} + +// NewContractCaller creates a new read-only instance of Contract, bound to a specific deployed contract. +func NewContractCaller(address common.Address, caller bind.ContractCaller) (*ContractCaller, error) { + contract, err := bindContract(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &ContractCaller{contract: contract}, nil +} + +// NewContractTransactor creates a new write-only instance of Contract, bound to a specific deployed contract. +func NewContractTransactor(address common.Address, transactor bind.ContractTransactor) (*ContractTransactor, error) { + contract, err := bindContract(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &ContractTransactor{contract: contract}, nil +} + +// NewContractFilterer creates a new log filterer instance of Contract, bound to a specific deployed contract. +func NewContractFilterer(address common.Address, filterer bind.ContractFilterer) (*ContractFilterer, error) { + contract, err := bindContract(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &ContractFilterer{contract: contract}, nil +} + +// bindContract binds a generic wrapper to an already deployed contract. 
+func bindContract(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(ContractABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Contract *ContractRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Contract.Contract.ContractCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Contract *ContractRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Contract.Contract.ContractTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Contract *ContractRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Contract.Contract.ContractTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Contract *ContractCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Contract.Contract.contract.Call(opts, result, method, params...) 
+} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Contract *ContractTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Contract.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Contract *ContractTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Contract.Contract.contract.Transact(opts, method, params...) +} + +// VerifyRowInclusion is a free data retrieval call binding the contract method 0x719f9811. +// +// Solidity: function verifyRowInclusion(uint256 tableId, bytes32 row, bytes32[] proof) view returns(bool) +func (_Contract *ContractCaller) VerifyRowInclusion(opts *bind.CallOpts, tableId *big.Int, row [32]byte, proof [][32]byte) (bool, error) { + var out []interface{} + err := _Contract.contract.Call(opts, &out, "verifyRowInclusion", tableId, row, proof) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +// VerifyRowInclusion is a free data retrieval call binding the contract method 0x719f9811. +// +// Solidity: function verifyRowInclusion(uint256 tableId, bytes32 row, bytes32[] proof) view returns(bool) +func (_Contract *ContractSession) VerifyRowInclusion(tableId *big.Int, row [32]byte, proof [][32]byte) (bool, error) { + return _Contract.Contract.VerifyRowInclusion(&_Contract.CallOpts, tableId, row, proof) +} + +// VerifyRowInclusion is a free data retrieval call binding the contract method 0x719f9811. 
+// +// Solidity: function verifyRowInclusion(uint256 tableId, bytes32 row, bytes32[] proof) view returns(bool) +func (_Contract *ContractCallerSession) VerifyRowInclusion(tableId *big.Int, row [32]byte, proof [][32]byte) (bool, error) { + return _Contract.Contract.VerifyRowInclusion(&_Contract.CallOpts, tableId, row, proof) +} + +// SetRoots is a paid mutator transaction binding the contract method 0xd88fd1f4. +// +// Solidity: function setRoots(uint256[] tableIds, bytes32[] roots) returns() +func (_Contract *ContractTransactor) SetRoots(opts *bind.TransactOpts, tableIds []*big.Int, roots [][32]byte) (*types.Transaction, error) { + return _Contract.contract.Transact(opts, "setRoots", tableIds, roots) +} + +// SetRoots is a paid mutator transaction binding the contract method 0xd88fd1f4. +// +// Solidity: function setRoots(uint256[] tableIds, bytes32[] roots) returns() +func (_Contract *ContractSession) SetRoots(tableIds []*big.Int, roots [][32]byte) (*types.Transaction, error) { + return _Contract.Contract.SetRoots(&_Contract.TransactOpts, tableIds, roots) +} + +// SetRoots is a paid mutator transaction binding the contract method 0xd88fd1f4. 
+// +// Solidity: function setRoots(uint256[] tableIds, bytes32[] roots) returns() +func (_Contract *ContractTransactorSession) SetRoots(tableIds []*big.Int, roots [][32]byte) (*types.Transaction, error) { + return _Contract.Contract.SetRoots(&_Contract.TransactOpts, tableIds, roots) +} diff --git a/pkg/merkletree/publisher/impl/leaves_store.go b/pkg/merkletree/publisher/impl/leaves_store.go new file mode 100644 index 00000000..69c32c26 --- /dev/null +++ b/pkg/merkletree/publisher/impl/leaves_store.go @@ -0,0 +1,93 @@ +package impl + +import ( + "context" + "fmt" + "math/big" + + "github.com/rs/zerolog" + logger "github.com/rs/zerolog/log" + + "github.com/textileio/go-tableland/pkg/merkletree/publisher" + "github.com/textileio/go-tableland/pkg/sqlstore" + "github.com/textileio/go-tableland/pkg/sqlstore/impl/system/db" +) + +// LeavesStore responsible for interacting with system_tree_leaves table. +type LeavesStore struct { + log zerolog.Logger + systemStore sqlstore.SystemStore +} + +// NewLeavesStore returns a new LeavesStore backed by database/sql. +func NewLeavesStore(systemStore sqlstore.SystemStore) *LeavesStore { + log := logger.With(). + Str("component", "leavesstore"). + Logger() + + leavesstore := &LeavesStore{ + log: log, + systemStore: systemStore, + } + + return leavesstore +} + +// FetchLeavesByChainIDAndBlockNumber fetches chain ids and block numbers to be processed. 
+func (s *LeavesStore) FetchLeavesByChainIDAndBlockNumber( + ctx context.Context, + chainID int64, + blockNumber int64, +) ([]publisher.TreeLeaves, error) { + params := db.FetchLeavesByChainIDAndBlockNumberParams{ + ChainID: chainID, + BlockNumber: blockNumber, + } + rows, err := s.systemStore.Queries().FetchLeavesByChainIDAndBlockNumber(ctx, params) + if err != nil { + return []publisher.TreeLeaves{}, fmt.Errorf("fetching leaves by chain id and block number: %s", err) + } + + leaves := make([]publisher.TreeLeaves, len(rows)) + for i, row := range rows { + leaves[i] = publisher.TreeLeaves{ + ChainID: row.ChainID, + BlockNumber: row.BlockNumber, + TableID: big.NewInt(row.TableID), + TablePrefix: row.Prefix, + Leaves: row.Leaves, + } + } + + return leaves, nil +} + +// FetchChainIDAndBlockNumber fetches rows from leaves store by chain id and block number. +func (s *LeavesStore) FetchChainIDAndBlockNumber(ctx context.Context) ([]publisher.ChainIDBlockNumberPair, error) { + rows, err := s.systemStore.Queries().FetchChainIDAndBlockNumber(ctx) + if err != nil { + return []publisher.ChainIDBlockNumberPair{}, fmt.Errorf("fetching chain id and block number: %s", err) + } + + pairs := make([]publisher.ChainIDBlockNumberPair, len(rows)) + for i, row := range rows { + pairs[i] = publisher.ChainIDBlockNumberPair{ + ChainID: row.ChainID, + BlockNumber: row.BlockNumber, + } + } + + return pairs, nil +} + +// DeleteProcessing deletes rows that are marked as processing. 
+func (s *LeavesStore) DeleteProcessing(ctx context.Context, chainID int64, blockNumber int64) error { + if err := s.systemStore.Queries().DeleteProcessing(ctx, db.DeleteProcessingParams{ + ChainID: chainID, + BlockNumber: blockNumber, + }); err != nil { + return fmt.Errorf("delete processing: %s", err) + } + + return nil +} diff --git a/pkg/merkletree/publisher/impl/publisher_test.go b/pkg/merkletree/publisher/impl/publisher_test.go new file mode 100644 index 00000000..0d41c45b --- /dev/null +++ b/pkg/merkletree/publisher/impl/publisher_test.go @@ -0,0 +1,238 @@ +package impl + +import ( + "bytes" + "context" + "database/sql" + "encoding/hex" + "encoding/json" + "math/big" + "os" + "sync" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" + "github.com/textileio/go-tableland/internal/tableland" + "github.com/textileio/go-tableland/pkg/merkletree/publisher" + nonceimpl "github.com/textileio/go-tableland/pkg/nonce/impl" + "github.com/textileio/go-tableland/pkg/sqlstore/impl/system" + "github.com/textileio/go-tableland/pkg/wallet" + "github.com/textileio/go-tableland/tests" +) + +func TestPublisher(t *testing.T) { + t.Parallel() + // We are going to pass this logger to MerkleRootRegistryLogger. + // It will fill the `buf` with logged bytes, that later we can inspect that it logged the expected values. 
+ var buf buffer + logger := zerolog.New(&buf).With().Timestamp().Logger() + + helper := setup(t, []publisher.TreeLeaves{ + { + TablePrefix: "", + ChainID: 1, + TableID: big.NewInt(1), + BlockNumber: 1, + Leaves: []byte("ABCDEFGHABCDEFGH"), + }, + { + TablePrefix: "", + ChainID: 1, + TableID: big.NewInt(2), + BlockNumber: 1, + Leaves: []byte("ABCDEFGHABCDEFGH"), + }, + }) + + p := publisher.NewMerkleRootPublisher( + helper.leavesStore, helper.treeStore, NewMerkleRootRegistryLogger(logger), time.Second, + ) + p.Start() + defer p.Close() + + type l struct { + ChainID int `json:"chain_id"` + BlockNumber int `json:"block_number"` + Level string `json:"level"` + Message string `json:"message"` + Root1 string `json:"root_1"` + Root2 string `json:"root_2"` + Tables []int `json:"tables"` + } + + // Eventually the MerkleRootLogger will build the tree and emit the expected log. + require.Eventually(t, func() bool { + // We're going to inspect `buf`. + if buf.Len() != 0 { + expLog := &l{} + decoder := json.NewDecoder(bytes.NewReader(buf.Bytes())) + require.NoError(t, decoder.Decode(expLog)) + + require.Equal(t, 1, expLog.ChainID) + require.Equal(t, 1, expLog.BlockNumber) + require.Equal(t, "info", expLog.Level) + require.Equal(t, "merkle roots", expLog.Message) + require.Equal(t, "8b8e53316fb13d0bfe0e559e947f729af5296981a47095be51054afae8e48ab1", expLog.Root1) + require.Equal(t, "8b8e53316fb13d0bfe0e559e947f729af5296981a47095be51054afae8e48ab1", expLog.Root2) + require.Equal(t, []int{1, 2}, expLog.Tables) + + return helper.treeLeavesCount(t) == 0 + } + return buf.Len() != 0 + }, 10*time.Second, time.Second) +} + +func TestPublisherWithSimulatedBackend(t *testing.T) { + t.Parallel() + + helper := setup(t, []publisher.TreeLeaves{ + { + TablePrefix: "", + ChainID: 1337, + TableID: big.NewInt(1), + BlockNumber: 1, + Leaves: []byte("ABCDEFGHABCDEFGH"), + }, + { + TablePrefix: "", + ChainID: 1337, + TableID: big.NewInt(2), + BlockNumber: 1, + Leaves: []byte("ABCDEFGHABCDEFGH"), + 
}, + }) + + p := publisher.NewMerkleRootPublisher(helper.leavesStore, helper.treeStore, helper.rootRegistry, time.Second) + p.Start() + defer p.Close() + + // Eventually the MerkleRootLogger will build the tree and emit the expected log. + require.Eventually(t, func() bool { + return helper.treeLeavesCount(t) == 0 + }, 10*time.Second, time.Second) +} + +func setup(t *testing.T, data []publisher.TreeLeaves) *helper { + t.Helper() + + chain := tests.NewSimulatedChain(t) + contract, err := chain.DeployContract(t, + func(auth *bind.TransactOpts, sb *backends.SimulatedBackend) (common.Address, interface{}, error) { + addr, _, contract, err := DeployContract(auth, sb) + return addr, contract, err + }) + require.NoError(t, err) + + privateKey := chain.CreateAccountWithBalance(t) + + w, err := wallet.NewWallet(hex.EncodeToString(crypto.FromECDSA(privateKey))) + require.NoError(t, err) + + url := tests.Sqlite3URI(t) + + systemStore, err := system.New(url, tableland.ChainID(1337)) + require.NoError(t, err) + + tracker, err := nonceimpl.NewLocalTracker( + context.Background(), + w, + nonceimpl.NewNonceStore(systemStore), + tableland.ChainID(1337), + chain.Backend, + 5*time.Second, + 0, + 3*time.Microsecond, + ) + require.NoError(t, err) + + rootRegistry, err := NewMerkleRootRegistryEthereum(chain.Backend, contract.ContractAddr, w, tracker) + require.NoError(t, err) + + db, err := sql.Open("sqlite3", url) + require.NoError(t, err) + + // pre populate system_tree_leaves with provided data + for _, treeLeaves := range data { + _, err = db.Exec( + "INSERT INTO system_tree_leaves (prefix, chain_id, table_id, block_number, leaves) VALUES (?1, ?2, ?3, ?4, ?5)", + treeLeaves.TablePrefix, + treeLeaves.ChainID, + treeLeaves.TableID.Int64(), + treeLeaves.BlockNumber, + treeLeaves.Leaves, + ) + require.NoError(t, err) + } + + treeStore, err := NewMerkleTreeStore(tempfile(t)) + require.NoError(t, err) + + return &helper{ + db: db, + leavesStore: NewLeavesStore(systemStore), + treeStore: 
treeStore, + rootRegistry: rootRegistry, + } +} + +type helper struct { + db *sql.DB + leavesStore *LeavesStore + treeStore *MerkleTreeStore + rootRegistry *MerkleRootRegistryEthereum +} + +func (h *helper) treeLeavesCount(t *testing.T) int { + var count int + err := h.db.QueryRow("SELECT count(1) FROM system_tree_leaves").Scan(&count) + require.NoError(t, err) + return count +} + +// We need a thread-safe version of bytes.Buffer to avoid data races in this test. +// The reason for that is because there's a thread writing to the buffer and another one reading from it. +type buffer struct { + b bytes.Buffer + m sync.Mutex +} + +func (b *buffer) Read(p []byte) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + return b.b.Read(p) +} + +func (b *buffer) Write(p []byte) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + return b.b.Write(p) +} + +func (b *buffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.b.Bytes() +} + +func (b *buffer) Len() int { + b.m.Lock() + defer b.m.Unlock() + return b.b.Len() +} + +// tempfile returns a temporary file path. +func tempfile(t *testing.T) string { + t.Helper() + + f, err := os.CreateTemp(t.TempDir(), "bolt_*.db") + require.NoError(t, err) + require.NoError(t, f.Close()) + + return f.Name() +} diff --git a/pkg/merkletree/publisher/impl/registry.go b/pkg/merkletree/publisher/impl/registry.go new file mode 100644 index 00000000..46d8f90a --- /dev/null +++ b/pkg/merkletree/publisher/impl/registry.go @@ -0,0 +1,165 @@ +package impl + +import ( + "context" + "fmt" + "math/big" + "strings" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + + "github.com/rs/zerolog" + logger "github.com/rs/zerolog/log" + "github.com/textileio/go-tableland/pkg/nonce" + "github.com/textileio/go-tableland/pkg/wallet" +) + +// MerkleRootRegistryLogger is an implementat that simply logs the roots. 
+type MerkleRootRegistryLogger struct { + logger zerolog.Logger +} + +// NewMerkleRootRegistryLogger creates a new MerkleRootRegistryLogger. +func NewMerkleRootRegistryLogger(logger zerolog.Logger) *MerkleRootRegistryLogger { + return &MerkleRootRegistryLogger{logger: logger} +} + +// Publish logs the roots. +func (r *MerkleRootRegistryLogger) Publish( + _ context.Context, + chainID int64, + blockNumber int64, + tables []*big.Int, + roots [][]byte, +) error { + tableIds := make([]int64, len(tables)) + for i, id := range tables { + tableIds[i] = id.Int64() + } + + l := r.logger.Info(). + Int64("chain_id", chainID). + Int64("block_number", blockNumber). + Ints64("tables", tableIds) + + for i, root := range roots { + l.Hex(fmt.Sprintf("root_%d", tables[i].Int64()), root) + } + + l.Msg("merkle roots") + + return nil +} + +// MerkleRootRegistryEthereum is a Ethereum Root Registry implementation. +type MerkleRootRegistryEthereum struct { + contract *Contract + backend bind.ContractBackend + wallet *wallet.Wallet + tracker nonce.NonceTracker + + log zerolog.Logger +} + +// NewMerkleRootRegistryEthereum creates a new MerkleRootRegistryEthereum. +func NewMerkleRootRegistryEthereum( + backend bind.ContractBackend, + contractAddr common.Address, + wallet *wallet.Wallet, + tracker nonce.NonceTracker, +) (*MerkleRootRegistryEthereum, error) { + contract, err := NewContract(contractAddr, backend) + if err != nil { + return nil, fmt.Errorf("creating contract: %v", err) + } + + log := logger.With(). + Str("component", "merklerootregistryethereum"). + Logger() + + return &MerkleRootRegistryEthereum{ + contract: contract, + backend: backend, + wallet: wallet, + tracker: tracker, + log: log, + }, nil +} + +// Publish publishes the roots to a Smart Contract. 
+func (r *MerkleRootRegistryEthereum) Publish( + ctx context.Context, + chainID int64, + _ int64, + tables []*big.Int, + roots [][]byte, +) error { + transactOpts, err := bind.NewKeyedTransactorWithChainID(r.wallet.PrivateKey(), big.NewInt(chainID)) + if err != nil { + return fmt.Errorf("creating keyed transactor: %s", err) + } + + gasTipCap, err := r.backend.SuggestGasTipCap(ctx) + if err != nil { + return fmt.Errorf("suggest gas price: %s", err) + } + + _, err = r.callWithRetry(ctx, func() (*types.Transaction, error) { + registerPendingTx, unlock, nonce := r.tracker.GetNonce(ctx) + defer unlock() + + opts := &bind.TransactOpts{ + Context: ctx, + Signer: transactOpts.Signer, + From: transactOpts.From, + Nonce: big.NewInt(0).SetInt64(nonce), + GasTipCap: gasTipCap, + } + + rootsCopy := make([][32]byte, len(roots)) + for i, root := range roots { + copy(rootsCopy[i][:], root[:]) + } + + tx, err := r.contract.SetRoots(opts, tables, rootsCopy) + if err != nil { + return nil, err + } + registerPendingTx(tx.Hash()) + return tx, nil + }) + if err != nil { + return fmt.Errorf("retryable SetRoots call: %s", err) + } + return nil +} + +func (r *MerkleRootRegistryEthereum) callWithRetry( + ctx context.Context, f func() (*types.Transaction, error), +) (*types.Transaction, error) { + tx, err := f() + + possibleErrMgs := []string{"nonce too low", "invalid transaction nonce"} + if err != nil { + for _, errMsg := range possibleErrMgs { + if strings.Contains(err.Error(), errMsg) { + r.log.Warn().Err(err).Msg("retrying smart contract call") + if err := r.tracker.Resync(ctx); err != nil { + return nil, fmt.Errorf("resync: %s", err) + } + tx, err = f() + if err != nil { + return nil, fmt.Errorf("retry contract call: %s", err) + } + + return tx, nil + } + } + + return nil, fmt.Errorf("contract call: %s", err) + } + + return tx, nil +} diff --git a/pkg/merkletree/publisher/impl/root-registry-abi.json b/pkg/merkletree/publisher/impl/root-registry-abi.json new file mode 100644 index 
00000000..ca4381a1 --- /dev/null +++ b/pkg/merkletree/publisher/impl/root-registry-abi.json @@ -0,0 +1,49 @@ +[ + { + "inputs": [ + { + "internalType": "uint256[]", + "name": "tableIds", + "type": "uint256[]" + }, + { + "internalType": "bytes32[]", + "name": "roots", + "type": "bytes32[]" + } + ], + "name": "setRoots", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "tableId", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "row", + "type": "bytes32" + }, + { + "internalType": "bytes32[]", + "name": "proof", + "type": "bytes32[]" + } + ], + "name": "verifyRowInclusion", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + } +] \ No newline at end of file diff --git a/pkg/merkletree/publisher/impl/root-registry-bytecode.bin b/pkg/merkletree/publisher/impl/root-registry-bytecode.bin new file mode 100644 index 00000000..8f7a25c3 --- /dev/null +++ b/pkg/merkletree/publisher/impl/root-registry-bytecode.bin @@ -0,0 +1 @@ 
+608060405234801561001057600080fd5b50610613806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c8063719f98111461003b578063d88fd1f41461006b575b600080fd5b61005560048036038101906100509190610339565b610087565b60405161006291906103c8565b60405180910390f35b61008560048036038101906100809190610439565b6100f6565b005b6000806000808781526020019081526020016000205490506100eb848480806020026020016040519081016040528093929190818152602001838360200280828437600081840152601f19601f8201169050808301925050505050505082876101af565b915050949350505050565b81819050848490501461013e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161013590610517565b60405180910390fd5b60005b848490508110156101a85782828281811061015f5761015e610537565b5b9050602002013560008087878581811061017c5761017b610537565b5b9050602002013581526020019081526020016000208190555080806101a090610595565b915050610141565b5050505050565b6000826101bc85846101c6565b1490509392505050565b60008082905060005b8451811015610211576101fc828683815181106101ef576101ee610537565b5b602002602001015161021c565b9150808061020990610595565b9150506101cf565b508091505092915050565b60008183106102345761022f8284610247565b61023f565b61023e8383610247565b5b905092915050565b600082600052816020526040600020905092915050565b600080fd5b600080fd5b6000819050919050565b61027b81610268565b811461028657600080fd5b50565b60008135905061029881610272565b92915050565b6000819050919050565b6102b18161029e565b81146102bc57600080fd5b50565b6000813590506102ce816102a8565b92915050565b600080fd5b600080fd5b600080fd5b60008083601f8401126102f9576102f86102d4565b5b8235905067ffffffffffffffff811115610316576103156102d9565b5b602083019150836020820283011115610332576103316102de565b5b9250929050565b600080600080606085870312156103535761035261025e565b5b600061036187828801610289565b9450506020610372878288016102bf565b935050604085013567ffffffffffffffff81111561039357610392610263565b5b61039f878288016102e3565b925092505092959194509250565b60008115159050919050565b6103c2816103ad565b825250505
65b60006020820190506103dd60008301846103b9565b92915050565b60008083601f8401126103f9576103f86102d4565b5b8235905067ffffffffffffffff811115610416576104156102d9565b5b602083019150836020820283011115610432576104316102de565b5b9250929050565b600080600080604085870312156104535761045261025e565b5b600085013567ffffffffffffffff81111561047157610470610263565b5b61047d878288016103e3565b9450945050602085013567ffffffffffffffff8111156104a05761049f610263565b5b6104ac878288016102e3565b925092505092959194509250565b600082825260208201905092915050565b7f6c656e6774687320646f6e2774206d6174636800000000000000000000000000600082015250565b60006105016013836104ba565b915061050c826104cb565b602082019050919050565b60006020820190508181036000830152610530816104f4565b9050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60006105a082610268565b91507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036105d2576105d1610566565b5b60018201905091905056fea2646970667358221220db32a5f9744ccb3526456fbb36b8152b9bed84903b501d1c160ed16fbb41600964736f6c63430008120033 \ No newline at end of file diff --git a/pkg/merkletree/publisher/impl/tree_store.go b/pkg/merkletree/publisher/impl/tree_store.go new file mode 100644 index 00000000..20ffa781 --- /dev/null +++ b/pkg/merkletree/publisher/impl/tree_store.go @@ -0,0 +1,89 @@ +package impl + +import ( + "encoding/binary" + "fmt" + "math/big" + + "github.com/textileio/go-tableland/pkg/merkletree" + "go.etcd.io/bbolt" +) + +// MerkleTreeStore stores merkle trees. +type MerkleTreeStore struct { + db *bbolt.DB +} + +// NewMerkleTreeStore creates a new Merkle Tree store. +func NewMerkleTreeStore(path string) (*MerkleTreeStore, error) { + db, err := bbolt.Open(path, 0o600, nil) + if err != nil { + return nil, fmt.Errorf("opening database: %s", err) + } + + return &MerkleTreeStore{ + db: db, + }, nil +} + +// Store stores a merkle tree. 
+func (s *MerkleTreeStore) Store(chainID int64, tableID *big.Int, _ int64, tree *merkletree.MerkleTree) error { + tx, err := s.db.Begin(true) + if err != nil { + return fmt.Errorf("begin: %s", err) + } + + bucket := make([]byte, 8) + binary.LittleEndian.PutUint64(bucket, uint64(chainID)) + + b, err := tx.CreateBucketIfNotExists(bucket) + if err != nil { + return fmt.Errorf("creating bucket: %s", err) + } + + // bn := make([]byte, 8) + // binary.LittleEndian.PutUint64(bn, uint64(blockNumber)) + + // key := append(tableID.Bytes(), bn...) + + key := tableID.Bytes() + if err := b.Put(key, tree.Marshal()); err != nil { + return fmt.Errorf("storing the tree: %s", err) + } + + if err := tx.Commit(); err != nil { + return fmt.Errorf("commit: %s", err) + } + + return nil +} + +// Get fetches a tree and deserialize it. +func (s *MerkleTreeStore) Get(chainID int64, tableID *big.Int, _ int64) (*merkletree.MerkleTree, error) { + var tree *merkletree.MerkleTree + if err := s.db.View(func(tx *bbolt.Tx) error { + bucket := make([]byte, 8) + binary.LittleEndian.PutUint64(bucket, uint64(chainID)) + + b := tx.Bucket(bucket) + if b == nil { + return fmt.Errorf("bucket is nil") + } + + var err error + tree, err = merkletree.Unmarshal(b.Get(tableID.Bytes()), nil) + if err != nil { + return fmt.Errorf("unmarshalling tree: %s", err) + } + return nil + }); err != nil { + return nil, fmt.Errorf("db view: %s", err) + } + + return tree, nil +} + +// Close closes the store. 
+func (s *MerkleTreeStore) Close() error { + return s.db.Close() +} diff --git a/pkg/merkletree/publisher/publisher.go b/pkg/merkletree/publisher/publisher.go new file mode 100644 index 00000000..1c208d7f --- /dev/null +++ b/pkg/merkletree/publisher/publisher.go @@ -0,0 +1,179 @@ +package publisher + +import ( + "context" + "errors" + "fmt" + "hash/fnv" + "math/big" + "sync" + "time" + + logger "github.com/rs/zerolog/log" + "github.com/textileio/go-tableland/pkg/merkletree" +) + +// TreeLeaves represents a leaves' snapshot of a table at a particular block. +type TreeLeaves struct { + Leaves []byte + ChainID int64 + TableID *big.Int + BlockNumber int64 + TablePrefix string +} + +// ChainIDBlockNumberPair is a pair of ChainID and BlockNumber. +type ChainIDBlockNumberPair struct { + ChainID int64 + BlockNumber int64 +} + +// LeavesStore defines the API for fetching leaves from trees that need to be built. +type LeavesStore interface { + FetchLeavesByChainIDAndBlockNumber(context.Context, int64, int64) ([]TreeLeaves, error) + FetchChainIDAndBlockNumber(context.Context) ([]ChainIDBlockNumberPair, error) + DeleteProcessing(context.Context, int64, int64) error +} + +// MerkleTreeStore defines the API for storing the merkle tree. +type MerkleTreeStore interface { + Store(chainID int64, tableID *big.Int, blockNumber int64, tree *merkletree.MerkleTree) error +} + +// MerkleRootRegistry defines the API for publishing root. +type MerkleRootRegistry interface { + // Publish publishes the roots of multiple tables at a particular block. + Publish(ctx context.Context, chainID int64, blockNumber int64, tables []*big.Int, roots [][]byte) error +} + +// MerkleRootPublisher is responsible for building Merkle Tree and publishing the root. 
+type MerkleRootPublisher struct { + leavesStore LeavesStore // where leaves are stored + treeStore MerkleTreeStore // where trees are stored + registry MerkleRootRegistry // where root will be published + + // wallet *wallet.Wallet + interval time.Duration + + quitOnce sync.Once + quit chan struct{} +} + +// NewMerkleRootPublisher creates a new publisher. +func NewMerkleRootPublisher( + leavesStore LeavesStore, + treeStore MerkleTreeStore, + registry MerkleRootRegistry, + interval time.Duration, +) *MerkleRootPublisher { + return &MerkleRootPublisher{ + leavesStore: leavesStore, + treeStore: treeStore, + registry: registry, + + // wallet: wallet, + interval: interval, + quit: make(chan struct{}), + } +} + +var log = logger.With(). + Str("component", "merkletreepublisher"). + Logger() + +// Start starts the publisher. +func (p *MerkleRootPublisher) Start() { + ctx := context.Background() + + ticker := time.NewTicker(p.interval) + go func() { + for { + select { + case <-ticker.C: + if err := p.publish(ctx); err != nil { + log.Err(err).Msg("failed to publish merkle root") + } + case <-p.quit: + log.Info().Msg("quiting merkle root publisher") + ticker.Stop() + return + } + } + }() +} + +// Close closes the published goroutine. 
+func (p *MerkleRootPublisher) Close() { + p.quitOnce.Do(func() { + p.quit <- struct{}{} + close(p.quit) + }) +} + +func (p *MerkleRootPublisher) publish(ctx context.Context) error { + chainIDBlockNumberPairs, err := p.leavesStore.FetchChainIDAndBlockNumber(ctx) + if err != nil { + return fmt.Errorf("fetching block number and chain id pairs: %s", err) + } + + // we do `n` publish calls, where n is the number of chains + for _, pair := range chainIDBlockNumberPairs { + tableLeaves, err := p.leavesStore.FetchLeavesByChainIDAndBlockNumber(ctx, pair.ChainID, pair.BlockNumber) + if err != nil { + return fmt.Errorf("fetch unpublished metrics: %s", err) + } + + if len(tableLeaves) == 0 { + return nil + } + + tableIDs, roots := make([]*big.Int, len(tableLeaves)), make([][]byte, len(tableLeaves)) + for i, table := range tableLeaves { + if table.ChainID != pair.ChainID { + return fmt.Errorf("chain id mismatch (%d, %d)", table.ChainID, pair.ChainID) + } + + if table.BlockNumber != pair.BlockNumber { + return fmt.Errorf("block number mismatch (%d, %d)", table.BlockNumber, pair.BlockNumber) + } + + // gotta use the block size of the hash used to encode the leaves + chunks, err := chunker(table.Leaves, fnv.New128a().Size()) + if err != nil { + return fmt.Errorf("breaking leaves into chunks: %s", err) + } + + tree, err := merkletree.NewTree(chunks, nil) + if err != nil { + return fmt.Errorf("building a tree: %s", err) + } + + tableIDs[i], roots[i] = table.TableID, tree.MerkleRoot() + if err := p.treeStore.Store(pair.ChainID, table.TableID, pair.BlockNumber, tree); err != nil { + return fmt.Errorf("storing the tree chain: %d, table: %d): %s", pair.ChainID, table.TableID.Int64(), err) + } + } + + if err := p.registry.Publish(ctx, pair.ChainID, pair.BlockNumber, tableIDs, roots); err != nil { + return fmt.Errorf("publishing root: %s", err) + } + + if err := p.leavesStore.DeleteProcessing(ctx, pair.ChainID, pair.BlockNumber); err != nil { + return fmt.Errorf("delete processing: %s", 
err) + } + } + + return nil +} + +func chunker(data []byte, size int) ([][]byte, error) { + if len(data)%size != 0 { + return [][]byte{}, errors.New("data length should be multiple of size") + } + chunks := make([][]byte, len(data)/size) + for i := 0; i < len(data); i += size { + chunks[i/size] = data[i : i+size] + } + + return chunks, nil +} diff --git a/pkg/merkletree/tree.go b/pkg/merkletree/tree.go new file mode 100644 index 00000000..0b6a23a8 --- /dev/null +++ b/pkg/merkletree/tree.go @@ -0,0 +1,323 @@ +package merkletree + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "hash" + "sort" + + "golang.org/x/crypto/sha3" +) + +// DefaultHasher is the default hash function in case none is passed. +var DefaultHasher = sha3.NewLegacyKeccak256 + +// EncodingSchemaNLBytes indicates the number of bytes of the number of leaves in the encoding schema. +const EncodingSchemaNLBytes = 4 + +// MerkleTree is a binary Merkle Tree implemenation. +type MerkleTree struct { + root *Node + leaves []*Node + + h hash.Hash +} + +// Node represents a Node of MerkleTree. +type Node struct { + parent, left, right *Node + hash []byte +} + +// Proof represents a proof. +type Proof [][]byte + +// Hex is a hex encoded representation of a proof. +func (p Proof) Hex() []string { + pieces := make([]string, len(p)) + for i, part := range p { + pieces[i] = hex.EncodeToString(part) + } + + return pieces +} + +func (n *Node) isLeaf() bool { + return n.left == nil && n.right == nil +} + +// NewTree builds a new Merkle Tree. 
+func NewTree(leaves [][]byte, hasher func() hash.Hash) (*MerkleTree, error) { + if hasher == nil { + hasher = DefaultHasher + } + + tree := &MerkleTree{ + h: hasher(), + } + + if len(leaves) == 0 { + return nil, errors.New("no leaves") + } + + if err := tree.buildTree(leaves); err != nil { + return nil, fmt.Errorf("building the tree: %s", err) + } + return tree, nil +} + +func (t *MerkleTree) buildTree(leaves [][]byte) error { + t.leaves = make([]*Node, len(leaves)) + for i, leaf := range leaves { + if len(leaf) == 0 { + return errors.New("leaf cannot be empty") + } + + t.leaves[i] = &Node{ + hash: t.hashFunc(leaf), + } + } + + // leaves are sortable + sort.Slice(t.leaves, func(i, j int) bool { + return bytes.Compare(t.leaves[i].hash, t.leaves[j].hash) == -1 + }) + + // We add an extra empty node at the end, in case the number of leaves is odd. + if len(t.leaves)%2 == 1 { + t.leaves = append(t.leaves, &Node{ + hash: t.leaves[len(t.leaves)-1].hash, + }) + } + + t.buildInternalNodes(t.leaves) + + return nil +} + +func (t *MerkleTree) buildInternalNodes(nodes []*Node) { + // we are at the root + if len(nodes) == 1 { + t.root = nodes[0] + return + } + + // the number of parents is half of the number of children + parentNodes := make([]*Node, (len(nodes)+1)/2) + for i := 0; i < len(nodes); i += 2 { + // we loop in pairs, if the length of nodes is odd, left and right points to the same node + left, right := i, i+1 + if i+1 == len(nodes) { + right = i + } + + // hash pair needs to be sorted + l, r := sortPair(nodes[left].hash, nodes[right].hash) + + parent := &Node{ + hash: t.hashFunc(l, r), + left: nodes[left], + right: nodes[right], + } + nodes[left].parent, nodes[right].parent = parent, parent + parentNodes[i/2] = parent + } + + t.buildInternalNodes(parentNodes) +} + +// verifyTree calculates the merkle root again by traversing the tree and verify if it's the same it holds. 
+func (t *MerkleTree) verifyTree() bool { + merkleRoot := t.verify(t.root) + return bytes.Equal(t.root.hash, merkleRoot) +} + +func (t *MerkleTree) verify(node *Node) []byte { + if node.isLeaf() { + return node.hash + } + + if bytes.Compare(node.left.hash, node.right.hash) > 0 { + return t.hashFunc(t.verify(node.right), t.verify(node.left)) + } + + return t.hashFunc(t.verify(node.left), t.verify(node.right)) +} + +// GetProof gets the proof for a particular content. +func (t *MerkleTree) GetProof(leaf []byte) (bool, Proof) { + index, found := sort.Find(len(t.leaves), func(i int) int { + return bytes.Compare(leaf, t.leaves[i].hash) + }) + if !found { + return false, nil + } + + l := t.leaves[index] + var proof [][]byte + parent := l.parent + for parent != nil { + if bytes.Equal(parent.left.hash, l.hash) { + proof = append(proof, parent.right.hash) + } else { + proof = append(proof, parent.left.hash) + } + l, parent = parent, parent.parent + } + return true, proof +} + +// MerkleRoot returns the merkle root of the tree. +func (t *MerkleTree) MerkleRoot() []byte { + if t.root == nil { + return nil + } + return t.root.hash +} + +// VerifyProof verifies a given proof for a leaf. +func VerifyProof(proof Proof, root []byte, leaf []byte, hasher func() hash.Hash) bool { + if hasher == nil { + hasher = DefaultHasher + } + h := hasher() + + computedHash := leaf + for i := 0; i < len(proof); i++ { + left, right := sortPair(computedHash, proof[i]) + _, _ = h.Write(left) + _, _ = h.Write(right) + computedHash = h.Sum(nil) + h.Reset() + } + return bytes.Equal(root, computedHash) +} + +// Marshal serializes the tree. +// +// The encoding schema is such that the first 4 bytes we store the number of leaves (NL), +// and then the nodes' hashes are put one next to the order starting from the root +// on a level order traversal. 
+// +// 0 4 <----------------------- HashSize ------------------------->| +// |-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-| +// | NL | root | ... +// |-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|-|. +func (t *MerkleTree) Marshal() []byte { + // we start by filling the first 4 bytes with the leaves length + buf := new(bytes.Buffer) + _ = binary.Write(buf, binary.LittleEndian, uint32(len(t.leaves))) + + var node *Node + queue := []*Node{t.root} + for len(queue) > 0 { + node, queue = queue[0], queue[1:] + if node.left != nil { + queue = append(queue, node.left) + } + // the second condition is for the case the node's children point to the same node + // we don't want to duplicate the node + if node.right != nil && node.right != node.left { + queue = append(queue, node.right) + } + + buf.Write(node.hash) + } + + return buf.Bytes() +} + +// Unmarshal deserializes the tree. +func Unmarshal(data []byte, hasher func() hash.Hash) (*MerkleTree, error) { + if hasher == nil { + hasher = DefaultHasher + } + h := hasher() + hSize := h.Size() + + numberOfLeavesInBytes, data := data[0:EncodingSchemaNLBytes], data[EncodingSchemaNLBytes:] + numberOfLeaves := int(binary.LittleEndian.Uint32(numberOfLeavesInBytes)) + + if len(data)%hSize != 0 { + return nil, errors.New("leaves data is not multiple of hash size") + } + + // we start with an pointer p pointing to the end + p := len(data) + + // build the leaves + leaves := make([]*Node, numberOfLeaves) + for i := 0; i < numberOfLeaves; i++ { + leaves[numberOfLeaves-i-1] = &Node{ + hash: data[p-hSize*(1+i) : p-hSize*i], + } + } + + // adjust pointer position + p = p - hSize*len(leaves) + + // build next levels + previousLevelNodes := leaves + var root *Node + for { + currentLevelNodes := make([]*Node, len(previousLevelNodes)/2) + l, r := len(previousLevelNodes)-2, len(previousLevelNodes)-1 + + // adjust according to length of previous level + if len(previousLevelNodes)%2 != 
0 { + currentLevelNodes = append(currentLevelNodes, nil) + l = r + } + + for i := 0; i < len(currentLevelNodes); i++ { + n := &Node{ + left: previousLevelNodes[l], + right: previousLevelNodes[r], + hash: data[p-hSize*(1+i) : p-hSize*i], + } + currentLevelNodes[len(currentLevelNodes)-i-1] = n + previousLevelNodes[l].parent, previousLevelNodes[r].parent = n, n + + // adjust according to the length of previous level only on first iteration + if i == 0 && len(previousLevelNodes)%2 != 0 { + l, r = l-2, r-1 + } else { + l, r = l-2, r-2 + } + } + + // we are at the root + if len(currentLevelNodes) == 1 { + root = currentLevelNodes[0] + break + } + + p = p - hSize*len(currentLevelNodes) + previousLevelNodes = currentLevelNodes + } + + return &MerkleTree{ + root: root, + leaves: leaves, + h: h, + }, nil +} + +func (t *MerkleTree) hashFunc(data ...[]byte) []byte { + t.h.Reset() + for _, part := range data { + _, _ = t.h.Write(part) + } + return t.h.Sum(nil) +} + +func sortPair(a []byte, b []byte) ([]byte, []byte) { + if bytes.Compare(a, b) > 0 { + return b, a + } + + return a, b +} diff --git a/pkg/merkletree/tree_test.go b/pkg/merkletree/tree_test.go new file mode 100644 index 00000000..14a951bd --- /dev/null +++ b/pkg/merkletree/tree_test.go @@ -0,0 +1,457 @@ +package merkletree + +import ( + "bytes" + "crypto/sha1" + "encoding/binary" + "fmt" + "hash" + "math/rand" + "sort" + "strconv" + "strings" + "testing" + "testing/quick" + + "github.com/stretchr/testify/require" + "golang.org/x/crypto/sha3" +) + +func TestNewTree(t *testing.T) { + t.Parallel() + testCases := []struct { + name string + leaves [][]byte + merkleRoot []byte + serialized []byte + }{ + { + "one node", + [][]byte{[]byte("001")}, + []byte("002"), + []byte("002001001"), + }, + { + "two nodes", + [][]byte{[]byte("001"), []byte("002")}, + []byte("003"), + []byte("003001002"), + }, + { + "three nodes", + // 003 is duplicated at the end + [][]byte{[]byte("001"), []byte("002"), []byte("003")}, + []byte("009"), + 
[]byte("009003006001002003003"), + }, + { + "four nodes", + [][]byte{[]byte("001"), []byte("002"), []byte("003"), []byte("004")}, + []byte("010"), + []byte("010003007001002003004"), + }, + { + "five nodes", + // 005 is duplicated but does not have a power of 2 number of leaves + [][]byte{[]byte("001"), []byte("002"), []byte("003"), []byte("004"), []byte("005")}, + []byte("030"), + []byte("030010020003007010001002003004005005"), + }, + { + "eight nodes", + [][]byte{ + []byte("001"), []byte("002"), []byte("003"), []byte("004"), + []byte("005"), []byte("006"), []byte("007"), []byte("008"), + }, + []byte("036"), + []byte("036010026003007011015001002003004005006007008"), + }, + } + + for _, test := range testCases { + t.Run(test.name, func(t *testing.T) { + // shuffle leaves just to make sure the order of leaves does not affect the merkle root value + rand.Shuffle(len(test.leaves), func(i, j int) { + test.leaves[i], test.leaves[j] = test.leaves[j], test.leaves[i] + }) + + tree, err := NewTree(test.leaves, mockHash) + require.NoError(t, err) + require.Equal(t, test.merkleRoot, tree.MerkleRoot()) + + s := tree.Marshal() + require.Equal(t, test.serialized, s[EncodingSchemaNLBytes:]) + require.Equal(t, len(tree.leaves), int(binary.LittleEndian.Uint32(s[:EncodingSchemaNLBytes]))) + require.Len(t, s, EncodingSchemaNLBytes+expectedNumberOfNodes(len(tree.leaves))*mockHash().Size()) + require.True(t, tree.verifyTree()) + + // check that we get a tree equal to the original + tree2, err := Unmarshal(s, mockHash) + require.NoError(t, err) + require.True(t, tree2.verifyTree()) + require.Equal(t, s, tree2.Marshal()) + require.Equal(t, tree, tree2) + }) + } + + t.Run("no leaves", func(t *testing.T) { + t.Parallel() + var err error + + _, err = NewTree([][]byte{}, nil) + require.Error(t, err) + + _, err = NewTree(nil, nil) + require.Error(t, err) + }) +} + +func TestGetProof(t *testing.T) { + t.Parallel() + t.Run("five nodes", func(t *testing.T) { + t.Parallel() + leaves := [][]byte{ + 
[]byte("001"), + []byte("002"), + []byte("003"), + []byte("004"), + []byte("005"), + } + + tree, err := NewTree(leaves, mockHash) + require.NoError(t, err) + + found, proof := tree.GetProof([]byte("005")) + require.True(t, found) + require.Equal(t, Proof([][]byte{[]byte("005"), []byte("010"), []byte("010")}), proof) + }) + + t.Run("eight nodes", func(t *testing.T) { + t.Parallel() + leaves := [][]byte{ + []byte("001"), + []byte("002"), + []byte("003"), + []byte("004"), + []byte("005"), + []byte("006"), + []byte("007"), + []byte("008"), + } + + tree, err := NewTree(leaves, mockHash) + require.NoError(t, err) + + found, proof := tree.GetProof([]byte("001")) + require.True(t, found) + require.Equal(t, Proof([][]byte{[]byte("002"), []byte("007"), []byte("026")}), proof) + + found, proof = tree.GetProof([]byte("002")) + require.True(t, found) + require.Equal(t, Proof([][]byte{[]byte("001"), []byte("007"), []byte("026")}), proof) + + found, proof = tree.GetProof([]byte("003")) + require.True(t, found) + require.Equal(t, Proof([][]byte{[]byte("004"), []byte("003"), []byte("026")}), proof) + + found, proof = tree.GetProof([]byte("004")) + require.True(t, found) + require.Equal(t, Proof([][]byte{[]byte("003"), []byte("003"), []byte("026")}), proof) + + found, proof = tree.GetProof([]byte("005")) + require.True(t, found) + require.Equal(t, Proof([][]byte{[]byte("006"), []byte("015"), []byte("010")}), proof) + + found, proof = tree.GetProof([]byte("006")) + require.True(t, found) + require.Equal(t, Proof([][]byte{[]byte("005"), []byte("015"), []byte("010")}), proof) + + found, proof = tree.GetProof([]byte("007")) + require.True(t, found) + require.Equal(t, Proof([][]byte{[]byte("008"), []byte("011"), []byte("010")}), proof) + + found, proof = tree.GetProof([]byte("008")) + require.True(t, found) + require.Equal(t, Proof([][]byte{[]byte("007"), []byte("011"), []byte("010")}), proof) + }) + + t.Run("not found", func(t *testing.T) { + t.Parallel() + leaves := [][]byte{ + 
[]byte("001"), + []byte("002"), + []byte("003"), + []byte("004"), + []byte("005"), + } + + tree, err := NewTree(leaves, mockHash) + require.NoError(t, err) + + found, proof := tree.GetProof([]byte("006")) + require.False(t, found) + require.Nil(t, proof) + require.Len(t, proof, 0) + }) +} + +func TestVerifyProof(t *testing.T) { + t.Parallel() + t.Run("correct proof", func(t *testing.T) { + t.Parallel() + root := []byte("036") + proof := Proof([][]byte{[]byte("002"), []byte("007"), []byte("026")}) + leaf := []byte("001") + require.True(t, VerifyProof(proof, root, leaf, mockHash)) + }) + + t.Run("wrong root", func(t *testing.T) { + t.Parallel() + root := []byte("035") + proof := Proof([][]byte{[]byte("002"), []byte("007"), []byte("026")}) + leaf := []byte("001") + require.False(t, VerifyProof(proof, root, leaf, mockHash)) + }) + + t.Run("wrong proof", func(t *testing.T) { + t.Parallel() + root := []byte("036") + proof := Proof([][]byte{[]byte("001"), []byte("007"), []byte("026")}) + leaf := []byte("001") + require.False(t, VerifyProof(proof, root, leaf, mockHash)) + }) +} + +func TestProperties(t *testing.T) { + t.Parallel() + + // We test the properties in a bunch of hash functions to make sure the + // kind of hash function has no influence on properties. 
+ hashers := []func() hash.Hash{ + nil, + func() hash.Hash { return &mockHasher{} }, + sha3.NewLegacyKeccak256, + sha3.NewLegacyKeccak512, + sha3.New224, + sha3.New256, + sha3.New384, + sha3.New512, + sha1.New, + } + + t.Run("tree holds merkle tree property", func(t *testing.T) { + t.Parallel() + + for _, hasher := range hashers { + property := func(leaves [][]byte) bool { + if len(leaves) == 0 { + return true + } + + tree, err := NewTree(leaves, hasher) + if err != nil { + // ignore check when leaf is empty + return strings.Contains(err.Error(), "leaf cannot be empty") + } + + return tree.verifyTree() + } + require.NoError(t, quick.Check(property, nil)) + } + }) + + t.Run("leaves are always sorted", func(t *testing.T) { + t.Parallel() + + for _, hasher := range hashers { + property := func(leaves [][]byte) bool { + if len(leaves) == 0 { + return true + } + + tree, err := NewTree(leaves, hasher) + if err != nil { + // ignore check when leaf is empty + return strings.Contains(err.Error(), "leaf cannot be empty") + } + + return sort.SliceIsSorted(tree.leaves, func(i, j int) bool { + return bytes.Compare(tree.leaves[i].hash, tree.leaves[j].hash) == -1 + }) + } + require.NoError(t, quick.Check(property, nil)) + } + }) + + t.Run("height of the tree is correct", func(t *testing.T) { + t.Parallel() + for _, hasher := range hashers { + property := func(leaves [][]byte) bool { + if len(leaves) == 0 { + return true + } + + tree, err := NewTree(leaves, hasher) + if err != nil { + // ignore check when leaf is empty + return strings.Contains(err.Error(), "leaf cannot be empty") + } + + return heightOfTree(tree.root) == expectedHeight(len(leaves)) + } + require.NoError(t, quick.Check(property, nil)) + } + }) + + t.Run("if number of leaves is odd, then the last leaf is duplicated", func(t *testing.T) { + t.Parallel() + for _, hasher := range hashers { + property := func(leaves [][]byte) bool { + if len(leaves)%2 == 0 { + return true + } + + tree, err := NewTree(leaves, hasher) + 
if err != nil { + // ignore check when leaf is empty + return strings.Contains(err.Error(), "leaf cannot be empty") + } + + return len(tree.leaves) == len(leaves)+1 && + bytes.Equal(tree.leaves[len(tree.leaves)-1].hash, tree.leaves[len(tree.leaves)-2].hash) + } + require.NoError(t, quick.Check(property, nil)) + } + }) + + t.Run("every leaf proof is correctly verifiable", func(t *testing.T) { + t.Parallel() + for _, hasher := range hashers { + property := func(leaves [][]byte) bool { + if len(leaves) == 0 { + return true + } + + tree, err := NewTree(leaves, hasher) + if err != nil { + // ignore check when leaf is empty + return strings.Contains(err.Error(), "leaf cannot be empty") + } + + for _, leaf := range tree.leaves { + _, proof := tree.GetProof(leaf.hash) + root := tree.MerkleRoot() + if !VerifyProof(proof, root, leaf.hash, hasher) { + return false + } + } + + return true + } + require.NoError(t, quick.Check(property, nil)) + } + }) + + t.Run("serializing then deserializing does not change the tree", func(t *testing.T) { + t.Parallel() + for _, hasher := range hashers { + property := func(leaves [][]byte) bool { + if len(leaves) == 0 { + return true + } + + tree, err := NewTree(leaves, hasher) + if err != nil { + // ignore check when leaf is empty + return strings.Contains(err.Error(), "leaf cannot be empty") + } + require.True(t, tree.verifyTree()) + + s := tree.Marshal() + tree2, err := Unmarshal(s, hasher) + require.NoError(t, err) + + require.True(t, tree2.verifyTree()) + require.Equal(t, s, tree2.Marshal()) + require.Equal(t, tree.root, tree2.root) + + return true + } + require.NoError(t, quick.Check(property, nil)) + } + }) +} + +func mockHash() hash.Hash { + return &mockHasher{} +} + +type mockHasher struct { + sum int64 +} + +func (h *mockHasher) Write(p []byte) (n int, err error) { + number, _ := strconv.ParseInt(string(p), 10, 0) + h.sum += number + return len(p), nil +} + +func (h *mockHasher) Sum(_ []byte) []byte { + hash := fmt.Sprintf("%03d", 
h.sum) + return []byte(hash) +} + +func (h *mockHasher) Reset() { + h.sum = 0 +} + +func (h *mockHasher) Size() int { + return 3 +} + +func (h *mockHasher) BlockSize() int { + return 0 +} + +// calculates the height of the tree. +func heightOfTree(node *Node) int { + height := 1 + for !node.isLeaf() { + node = node.left + height++ + } + + return height +} + +// calculates the expected height of a tree with n leaves. +func expectedHeight(n int) int { + if n%2 == 1 { + n++ + } + + h := 1 + for n > 1 { + if n%2 == 1 { + n++ + } + h++ + n = n / 2 + } + + return h +} + +// calculates the expected number of the nodes of a tree with n leaves. +func expectedNumberOfNodes(n int) int { + nodes := n + for n > 1 { + if n%2 == 1 { + n++ + } + n = n / 2 + nodes = nodes + n + } + + return nodes +} diff --git a/pkg/nonce/impl/tracker_test.go b/pkg/nonce/impl/tracker_test.go index 5ed68b78..03783997 100644 --- a/pkg/nonce/impl/tracker_test.go +++ b/pkg/nonce/impl/tracker_test.go @@ -2,7 +2,6 @@ package impl import ( "context" - "crypto/ecdsa" "encoding/hex" "errors" "fmt" @@ -23,7 +22,6 @@ import ( "github.com/textileio/go-tableland/pkg/sqlstore" "github.com/textileio/go-tableland/pkg/sqlstore/impl/system" "github.com/textileio/go-tableland/pkg/tables/impl/ethereum" - "github.com/textileio/go-tableland/pkg/tables/impl/testutil" "github.com/textileio/go-tableland/pkg/wallet" "github.com/textileio/go-tableland/tests" ) @@ -481,16 +479,16 @@ func setup(ctx context.Context, t *testing.T) ( ) { url := tests.Sqlite3URI(t) - backend, _, contract, txOptsFrom, sk := testutil.Setup(t) - - key, err := crypto.GenerateKey() + // Spin up the EVM chain with the contract. 
+ simulatedChain := tests.NewSimulatedChain(t) + contract, err := simulatedChain.DeployContract(t, ethereum.Deploy) require.NoError(t, err) + key := simulatedChain.CreateAccountWithBalance(t) + txOptsTo, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) // nolint require.NoError(t, err) - requireTxn(t, backend, sk, txOptsFrom.From, txOptsTo.From, big.NewInt(1000000000000000000)) - wallet, err := wallet.NewWallet(hex.EncodeToString(crypto.FromECDSA(key))) require.NoError(t, err) @@ -501,54 +499,12 @@ func setup(ctx context.Context, t *testing.T) ( ctx, wallet, &NonceStore{sqlstore}, - 1337, - backend, + tableland.ChainID(simulatedChain.ChainID), + simulatedChain.Backend, 500*time.Millisecond, 0, 10*time.Minute) require.NoError(t, err) - return tracker, backend, contract, txOptsTo, wallet, sqlstore -} - -func requireTxn( - t *testing.T, - backend *backends.SimulatedBackend, - key *ecdsa.PrivateKey, - from common.Address, - to common.Address, - amt *big.Int, -) { - nonce, err := backend.PendingNonceAt(context.Background(), from) - require.NoError(t, err) - - gasLimit := uint64(21000) - gasPrice, err := backend.SuggestGasPrice(context.Background()) - require.NoError(t, err) - - var data []byte - txnData := &types.LegacyTx{ - Nonce: nonce, - GasPrice: gasPrice, - Gas: gasLimit, - To: &to, - Data: data, - Value: amt, - } - tx := types.NewTx(txnData) - signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, key) - require.NoError(t, err) - - bal, err := backend.BalanceAt(context.Background(), from, nil) - require.NoError(t, err) - require.NotZero(t, bal) - - err = backend.SendTransaction(context.Background(), signedTx) - require.NoError(t, err) - - backend.Commit() - - receipt, err := backend.TransactionReceipt(context.Background(), signedTx.Hash()) - require.NoError(t, err) - require.NotNil(t, receipt) + return tracker, simulatedChain.Backend, contract.Contract.(*ethereum.Contract), txOptsTo, wallet, sqlstore } diff --git 
a/pkg/sqlstore/impl/sqlite_db.go b/pkg/sqlstore/impl/sqlite_db.go new file mode 100644 index 00000000..bcd13512 --- /dev/null +++ b/pkg/sqlstore/impl/sqlite_db.go @@ -0,0 +1,96 @@ +package impl + +// import ( +// "database/sql" +// "fmt" + +// "github.com/XSAM/otelsql" +// "github.com/golang-migrate/migrate/v4" +// _ "github.com/golang-migrate/migrate/v4/database/sqlite3" // migration for sqlite3 +// bindata "github.com/golang-migrate/migrate/v4/source/go_bindata" +// "github.com/rs/zerolog" +// logger "github.com/rs/zerolog/log" +// "github.com/textileio/go-tableland/pkg/metrics" +// "github.com/textileio/go-tableland/pkg/sqlstore/impl/system/db" +// "github.com/textileio/go-tableland/pkg/sqlstore/impl/system/migrations" +// "go.opentelemetry.io/otel/attribute" +// ) + +// // SQLiteDB represents a connection to a SQLite database. +// type SQLiteDB struct { +// Log zerolog.Logger +// DB *sql.DB +// Queries *db.Queries +// } + +// // NewSQLiteDB returns a new SQLiteDB backed by database/sql. +// func NewSQLiteDB(dbURI string) (*SQLiteDB, error) { +// attrs := append([]attribute.KeyValue{ +// attribute.String("name", "sqlitedb"), +// }, +// metrics.BaseAttrs...) +// dbc, err := otelsql.Open("sqlite3", dbURI, otelsql.WithAttributes(attrs...)) +// if err != nil { +// return nil, fmt.Errorf("connecting to db: %s", err) +// } +// if err := otelsql.RegisterDBStatsMetrics(dbc, otelsql.WithAttributes( +// attribute.String("name", "sqlitedb"), +// )); err != nil { +// return nil, fmt.Errorf("registering dbstats: %s", err) +// } + +// log := logger.With(). +// Str("component", "sqlitedb"). 
+// Logger() + +// systemStore := &SQLiteDB{ +// Log: log, +// DB: dbc, +// Queries: db.New(dbc), +// } + +// as := bindata.Resource(migrations.AssetNames(), migrations.Asset) +// if err := systemStore.executeMigration(dbURI, as); err != nil { +// return nil, fmt.Errorf("initializing db connection: %s", err) +// } + +// return systemStore, nil +// } + +// // executeMigration run db migrations and return a ready to use connection to the SQLite database. +// func (db *SQLiteDB) executeMigration(dbURI string, as *bindata.AssetSource) error { +// d, err := bindata.WithInstance(as) +// if err != nil { +// return fmt.Errorf("creating source driver: %s", err) +// } + +// m, err := migrate.NewWithSourceInstance("go-bindata", d, "sqlite3://"+dbURI) +// if err != nil { +// return fmt.Errorf("creating migration: %s", err) +// } +// defer func() { +// if _, err := m.Close(); err != nil { +// db.Log.Error().Err(err).Msg("closing db migration") +// } +// }() +// version, dirty, err := m.Version() +// db.Log.Info(). +// Uint("dbVersion", version). +// Bool("dirty", dirty). +// Err(err). +// Msg("database migration executed") + +// if err := m.Up(); err != nil && err != migrate.ErrNoChange { +// return fmt.Errorf("running migration up: %s", err) +// } + +// return nil +// } + +// // Close closes the database. 
+// func (db *SQLiteDB) Close() error { +// if err := db.DB.Close(); err != nil { +// return fmt.Errorf("closing db: %s", err) +// } +// return nil +// } diff --git a/pkg/sqlstore/impl/system/internal/db/acl.sql.go b/pkg/sqlstore/impl/system/db/acl.sql.go similarity index 100% rename from pkg/sqlstore/impl/system/internal/db/acl.sql.go rename to pkg/sqlstore/impl/system/db/acl.sql.go diff --git a/pkg/sqlstore/impl/system/internal/db/db.go b/pkg/sqlstore/impl/system/db/db.go similarity index 83% rename from pkg/sqlstore/impl/system/internal/db/db.go rename to pkg/sqlstore/impl/system/db/db.go index 329f6140..af993011 100644 --- a/pkg/sqlstore/impl/system/internal/db/db.go +++ b/pkg/sqlstore/impl/system/db/db.go @@ -30,6 +30,15 @@ func Prepare(ctx context.Context, db DBTX) (*Queries, error) { if q.deletePendingTxByHashStmt, err = db.PrepareContext(ctx, deletePendingTxByHash); err != nil { return nil, fmt.Errorf("error preparing query DeletePendingTxByHash: %w", err) } + if q.deleteProcessingStmt, err = db.PrepareContext(ctx, deleteProcessing); err != nil { + return nil, fmt.Errorf("error preparing query DeleteProcessing: %w", err) + } + if q.fetchChainIDAndBlockNumberStmt, err = db.PrepareContext(ctx, fetchChainIDAndBlockNumber); err != nil { + return nil, fmt.Errorf("error preparing query FetchChainIDAndBlockNumber: %w", err) + } + if q.fetchLeavesByChainIDAndBlockNumberStmt, err = db.PrepareContext(ctx, fetchLeavesByChainIDAndBlockNumber); err != nil { + return nil, fmt.Errorf("error preparing query FetchLeavesByChainIDAndBlockNumber: %w", err) + } if q.getAclByTableAndControllerStmt, err = db.PrepareContext(ctx, getAclByTableAndController); err != nil { return nil, fmt.Errorf("error preparing query GetAclByTableAndController: %w", err) } @@ -96,6 +105,21 @@ func (q *Queries) Close() error { err = fmt.Errorf("error closing deletePendingTxByHashStmt: %w", cerr) } } + if q.deleteProcessingStmt != nil { + if cerr := q.deleteProcessingStmt.Close(); cerr != nil { + err 
= fmt.Errorf("error closing deleteProcessingStmt: %w", cerr) + } + } + if q.fetchChainIDAndBlockNumberStmt != nil { + if cerr := q.fetchChainIDAndBlockNumberStmt.Close(); cerr != nil { + err = fmt.Errorf("error closing fetchChainIDAndBlockNumberStmt: %w", cerr) + } + } + if q.fetchLeavesByChainIDAndBlockNumberStmt != nil { + if cerr := q.fetchLeavesByChainIDAndBlockNumberStmt.Close(); cerr != nil { + err = fmt.Errorf("error closing fetchLeavesByChainIDAndBlockNumberStmt: %w", cerr) + } + } if q.getAclByTableAndControllerStmt != nil { if cerr := q.getAclByTableAndControllerStmt.Close(); cerr != nil { err = fmt.Errorf("error closing getAclByTableAndControllerStmt: %w", cerr) @@ -222,6 +246,9 @@ type Queries struct { tx *sql.Tx areEVMEventsPersistedStmt *sql.Stmt deletePendingTxByHashStmt *sql.Stmt + deleteProcessingStmt *sql.Stmt + fetchChainIDAndBlockNumberStmt *sql.Stmt + fetchLeavesByChainIDAndBlockNumberStmt *sql.Stmt getAclByTableAndControllerStmt *sql.Stmt getBlockExtraInfoStmt *sql.Stmt getBlocksMissingExtraInfoStmt *sql.Stmt @@ -243,13 +270,16 @@ type Queries struct { func (q *Queries) WithTx(tx *sql.Tx) *Queries { return &Queries{ - db: tx, - tx: tx, - areEVMEventsPersistedStmt: q.areEVMEventsPersistedStmt, - deletePendingTxByHashStmt: q.deletePendingTxByHashStmt, - getAclByTableAndControllerStmt: q.getAclByTableAndControllerStmt, - getBlockExtraInfoStmt: q.getBlockExtraInfoStmt, - getBlocksMissingExtraInfoStmt: q.getBlocksMissingExtraInfoStmt, + db: tx, + tx: tx, + areEVMEventsPersistedStmt: q.areEVMEventsPersistedStmt, + deletePendingTxByHashStmt: q.deletePendingTxByHashStmt, + deleteProcessingStmt: q.deleteProcessingStmt, + fetchChainIDAndBlockNumberStmt: q.fetchChainIDAndBlockNumberStmt, + fetchLeavesByChainIDAndBlockNumberStmt: q.fetchLeavesByChainIDAndBlockNumberStmt, + getAclByTableAndControllerStmt: q.getAclByTableAndControllerStmt, + getBlockExtraInfoStmt: q.getBlockExtraInfoStmt, + getBlocksMissingExtraInfoStmt: q.getBlocksMissingExtraInfoStmt, 
getBlocksMissingExtraInfoByBlockNumberStmt: q.getBlocksMissingExtraInfoByBlockNumberStmt, getEVMEventsStmt: q.getEVMEventsStmt, getIdStmt: q.getIdStmt, diff --git a/pkg/sqlstore/impl/system/internal/db/evm_events.sql.go b/pkg/sqlstore/impl/system/db/evm_events.sql.go similarity index 100% rename from pkg/sqlstore/impl/system/internal/db/evm_events.sql.go rename to pkg/sqlstore/impl/system/db/evm_events.sql.go diff --git a/pkg/sqlstore/impl/system/internal/db/id.sql.go b/pkg/sqlstore/impl/system/db/id.sql.go similarity index 100% rename from pkg/sqlstore/impl/system/internal/db/id.sql.go rename to pkg/sqlstore/impl/system/db/id.sql.go diff --git a/pkg/sqlstore/impl/system/internal/db/models.go b/pkg/sqlstore/impl/system/db/models.go similarity index 90% rename from pkg/sqlstore/impl/system/internal/db/models.go rename to pkg/sqlstore/impl/system/db/models.go index b8287ba9..86dd669c 100644 --- a/pkg/sqlstore/impl/system/internal/db/models.go +++ b/pkg/sqlstore/impl/system/db/models.go @@ -71,6 +71,15 @@ type SystemPendingTx struct { UpdatedAt sql.NullInt64 } +type SystemTreeLeafe struct { + Prefix string + ChainID int64 + TableID int64 + BlockNumber int64 + Leaves []byte + Processing int64 +} + type SystemTxnProcessor struct { ChainID int64 BlockNumber int64 diff --git a/pkg/sqlstore/impl/system/internal/db/nonce.sql.go b/pkg/sqlstore/impl/system/db/nonce.sql.go similarity index 100% rename from pkg/sqlstore/impl/system/internal/db/nonce.sql.go rename to pkg/sqlstore/impl/system/db/nonce.sql.go diff --git a/pkg/sqlstore/impl/system/internal/db/receipt.sql.go b/pkg/sqlstore/impl/system/db/receipt.sql.go similarity index 100% rename from pkg/sqlstore/impl/system/internal/db/receipt.sql.go rename to pkg/sqlstore/impl/system/db/receipt.sql.go diff --git a/pkg/sqlstore/impl/system/internal/db/registry.sql.go b/pkg/sqlstore/impl/system/db/registry.sql.go similarity index 100% rename from pkg/sqlstore/impl/system/internal/db/registry.sql.go rename to 
pkg/sqlstore/impl/system/db/registry.sql.go diff --git a/pkg/sqlstore/impl/system/internal/db/schema.sql.go b/pkg/sqlstore/impl/system/db/schema.sql.go similarity index 100% rename from pkg/sqlstore/impl/system/internal/db/schema.sql.go rename to pkg/sqlstore/impl/system/db/schema.sql.go diff --git a/pkg/sqlstore/impl/system/db/tree_leaves.sql.go b/pkg/sqlstore/impl/system/db/tree_leaves.sql.go new file mode 100644 index 00000000..e8c022ef --- /dev/null +++ b/pkg/sqlstore/impl/system/db/tree_leaves.sql.go @@ -0,0 +1,95 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.15.0 +// source: tree_leaves.sql + +package db + +import ( + "context" +) + +const deleteProcessing = `-- name: DeleteProcessing :exec +DELETE FROM system_tree_leaves WHERE chain_id = ?1 AND block_number = ?2 AND processing = 1 +` + +type DeleteProcessingParams struct { + ChainID int64 + BlockNumber int64 +} + +func (q *Queries) DeleteProcessing(ctx context.Context, arg DeleteProcessingParams) error { + _, err := q.exec(ctx, q.deleteProcessingStmt, deleteProcessing, arg.ChainID, arg.BlockNumber) + return err +} + +const fetchChainIDAndBlockNumber = `-- name: FetchChainIDAndBlockNumber :many +SELECT chain_id, block_number FROM system_tree_leaves GROUP BY chain_id, block_number ORDER BY chain_id, block_number +` + +type FetchChainIDAndBlockNumberRow struct { + ChainID int64 + BlockNumber int64 +} + +func (q *Queries) FetchChainIDAndBlockNumber(ctx context.Context) ([]FetchChainIDAndBlockNumberRow, error) { + rows, err := q.query(ctx, q.fetchChainIDAndBlockNumberStmt, fetchChainIDAndBlockNumber) + if err != nil { + return nil, err + } + defer rows.Close() + var items []FetchChainIDAndBlockNumberRow + for rows.Next() { + var i FetchChainIDAndBlockNumberRow + if err := rows.Scan(&i.ChainID, &i.BlockNumber); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err 
+ } + return items, nil +} + +const fetchLeavesByChainIDAndBlockNumber = `-- name: FetchLeavesByChainIDAndBlockNumber :many +UPDATE system_tree_leaves SET processing = 1 WHERE chain_id = ?1 AND block_number = ?2 RETURNING prefix, chain_id, table_id, block_number, leaves, processing +` + +type FetchLeavesByChainIDAndBlockNumberParams struct { + ChainID int64 + BlockNumber int64 +} + +func (q *Queries) FetchLeavesByChainIDAndBlockNumber(ctx context.Context, arg FetchLeavesByChainIDAndBlockNumberParams) ([]SystemTreeLeafe, error) { + rows, err := q.query(ctx, q.fetchLeavesByChainIDAndBlockNumberStmt, fetchLeavesByChainIDAndBlockNumber, arg.ChainID, arg.BlockNumber) + if err != nil { + return nil, err + } + defer rows.Close() + var items []SystemTreeLeafe + for rows.Next() { + var i SystemTreeLeafe + if err := rows.Scan( + &i.Prefix, + &i.ChainID, + &i.TableID, + &i.BlockNumber, + &i.Leaves, + &i.Processing, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/pkg/sqlstore/impl/system/migrations/005_system_tree_leaves.down.sql b/pkg/sqlstore/impl/system/migrations/005_system_tree_leaves.down.sql new file mode 100644 index 00000000..b1ac7e3c --- /dev/null +++ b/pkg/sqlstore/impl/system/migrations/005_system_tree_leaves.down.sql @@ -0,0 +1 @@ +DROP TABLE system_tree_leaves; \ No newline at end of file diff --git a/pkg/sqlstore/impl/system/migrations/005_system_tree_leaves.up.sql b/pkg/sqlstore/impl/system/migrations/005_system_tree_leaves.up.sql new file mode 100644 index 00000000..e2e51e0f --- /dev/null +++ b/pkg/sqlstore/impl/system/migrations/005_system_tree_leaves.up.sql @@ -0,0 +1,10 @@ +CREATE TABLE IF NOT EXISTS system_tree_leaves ( + prefix TEXT NOT NULL, + chain_id INTEGER NOT NULL, + table_id INTEGER NOT NULL, + block_number INTEGER NOT NULL, + leaves BLOB NOT NULL, + processing INTEGER NOT 
NULL DEFAULT 0, + + PRIMARY KEY(chain_id, table_id, block_number) +); diff --git a/pkg/sqlstore/impl/system/migrations/migrations.go b/pkg/sqlstore/impl/system/migrations/migrations.go index 0be4a939..b4ded20d 100644 --- a/pkg/sqlstore/impl/system/migrations/migrations.go +++ b/pkg/sqlstore/impl/system/migrations/migrations.go @@ -9,6 +9,8 @@ // migrations/003_evm_events.up.sql // migrations/004_system_id.down.sql // migrations/004_system_id.up.sql +// migrations/005_system_tree_leaves.down.sql +// migrations/005_system_tree_leaves.up.sql package migrations import ( @@ -245,6 +247,46 @@ func _004_system_idUpSql() (*asset, error) { return a, nil } +var __005_system_tree_leavesDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\x28\xae\x2c\x2e\x49\xcd\x8d\x2f\x29\x4a\x4d\x8d\xcf\x49\x4d\x2c\x4b\x2d\xb6\x06\x04\x00\x00\xff\xff\x6d\x49\xa7\x48\x1e\x00\x00\x00") + +func _005_system_tree_leavesDownSqlBytes() ([]byte, error) { + return bindataRead( + __005_system_tree_leavesDownSql, + "005_system_tree_leaves.down.sql", + ) +} + +func _005_system_tree_leavesDownSql() (*asset, error) { + bytes, err := _005_system_tree_leavesDownSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "005_system_tree_leaves.down.sql", size: 30, mode: os.FileMode(420), modTime: time.Unix(1677101838, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var __005_system_tree_leavesUpSql = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\x8e\xbd\x6a\x84\x40\x14\x46\x7b\x9f\xe2\x96\x0a\x16\xe9\x53\x8d\xc9\x35\x0c\x99\x68\x18\xaf\xa0\xd5\xa0\xe6\x26\x19\xe2\x1f\x33\x26\x64\xdf\x7e\xc1\xfd\x81\x75\xb1\xf9\x9a\x73\x3e\x38\x4f\x1a\x05\x21\x90\x48\x14\x82\x4c\x21\xcb\x09\xb0\x92\x05\x15\xe0\x0f\x7e\xe1\xc1\x2c\x8e\xd9\xf4\xdc\xfc\xb1\x87\x30\x00\x00\x98\x1d\x7f\xda\x7f\x20\xac\x68\xf5\xb3\x52\xa9\x78\x25\xdd\x77\x63\x47\x63\x3f\x40\x66\x84\x2f\xa8\x37\x78\x69\xda\x9e\xf7\x71\xdb\x4f\xdd\x8f\x19\x7f\x87\x96\xdd\x8e\x72\xee\x48\x54\x9e\x6c\xc8\xec\xa6\x8e\xbd\xb7\xe3\xd7\xdd\x15\x9e\x31\x15\xa5\x22\x78\x38\xa9\xeb\xbc\x6b\xf9\x26\x74\x0d\xaf\x58\x87\x97\xec\xf8\x5a\x18\xdf\xc4\x44\x41\xf4\x18\x1c\x03\x00\x00\xff\xff\x77\xe0\xa9\xeb\x2a\x01\x00\x00") + +func _005_system_tree_leavesUpSqlBytes() ([]byte, error) { + return bindataRead( + __005_system_tree_leavesUpSql, + "005_system_tree_leaves.up.sql", + ) +} + +func _005_system_tree_leavesUpSql() (*asset, error) { + bytes, err := _005_system_tree_leavesUpSqlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "005_system_tree_leaves.up.sql", size: 298, mode: os.FileMode(420), modTime: time.Unix(1677101841, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + // Asset loads and returns the asset for the given name. // It returns an error if the asset could not be found or // could not be loaded. @@ -297,14 +339,16 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. 
var _bindata = map[string]func() (*asset, error){ - "001_init.down.sql": _001_initDownSql, - "001_init.up.sql": _001_initUpSql, - "002_receipterroridx.down.sql": _002_receipterroridxDownSql, - "002_receipterroridx.up.sql": _002_receipterroridxUpSql, - "003_evm_events.down.sql": _003_evm_eventsDownSql, - "003_evm_events.up.sql": _003_evm_eventsUpSql, - "004_system_id.down.sql": _004_system_idDownSql, - "004_system_id.up.sql": _004_system_idUpSql, + "001_init.down.sql": _001_initDownSql, + "001_init.up.sql": _001_initUpSql, + "002_receipterroridx.down.sql": _002_receipterroridxDownSql, + "002_receipterroridx.up.sql": _002_receipterroridxUpSql, + "003_evm_events.down.sql": _003_evm_eventsDownSql, + "003_evm_events.up.sql": _003_evm_eventsUpSql, + "004_system_id.down.sql": _004_system_idDownSql, + "004_system_id.up.sql": _004_system_idUpSql, + "005_system_tree_leaves.down.sql": _005_system_tree_leavesDownSql, + "005_system_tree_leaves.up.sql": _005_system_tree_leavesUpSql, } // AssetDir returns the file names below a certain @@ -348,14 +392,16 @@ type bintree struct { } var _bintree = &bintree{nil, map[string]*bintree{ - "001_init.down.sql": &bintree{_001_initDownSql, map[string]*bintree{}}, - "001_init.up.sql": &bintree{_001_initUpSql, map[string]*bintree{}}, - "002_receipterroridx.down.sql": &bintree{_002_receipterroridxDownSql, map[string]*bintree{}}, - "002_receipterroridx.up.sql": &bintree{_002_receipterroridxUpSql, map[string]*bintree{}}, - "003_evm_events.down.sql": &bintree{_003_evm_eventsDownSql, map[string]*bintree{}}, - "003_evm_events.up.sql": &bintree{_003_evm_eventsUpSql, map[string]*bintree{}}, - "004_system_id.down.sql": &bintree{_004_system_idDownSql, map[string]*bintree{}}, - "004_system_id.up.sql": &bintree{_004_system_idUpSql, map[string]*bintree{}}, + "001_init.down.sql": &bintree{_001_initDownSql, map[string]*bintree{}}, + "001_init.up.sql": &bintree{_001_initUpSql, map[string]*bintree{}}, + "002_receipterroridx.down.sql": 
&bintree{_002_receipterroridxDownSql, map[string]*bintree{}}, + "002_receipterroridx.up.sql": &bintree{_002_receipterroridxUpSql, map[string]*bintree{}}, + "003_evm_events.down.sql": &bintree{_003_evm_eventsDownSql, map[string]*bintree{}}, + "003_evm_events.up.sql": &bintree{_003_evm_eventsUpSql, map[string]*bintree{}}, + "004_system_id.down.sql": &bintree{_004_system_idDownSql, map[string]*bintree{}}, + "004_system_id.up.sql": &bintree{_004_system_idUpSql, map[string]*bintree{}}, + "005_system_tree_leaves.down.sql": &bintree{_005_system_tree_leavesDownSql, map[string]*bintree{}}, + "005_system_tree_leaves.up.sql": &bintree{_005_system_tree_leavesUpSql, map[string]*bintree{}}, }} // RestoreAsset restores an asset under the given directory diff --git a/pkg/sqlstore/impl/system/queries/tree_leaves.sql b/pkg/sqlstore/impl/system/queries/tree_leaves.sql new file mode 100644 index 00000000..240513a1 --- /dev/null +++ b/pkg/sqlstore/impl/system/queries/tree_leaves.sql @@ -0,0 +1,8 @@ +-- name: FetchChainIDAndBlockNumber :many +SELECT chain_id, block_number FROM system_tree_leaves GROUP BY chain_id, block_number ORDER BY chain_id, block_number; + +-- name: FetchLeavesByChainIDAndBlockNumber :many +UPDATE system_tree_leaves SET processing = 1 WHERE chain_id = ?1 AND block_number = ?2 RETURNING *; + +-- name: DeleteProcessing :exec +DELETE FROM system_tree_leaves WHERE chain_id = ?1 AND block_number = ?2 AND processing = 1; \ No newline at end of file diff --git a/pkg/sqlstore/impl/system/sqlc.yaml b/pkg/sqlstore/impl/system/sqlc.yaml index eabc8d33..d7dc8186 100644 --- a/pkg/sqlstore/impl/system/sqlc.yaml +++ b/pkg/sqlstore/impl/system/sqlc.yaml @@ -8,7 +8,7 @@ sql: gen: go: package: "db" - out: "./internal/db" + out: "./db" emit_prepared_queries: true emit_interface: false emit_exact_table_names: false diff --git a/pkg/sqlstore/impl/system/store.go b/pkg/sqlstore/impl/system/store.go index 473c14fe..0ac0911c 100644 --- a/pkg/sqlstore/impl/system/store.go +++ 
b/pkg/sqlstore/impl/system/store.go @@ -25,7 +25,7 @@ import ( "github.com/textileio/go-tableland/pkg/metrics" "github.com/textileio/go-tableland/pkg/nonce" "github.com/textileio/go-tableland/pkg/sqlstore" - "github.com/textileio/go-tableland/pkg/sqlstore/impl/system/internal/db" + "github.com/textileio/go-tableland/pkg/sqlstore/impl/system/db" "github.com/textileio/go-tableland/pkg/sqlstore/impl/system/migrations" "github.com/textileio/go-tableland/pkg/tables" ) @@ -580,6 +580,11 @@ func aclFromSQLtoDTO(acl db.SystemAcl) (sqlstore.SystemACL, error) { return systemACL, nil } +// Queries is a temp hack to get direct access to db.Queries. +func (s *SystemStore) Queries() *db.Queries { + return s.dbWithTx.queries() +} + func sanitizeAddress(address string) error { if strings.ContainsAny(address, "%_") { return errors.New("address contains invalid characters") diff --git a/pkg/sqlstore/impl/system_store_instrumented.go b/pkg/sqlstore/impl/system_store_instrumented.go index 1d5361d5..791ca3c4 100644 --- a/pkg/sqlstore/impl/system_store_instrumented.go +++ b/pkg/sqlstore/impl/system_store_instrumented.go @@ -13,6 +13,7 @@ import ( "github.com/textileio/go-tableland/pkg/metrics" "github.com/textileio/go-tableland/pkg/nonce" "github.com/textileio/go-tableland/pkg/sqlstore" + "github.com/textileio/go-tableland/pkg/sqlstore/impl/system/db" "github.com/textileio/go-tableland/pkg/tables" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric/global" @@ -411,3 +412,8 @@ func (s *InstrumentedSystemStore) GetID(ctx context.Context) (string, error) { return id, err } + +// Queries is a temp hack to get direct access to db.Queries. 
+func (s *InstrumentedSystemStore) Queries() *db.Queries { + return s.store.Queries() +} diff --git a/pkg/sqlstore/store_system.go b/pkg/sqlstore/store_system.go index 2df06a57..d6e4a867 100644 --- a/pkg/sqlstore/store_system.go +++ b/pkg/sqlstore/store_system.go @@ -8,6 +8,7 @@ import ( "github.com/textileio/go-tableland/internal/tableland" "github.com/textileio/go-tableland/pkg/eventprocessor" "github.com/textileio/go-tableland/pkg/nonce" + "github.com/textileio/go-tableland/pkg/sqlstore/impl/system/db" "github.com/textileio/go-tableland/pkg/tables" ) @@ -39,5 +40,6 @@ type SystemStore interface { Begin(context.Context) (*sql.Tx, error) WithTx(tx *sql.Tx) SystemStore + Queries() *db.Queries Close() error } diff --git a/pkg/tables/impl/ethereum/client.go b/pkg/tables/impl/ethereum/client.go index 9b2abeb4..ce5def5c 100644 --- a/pkg/tables/impl/ethereum/client.go +++ b/pkg/tables/impl/ethereum/client.go @@ -8,6 +8,7 @@ import ( "strings" "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/rs/zerolog/log" @@ -221,3 +222,20 @@ func (c *Client) callWithRetry(ctx context.Context, f func() (*types.Transaction return tx, nil } + +// Deploy deploys the contract to a simulated backend. 
+var Deploy = func(auth *bind.TransactOpts, sb *backends.SimulatedBackend) ( + address common.Address, contract interface{}, err error, +) { + addr, _, c, err := DeployContract(auth, sb) + if err != nil { + return common.Address{}, nil, err + } + + _, err = c.Initialize(auth, "https://foo.xyz") + if err != nil { + return common.Address{}, nil, err + } + + return addr, c, nil +} diff --git a/pkg/tables/impl/ethereum/client_test.go b/pkg/tables/impl/ethereum/client_test.go index c218af62..3e1b77fc 100644 --- a/pkg/tables/impl/ethereum/client_test.go +++ b/pkg/tables/impl/ethereum/client_test.go @@ -2,9 +2,7 @@ package ethereum import ( "context" - "crypto/ecdsa" "encoding/hex" - "math" "math/big" "strings" "testing" @@ -14,8 +12,6 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/require" "github.com/textileio/go-tableland/internal/tableland" @@ -31,9 +27,12 @@ import ( func TestCreateTable(t *testing.T) { t.Parallel() - backend, _, fromAuth, _, client := setup(t) + simulatedChain, client, _ := setup(t) + backend := simulatedChain.Backend - txn, err := client.CreateTable(context.Background(), fromAuth.From, "CREATE TABLE foo (bar int)") + txn, err := client.CreateTable( + context.Background(), simulatedChain.DeployerTransactOpts.From, "CREATE TABLE foo (bar int)", + ) require.NoError(t, err) backend.Commit() @@ -49,10 +48,13 @@ func TestCreateTable(t *testing.T) { func TestIsOwner(t *testing.T) { t.Parallel() - backend, key, fromAuth, contract, client := setup(t) - _, toAuth := requireNewAuth(t) - requireAuthGas(t, backend, toAuth) - requireTxn(t, backend, key, fromAuth.From, toAuth.From, big.NewInt(1000000000000000000)) + simulatedChain, client, contract := setup(t) + backend, fromAuth := 
simulatedChain.Backend, simulatedChain.DeployerTransactOpts + + key := simulatedChain.CreateAccountWithBalance(t) + toAuth, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(simulatedChain.ChainID)) + require.NoError(t, err) + tokenID := requireMint(t, backend, contract, toAuth, toAuth.From) owner, err := client.IsOwner(context.Background(), toAuth.From, tokenID) @@ -67,7 +69,8 @@ func TestIsOwner(t *testing.T) { func TestRunSQL(t *testing.T) { t.Parallel() - backend, _, txOpts, contract, client := setup(t) + simulatedChain, client, contract := setup(t) + backend, txOpts := simulatedChain.Backend, simulatedChain.DeployerTransactOpts tokenID := requireMint(t, backend, contract, txOpts, txOpts.From) @@ -108,8 +111,10 @@ func TestRunSQL(t *testing.T) { func TestSetController(t *testing.T) { t.Parallel() - backend, _, txOpts, contract, client := setup(t) - + simulatedChain, client, contract := setup(t) + backend, contract, txOpts := simulatedChain.Backend, + contract, + simulatedChain.DeployerTransactOpts // You have to be the owner of the token to set the controller tokenID := requireMint(t, backend, contract, txOpts, txOpts.From) @@ -146,14 +151,15 @@ func TestSetController(t *testing.T) { func TestRunSQLWithPolicy(t *testing.T) { t.Parallel() - backend, _, txOpts, contract, client := setup(t) + simulatedChain, client, contract := setup(t) + backend, txOpts := simulatedChain.Backend, simulatedChain.DeployerTransactOpts // caller must be the sender - callerAddress := txOpts.From + callerAddress := simulatedChain.DeployerTransactOpts.From // Deploy controller contract controllerAddress, _, controllerContract, err := controller.DeployContract( - txOpts, + simulatedChain.DeployerTransactOpts, backend, ) require.NoError(t, err) @@ -250,7 +256,8 @@ func TestNonceTooLow(t *testing.T) { t.Run("run-sql", func(t *testing.T) { t.Parallel() - backend, txOpts, contract, client := setupWithLocalTracker(t) + simulatedChain, client, contract := setupWithLocalTracker(t) + 
backend, txOpts := simulatedChain.Backend, simulatedChain.DeployerTransactOpts tokenID := requireMint(t, backend, contract, txOpts, txOpts.From) @@ -267,7 +274,8 @@ func TestNonceTooLow(t *testing.T) { t.Run("set-controller", func(t *testing.T) { t.Parallel() - backend, txOpts, contract, client := setupWithLocalTracker(t) + simulatedChain, client, contract := setupWithLocalTracker(t) + backend, txOpts := simulatedChain.Backend, simulatedChain.DeployerTransactOpts tokenID := requireMint(t, backend, contract, txOpts, txOpts.From) @@ -307,142 +315,32 @@ func requireMint( return id } -func requireTxn( - t *testing.T, - backend *backends.SimulatedBackend, - key *ecdsa.PrivateKey, - from common.Address, - to common.Address, - amt *big.Int, -) { - nonce, err := backend.PendingNonceAt(context.Background(), from) - require.NoError(t, err) - - gasLimit := uint64(21000) - gasPrice, err := backend.SuggestGasPrice(context.Background()) - require.NoError(t, err) - - var data []byte - txnData := &types.LegacyTx{ - Nonce: nonce, - GasPrice: gasPrice, - Gas: gasLimit, - To: &to, - Data: data, - Value: amt, - } - tx := types.NewTx(txnData) - signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, key) - require.NoError(t, err) - - bal, err := backend.BalanceAt(context.Background(), from, nil) +func setup(t *testing.T) (*tests.SimulatedChain, *Client, *Contract) { + simulatedChain := tests.NewSimulatedChain(t) + contract, err := simulatedChain.DeployContract(t, Deploy) require.NoError(t, err) - require.NotZero(t, bal) - err = backend.SendTransaction(context.Background(), signedTx) + w, err := wallet.NewWallet(hex.EncodeToString(crypto.FromECDSA(simulatedChain.DeployerPrivateKey))) require.NoError(t, err) - backend.Commit() - - receipt, err := backend.TransactionReceipt(context.Background(), signedTx.Hash()) - require.NoError(t, err) - require.NotNil(t, receipt) -} - -func requireAuthGas(t *testing.T, backend *backends.SimulatedBackend, auth *bind.TransactOpts) { - gas, err := 
backend.SuggestGasPrice(context.Background()) - require.NoError(t, err) - auth.GasPrice = gas -} - -func requireNewAuth(t *testing.T) (*ecdsa.PrivateKey, *bind.TransactOpts) { - key, err := crypto.GenerateKey() - require.NoError(t, err) - auth, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - - require.NoError(t, err) - return key, auth -} - -func setup(t *testing.T) (*backends.SimulatedBackend, *ecdsa.PrivateKey, *bind.TransactOpts, *Contract, *Client) { - key, auth := requireNewAuth(t) - - alloc := make(core.GenesisAlloc) - alloc[auth.From] = core.GenesisAccount{Balance: big.NewInt(math.MaxInt64)} - backend := backends.NewSimulatedBackend(alloc, math.MaxInt64) - - requireAuthGas(t, backend, auth) - - // Deploy contract - address, _, contract, err := DeployContract( - auth, - backend, + client, err := NewClient( + simulatedChain.Backend, + tableland.ChainID(simulatedChain.ChainID), + contract.ContractAddr, + w, + nonceimpl.NewSimpleTracker(w, simulatedChain.Backend), ) - - // commit all pending transactions - backend.Commit() - - require.NoError(t, err) - - if len(address.Bytes()) == 0 { - t.Error("Expected a valid deployment address. 
Received empty address byte array instead") - } - - // Initialize the contract - _, err = contract.Initialize(auth, "https://foo.xyz") - - // commit all pending transactions - backend.Commit() - - require.NoError(t, err) - - w, err := wallet.NewWallet(hex.EncodeToString(crypto.FromECDSA(key))) - require.NoError(t, err) - - client, err := NewClient(backend, 1337, address, w, nonceimpl.NewSimpleTracker(w, backend)) require.NoError(t, err) - return backend, key, auth, contract, client + return simulatedChain, client, contract.Contract.(*Contract) } -func setupWithLocalTracker(t *testing.T) ( - *backends.SimulatedBackend, - *bind.TransactOpts, - *Contract, - *Client, -) { - key, auth := requireNewAuth(t) - - alloc := make(core.GenesisAlloc) - alloc[auth.From] = core.GenesisAccount{Balance: big.NewInt(math.MaxInt64)} - backend := backends.NewSimulatedBackend(alloc, math.MaxInt64) - - requireAuthGas(t, backend, auth) - - // Deploy contract - address, _, contract, err := DeployContract( - auth, - backend, - ) - - // commit all pending transactions - backend.Commit() - +func setupWithLocalTracker(t *testing.T) (*tests.SimulatedChain, *Client, *Contract) { + simulatedChain := tests.NewSimulatedChain(t) + contract, err := simulatedChain.DeployContract(t, Deploy) require.NoError(t, err) - if len(address.Bytes()) == 0 { - t.Error("Expected a valid deployment address. 
Received empty address byte array instead") - } - - // Initialize the contract - _, err = contract.Initialize(auth, "https://foo.xyz") - - // commit all pending transactions - backend.Commit() - - require.NoError(t, err) - - w, err := wallet.NewWallet(hex.EncodeToString(crypto.FromECDSA(key))) + w, err := wallet.NewWallet(hex.EncodeToString(crypto.FromECDSA(simulatedChain.DeployerPrivateKey))) require.NoError(t, err) url := tests.Sqlite3URI(t) @@ -455,15 +353,15 @@ func setupWithLocalTracker(t *testing.T) ( w, nonceimpl.NewNonceStore(systemStore), tableland.ChainID(1337), - backend, + simulatedChain.Backend, 5*time.Second, 0, 3*time.Microsecond, ) require.NoError(t, err) - client, err := NewClient(backend, 1337, address, w, tracker) + client, err := NewClient(simulatedChain.Backend, 1337, contract.ContractAddr, w, tracker) require.NoError(t, err) - return backend, auth, contract, client + return simulatedChain, client, contract.Contract.(*Contract) } diff --git a/pkg/tables/impl/testutil/testutil.go b/pkg/tables/impl/testutil/testutil.go deleted file mode 100644 index 705ca8fc..00000000 --- a/pkg/tables/impl/testutil/testutil.go +++ /dev/null @@ -1,63 +0,0 @@ -package testutil - -import ( - "context" - "crypto/ecdsa" - "math" - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/crypto" - "github.com/stretchr/testify/require" - "github.com/textileio/go-tableland/pkg/tables/impl/ethereum" -) - -// Setup spinup a simulated backend node connected to a test EVM chain running the Registry smart-contract. 
-func Setup(t *testing.T) ( - *backends.SimulatedBackend, - common.Address, - *ethereum.Contract, - *bind.TransactOpts, - *ecdsa.PrivateKey, -) { - key, err := crypto.GenerateKey() - require.NoError(t, err) - auth, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) // nolint - require.NoError(t, err) - - alloc := make(core.GenesisAlloc) - alloc[auth.From] = core.GenesisAccount{Balance: big.NewInt(math.MaxInt64)} - backend := backends.NewSimulatedBackend(alloc, math.MaxInt64) - gas, err := backend.SuggestGasPrice(context.Background()) - require.NoError(t, err) - auth.GasPrice = gas - - // Deploy contract - address, _, contract, err := ethereum.DeployContract( - auth, - backend, - ) - - // commit all pending transactions - backend.Commit() - - require.NoError(t, err) - - if len(address.Bytes()) == 0 { - t.Error("Expected a valid deployment address. Received empty address byte array instead") - } - - // Initialize the contract - _, err = contract.Initialize(auth, "https://foo.xyz") - - // commit all pending transactions - backend.Commit() - - require.NoError(t, err) - - return backend, address, contract, auth, key -} diff --git a/tests/fullstack/fullstack.go b/tests/fullstack/fullstack.go index f74ea1fd..f43b4f41 100644 --- a/tests/fullstack/fullstack.go +++ b/tests/fullstack/fullstack.go @@ -5,6 +5,7 @@ import ( "database/sql" "encoding/hex" "net/http/httptest" + "os" "testing" "time" @@ -24,6 +25,8 @@ import ( efimpl "github.com/textileio/go-tableland/pkg/eventprocessor/eventfeed/impl" epimpl "github.com/textileio/go-tableland/pkg/eventprocessor/impl" executor "github.com/textileio/go-tableland/pkg/eventprocessor/impl/executor/impl" + merklepublisher "github.com/textileio/go-tableland/pkg/merkletree/publisher" + merklepublisherimpl "github.com/textileio/go-tableland/pkg/merkletree/publisher/impl" nonceimpl "github.com/textileio/go-tableland/pkg/nonce/impl" "github.com/textileio/go-tableland/pkg/parsing" parserimpl 
"github.com/textileio/go-tableland/pkg/parsing/impl" @@ -33,7 +36,6 @@ import ( "github.com/textileio/go-tableland/pkg/sqlstore/impl/user" "github.com/textileio/go-tableland/pkg/tables" "github.com/textileio/go-tableland/pkg/tables/impl/ethereum" - "github.com/textileio/go-tableland/pkg/tables/impl/testutil" "github.com/textileio/go-tableland/pkg/wallet" "github.com/textileio/go-tableland/tests" ) @@ -86,17 +88,20 @@ func CreateFullStack(t *testing.T, deps Deps) FullStack { require.NoError(t, err) } - backend, addr, contract, transactOpts, sk := testutil.Setup(t) + // Spin up the EVM chain with the contract. + simulatedChain := tests.NewSimulatedChain(t) + contract, err := simulatedChain.DeployContract(t, ethereum.Deploy) + require.NoError(t, err) - wallet, err := wallet.NewWallet(hex.EncodeToString(crypto.FromECDSA(sk))) + wallet, err := wallet.NewWallet(hex.EncodeToString(crypto.FromECDSA(simulatedChain.DeployerPrivateKey))) require.NoError(t, err) registry, err := ethereum.NewClient( - backend, + simulatedChain.Backend, ChainID, - addr, + contract.ContractAddr, wallet, - nonceimpl.NewSimpleTracker(wallet, backend), + nonceimpl.NewSimpleTracker(wallet, simulatedChain.Backend), ) require.NoError(t, err) @@ -116,21 +121,18 @@ func CreateFullStack(t *testing.T, deps Deps) FullStack { ef, err := efimpl.New( systemStore, ChainID, - backend, - addr, + simulatedChain.Backend, + contract.ContractAddr, eventfeed.WithNewHeadPollFreq(time.Millisecond), eventfeed.WithMinBlockDepth(0)) require.NoError(t, err) // Create EventProcessor for our test. 
- ep, err := epimpl.New(parser, ex, ef, 1337) + ep, err := epimpl.New(parser, ex, ef, 1337, eventprocessor.WithHashCalcStep(1)) require.NoError(t, err) err = ep.Start() require.NoError(t, err) - t.Cleanup(func() { - ep.Stop() - }) chainStacks := map[tableland.ChainID]chains.ChainStack{ 1337: { @@ -174,17 +176,48 @@ func CreateFullStack(t *testing.T, deps Deps) FullStack { require.NoError(t, err) } - router, err := router.ConfiguredRouter(tbl, systemService, 10, time.Second, []tableland.ChainID{ChainID}) + treeStore, err := merklepublisherimpl.NewMerkleTreeStore(tempfile(t)) + require.NoError(t, err) + + merkleRootContract, err := simulatedChain.DeployContract(t, + func(auth *bind.TransactOpts, sb *backends.SimulatedBackend) (common.Address, interface{}, error) { + addr, _, contract, err := merklepublisherimpl.DeployContract(auth, sb) + return addr, contract, err + }) + require.NoError(t, err) + + rootRegistry, err := merklepublisherimpl.NewMerkleRootRegistryEthereum( + simulatedChain.Backend, + merkleRootContract.ContractAddr, + wallet, + nonceimpl.NewSimpleTracker(wallet, simulatedChain.Backend), + ) + require.NoError(t, err) + + merkleRootPublisher := merklepublisher.NewMerkleRootPublisher( + merklepublisherimpl.NewLeavesStore(systemStore), + treeStore, + rootRegistry, + time.Second, + ) + merkleRootPublisher.Start() + + router, err := router.ConfiguredRouter(tbl, systemService, treeStore, 10, time.Second, []tableland.ChainID{ChainID}) require.NoError(t, err) server := httptest.NewServer(router.Handler()) - t.Cleanup(server.Close) + + t.Cleanup(func() { + server.Close() + ep.Stop() + merkleRootPublisher.Close() + }) return FullStack{ - Backend: backend, - Address: addr, - Contract: contract, - TransactOpts: transactOpts, + Backend: simulatedChain.Backend, + Address: contract.ContractAddr, + Contract: contract.Contract.(*ethereum.Contract), + TransactOpts: simulatedChain.DeployerTransactOpts, Wallet: wallet, TblContractClient: registry, Server: server, @@ -209,3 
+242,14 @@ func (acl *aclHalfMock) CheckPrivileges( func (acl *aclHalfMock) IsOwner(_ context.Context, _ common.Address, _ tables.TableID) (bool, error) { return true, nil } + +// tempfile returns a temporary file path. +func tempfile(t *testing.T) string { + t.Helper() + + f, err := os.CreateTemp(t.TempDir(), "bolt_*.db") + require.NoError(t, err) + require.NoError(t, f.Close()) + + return f.Name() +} diff --git a/tests/simulated_chain.go b/tests/simulated_chain.go new file mode 100644 index 00000000..701332e4 --- /dev/null +++ b/tests/simulated_chain.go @@ -0,0 +1,139 @@ +package tests + +import ( + "context" + "crypto/ecdsa" + "math" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/require" +) + +// SimulatedChain is simulated Ethereum backend with a contract deployed. +type SimulatedChain struct { + ChainID int64 + Backend *backends.SimulatedBackend + + // deployer info + DeployerPrivateKey *ecdsa.PrivateKey + DeployerTransactOpts *bind.TransactOpts +} + +// Contract holds contract information and bindings. +type Contract struct { + ContractAddr common.Address + Contract interface{} // it can be a Tableland Registry or Root Registry contract +} + +// ContractDeployer represents a function that deploys a contract in the simulated backend. +type ContractDeployer func( + *bind.TransactOpts, + *backends.SimulatedBackend, +) (address common.Address, contract interface{}, err error) + +// NewSimulatedChain creates a new simulated chain. 
+func NewSimulatedChain(t *testing.T) *SimulatedChain { + c := &SimulatedChain{ + ChainID: 1337, + } + + c.bootstrap(t) + return c +} + +func (c *SimulatedChain) bootstrap(t *testing.T) { + key, err := crypto.GenerateKey() + require.NoError(t, err) + transactOpts, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(c.ChainID)) // nolint + require.NoError(t, err) + + alloc := make(core.GenesisAlloc) + alloc[transactOpts.From] = core.GenesisAccount{Balance: big.NewInt(math.MaxInt64)} + backend := backends.NewSimulatedBackend(alloc, math.MaxInt64) + gas, err := backend.SuggestGasPrice(context.Background()) + require.NoError(t, err) + transactOpts.GasPrice = gas + + c.Backend = backend + c.DeployerPrivateKey = key + c.DeployerTransactOpts = transactOpts +} + +// CreateAccountWithBalance creates a new account inside the simulated backend with balance and returns the private key. +func (c *SimulatedChain) CreateAccountWithBalance(t *testing.T) *ecdsa.PrivateKey { + fromOpts, err := bind.NewKeyedTransactorWithChainID(c.DeployerPrivateKey, big.NewInt(c.ChainID)) + require.NoError(t, err) + + gasLimit := uint64(21000) + gasPrice, err := c.Backend.SuggestGasPrice(context.Background()) + require.NoError(t, err) + fromOpts.GasPrice = gasPrice + + nonce, err := c.Backend.PendingNonceAt(context.Background(), fromOpts.From) + require.NoError(t, err) + + // generate random key + key, err := crypto.GenerateKey() + require.NoError(t, err) + + toOpts, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(c.ChainID)) + require.NoError(t, err) + + var data []byte + txnData := &types.LegacyTx{ + Nonce: nonce, + GasPrice: gasPrice, + Gas: gasLimit, + To: &toOpts.From, + Data: data, + Value: big.NewInt(1000000000000000000), + } + tx := types.NewTx(txnData) + signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, c.DeployerPrivateKey) + require.NoError(t, err) + + bal, err := c.Backend.BalanceAt(context.Background(), fromOpts.From, nil) + require.NoError(t, err) + 
require.NotZero(t, bal) + + err = c.Backend.SendTransaction(context.Background(), signedTx) + require.NoError(t, err) + + c.Backend.Commit() + + receipt, err := c.Backend.TransactionReceipt(context.Background(), signedTx.Hash()) + require.NoError(t, err) + require.NotNil(t, receipt) + + return key +} + +// DeployContract deploys a new contract to the chain. +func (c *SimulatedChain) DeployContract(t *testing.T, deploy ContractDeployer) (*Contract, error) { + // Deploy contract + address, contract, err := deploy( + c.DeployerTransactOpts, + c.Backend, + ) + require.NoError(t, err) + + // commit all pending transactions + c.Backend.Commit() + require.NoError(t, err) + + if len(address.Bytes()) == 0 { + t.Error("Expected a valid deployment address. Received empty address byte array instead") + } + + return &Contract{ + ContractAddr: address, + Contract: contract, + }, nil +}