From 03d71e4880aafd4acddc8e2f1e499e7e08e3c63c Mon Sep 17 00:00:00 2001
From: Ganesh Vanahalli
Date: Tue, 19 Nov 2024 17:14:30 +0530
Subject: [PATCH 1/6] Add flag to disable DAS chunked stores

---
 cmd/datool/datool.go  |  4 +++-
 das/aggregator.go     |  3 +++
 das/dasRpcClient.go   | 39 +++++++++++++++++++++++++--------------
 das/rpc_aggregator.go |  2 +-
 4 files changed, 32 insertions(+), 16 deletions(-)

diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go
index 06f94dc952..cb8507593c 100644
--- a/cmd/datool/datool.go
+++ b/cmd/datool/datool.go
@@ -92,6 +92,7 @@ type ClientStoreConfig struct {
 	SigningWallet         string `koanf:"signing-wallet"`
 	SigningWalletPassword string `koanf:"signing-wallet-password"`
 	MaxStoreChunkBodySize int    `koanf:"max-store-chunk-body-size"`
+	UseLegacyStore        bool   `koanf:"use-legacy-store"`
 }
 
 func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) {
@@ -104,6 +105,7 @@ func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) {
 	f.String("signing-wallet-password", genericconf.PASSWORD_NOT_SET, "password to unlock the wallet, if not specified the user is prompted for the password")
 	f.Duration("das-retention-period", 24*time.Hour, "The period which DASes are requested to retain the stored batches.")
 	f.Int("max-store-chunk-body-size", 512*1024, "The maximum HTTP POST body size for a chunked store request")
+	f.Bool("use-legacy-store", false, "enabling this forces the das rpc clients to use das_store. Disabled by default")
 
 	k, err := confighelpers.BeginCommonParse(f, args)
 	if err != nil {
@@ -152,7 +154,7 @@ func startClientStore(args []string) error {
 		}
 	}
 
-	client, err := das.NewDASRPCClient(config.URL, signer, config.MaxStoreChunkBodySize)
+	client, err := das.NewDASRPCClient(config.URL, signer, config.MaxStoreChunkBodySize, config.UseLegacyStore)
 	if err != nil {
 		return err
 	}
diff --git a/das/aggregator.go b/das/aggregator.go
index 372e448e76..99cc2d58b0 100644
--- a/das/aggregator.go
+++ b/das/aggregator.go
@@ -41,12 +41,14 @@ type AggregatorConfig struct {
 	AssumedHonest         int               `koanf:"assumed-honest"`
 	Backends              BackendConfigList `koanf:"backends"`
 	MaxStoreChunkBodySize int               `koanf:"max-store-chunk-body-size"`
+	UseLegacyStore        bool              `koanf:"use-legacy-store"`
 }
 
 var DefaultAggregatorConfig = AggregatorConfig{
 	AssumedHonest:         0,
 	Backends:              nil,
 	MaxStoreChunkBodySize: 512 * 1024,
+	UseLegacyStore:        false,
 }
 
 var parsedBackendsConf BackendConfigList
@@ -56,6 +58,7 @@ func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) {
 	f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). If there are N backends, K=N+1-H valid responses are required to consider an Store request to be successful.")
 	f.Var(&parsedBackendsConf, prefix+".backends", "JSON RPC backend configuration. This can be specified on the command line as a JSON array, eg: [{\"url\": \"...\", \"pubkey\": \"...\"},...], or as a JSON array in the config file.")
 	f.Int(prefix+".max-store-chunk-body-size", DefaultAggregatorConfig.MaxStoreChunkBodySize, "maximum HTTP POST body size to use for individual batch chunks, including JSON RPC overhead and an estimated overhead of 512B of headers")
+	f.Bool(prefix+".use-legacy-store", DefaultAggregatorConfig.UseLegacyStore, "enabling this forces the das rpc clients to use das_store. Disabled by default")
 }
 
 type Aggregator struct {
diff --git a/das/dasRpcClient.go b/das/dasRpcClient.go
index 3ea6c4e2c6..37c3c30220 100644
--- a/das/dasRpcClient.go
+++ b/das/dasRpcClient.go
@@ -35,10 +35,11 @@ var (
 )
 
 type DASRPCClient struct { // implements DataAvailabilityService
-	clnt      *rpc.Client
-	url       string
-	signer    signature.DataSignerFunc
-	chunkSize uint64
+	clnt           *rpc.Client
+	url            string
+	signer         signature.DataSignerFunc
+	chunkSize      uint64
+	useLegacyStore bool
 }
 
 func nilSigner(_ []byte) ([]byte, error) {
@@ -47,7 +48,7 @@ func nilSigner(_ []byte) ([]byte, error) {
 
 const sendChunkJSONBoilerplate = "{\"jsonrpc\":\"2.0\",\"id\":4294967295,\"method\":\"das_sendChunked\",\"params\":[\"\"]}"
 
-func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChunkBodySize int) (*DASRPCClient, error) {
+func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChunkBodySize int, useLegacyStore bool) (*DASRPCClient, error) {
 	clnt, err := rpc.Dial(target)
 	if err != nil {
 		return nil, err
@@ -56,18 +57,23 @@ func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChu
 		signer = nilSigner
 	}
 
+	client := &DASRPCClient{
+		clnt:           clnt,
+		url:            target,
+		signer:         signer,
+		useLegacyStore: useLegacyStore,
+	}
+
 	// Byte arrays are encoded in base64
-	chunkSize := (maxStoreChunkBodySize - len(sendChunkJSONBoilerplate) - 512 /* headers */) / 2
-	if chunkSize <= 0 {
-		return nil, fmt.Errorf("max-store-chunk-body-size %d doesn't leave enough room for chunk payload", maxStoreChunkBodySize)
+	if !useLegacyStore {
+		chunkSize := (maxStoreChunkBodySize - len(sendChunkJSONBoilerplate) - 512 /* headers */) / 2
+		if chunkSize <= 0 {
+			return nil, fmt.Errorf("max-store-chunk-body-size %d doesn't leave enough room for chunk payload", maxStoreChunkBodySize)
+		}
+		client.chunkSize = uint64(chunkSize)
 	}
 
-	return &DASRPCClient{
-		clnt:      clnt,
-		url:       target,
-		signer:    signer,
-		chunkSize: uint64(chunkSize),
-	}, nil
+	return client, nil
 }
 
 func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64) (*daprovider.DataAvailabilityCertificate, error) {
@@ -83,6 +89,11 @@ func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64
 		rpcClientStoreDurationHistogram.Update(time.Since(start).Nanoseconds())
 	}()
 
+	if c.useLegacyStore {
+		log.Info("Legacy store is being force-used by the DAS client", "url", c.url)
+		return c.legacyStore(ctx, message, timeout)
+	}
+
 	// #nosec G115
 	timestamp := uint64(start.Unix())
 	nChunks := uint64(len(message)) / c.chunkSize
diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go
index 916637aac6..6869e140f1 100644
--- a/das/rpc_aggregator.go
+++ b/das/rpc_aggregator.go
@@ -110,7 +110,7 @@ func ParseServices(config AggregatorConfig, signer signature.DataSignerFunc) ([]
 		}
 		metricName := metricsutil.CanonicalizeMetricName(url.Hostname())
 
-		service, err := NewDASRPCClient(b.URL, signer, config.MaxStoreChunkBodySize)
+		service, err := NewDASRPCClient(b.URL, signer, config.MaxStoreChunkBodySize, config.UseLegacyStore)
 		if err != nil {
 			return nil, err
 		}
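A note on the sizing logic that patch 1 moves behind the new flag: the configured HTTP body budget has to cover the fixed JSON-RPC envelope of a das_sendChunked call, an estimated 512 bytes of headers, and the text encoding of the binary chunk; the code bounds the last of these by simply halving whatever remains, which comfortably covers base64's 4/3 expansion (per the code's own comment). A standalone sketch of the arithmetic, assuming the 512 KiB default body size:

```go
package main

import "fmt"

// sendChunkJSONBoilerplate is copied from das/dasRpcClient.go: the fixed
// JSON-RPC envelope of a das_sendChunked call, measured so it can be
// subtracted from the HTTP body budget.
const sendChunkJSONBoilerplate = `{"jsonrpc":"2.0","id":4294967295,"method":"das_sendChunked","params":[""]}`

func main() {
	maxStoreChunkBodySize := 512 * 1024 // DefaultAggregatorConfig.MaxStoreChunkBodySize

	// The same expression as in NewDASRPCClient: subtract the envelope and
	// an assumed 512 bytes of headers, then halve to leave room for the
	// text encoding of the raw chunk bytes.
	chunkSize := (maxStoreChunkBodySize - len(sendChunkJSONBoilerplate) - 512 /* headers */) / 2

	fmt.Println(chunkSize) // 261851 raw bytes of payload per chunk
}
```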
From a391c6352e8cb4eedef99cbb84046d048a62a85b Mon Sep 17 00:00:00 2001
From: Ganesh Vanahalli
Date: Wed, 20 Nov 2024 09:50:58 +0530
Subject: [PATCH 2/6] address PR comments

---
 cmd/datool/datool.go  |  6 +++---
 das/aggregator.go     |  6 +++---
 das/dasRpcClient.go   | 24 ++++++++++++------------
 das/rpc_aggregator.go |  2 +-
 4 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go
index cb8507593c..fc186c76c4 100644
--- a/cmd/datool/datool.go
+++ b/cmd/datool/datool.go
@@ -92,7 +92,7 @@ type ClientStoreConfig struct {
 	SigningWallet         string `koanf:"signing-wallet"`
 	SigningWalletPassword string `koanf:"signing-wallet-password"`
 	MaxStoreChunkBodySize int    `koanf:"max-store-chunk-body-size"`
-	UseLegacyStore        bool   `koanf:"use-legacy-store"`
+	DisableChunkedStore   bool   `koanf:"disable-chunked-store"`
 }
 
 func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) {
@@ -105,7 +105,7 @@ func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) {
 	f.String("signing-wallet-password", genericconf.PASSWORD_NOT_SET, "password to unlock the wallet, if not specified the user is prompted for the password")
 	f.Duration("das-retention-period", 24*time.Hour, "The period which DASes are requested to retain the stored batches.")
 	f.Int("max-store-chunk-body-size", 512*1024, "The maximum HTTP POST body size for a chunked store request")
-	f.Bool("use-legacy-store", false, "enabling this forces the das rpc clients to use das_store. Disabled by default")
+	f.Bool("disable-chunked-store", false, "force data to always be sent to DAS all at once instead of splitting into chunks. Disabled by default")
 
 	k, err := confighelpers.BeginCommonParse(f, args)
 	if err != nil {
@@ -154,7 +154,7 @@ func startClientStore(args []string) error {
 		}
 	}
 
-	client, err := das.NewDASRPCClient(config.URL, signer, config.MaxStoreChunkBodySize, config.UseLegacyStore)
+	client, err := das.NewDASRPCClient(config.URL, signer, config.MaxStoreChunkBodySize, config.DisableChunkedStore)
 	if err != nil {
 		return err
 	}
diff --git a/das/aggregator.go b/das/aggregator.go
index 99cc2d58b0..3797922bb5 100644
--- a/das/aggregator.go
+++ b/das/aggregator.go
@@ -41,14 +41,14 @@ type AggregatorConfig struct {
 	AssumedHonest         int               `koanf:"assumed-honest"`
 	Backends              BackendConfigList `koanf:"backends"`
 	MaxStoreChunkBodySize int               `koanf:"max-store-chunk-body-size"`
-	UseLegacyStore        bool              `koanf:"use-legacy-store"`
+	DisableChunkedStore   bool              `koanf:"disable-chunked-store"`
 }
 
 var DefaultAggregatorConfig = AggregatorConfig{
 	AssumedHonest:         0,
 	Backends:              nil,
 	MaxStoreChunkBodySize: 512 * 1024,
-	UseLegacyStore:        false,
+	DisableChunkedStore:   false,
 }
 
 var parsedBackendsConf BackendConfigList
@@ -58,7 +58,7 @@ func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) {
 	f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). If there are N backends, K=N+1-H valid responses are required to consider an Store request to be successful.")
 	f.Var(&parsedBackendsConf, prefix+".backends", "JSON RPC backend configuration. This can be specified on the command line as a JSON array, eg: [{\"url\": \"...\", \"pubkey\": \"...\"},...], or as a JSON array in the config file.")
 	f.Int(prefix+".max-store-chunk-body-size", DefaultAggregatorConfig.MaxStoreChunkBodySize, "maximum HTTP POST body size to use for individual batch chunks, including JSON RPC overhead and an estimated overhead of 512B of headers")
-	f.Bool(prefix+".use-legacy-store", DefaultAggregatorConfig.UseLegacyStore, "enabling this forces the das rpc clients to use das_store. Disabled by default")
+	f.Bool(prefix+".disable-chunked-store", DefaultAggregatorConfig.DisableChunkedStore, "force data to always be sent to DAS all at once instead of splitting into chunks. Disabled by default")
 }
 
 type Aggregator struct {
diff --git a/das/dasRpcClient.go b/das/dasRpcClient.go
index 37c3c30220..cd4ed078f4 100644
--- a/das/dasRpcClient.go
+++ b/das/dasRpcClient.go
@@ -35,11 +35,11 @@ var (
 )
 
 type DASRPCClient struct { // implements DataAvailabilityService
-	clnt           *rpc.Client
-	url            string
-	signer         signature.DataSignerFunc
-	chunkSize      uint64
-	useLegacyStore bool
+	clnt                *rpc.Client
+	url                 string
+	signer              signature.DataSignerFunc
+	chunkSize           uint64
+	disableChunkedStore bool
 }
 
 func nilSigner(_ []byte) ([]byte, error) {
@@ -48,7 +48,7 @@ func nilSigner(_ []byte) ([]byte, error) {
 
 const sendChunkJSONBoilerplate = "{\"jsonrpc\":\"2.0\",\"id\":4294967295,\"method\":\"das_sendChunked\",\"params\":[\"\"]}"
 
-func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChunkBodySize int, useLegacyStore bool) (*DASRPCClient, error) {
+func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChunkBodySize int, disableChunkedStore bool) (*DASRPCClient, error) {
 	clnt, err := rpc.Dial(target)
 	if err != nil {
 		return nil, err
@@ -58,14 +58,14 @@ func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChu
 	}
 
 	client := &DASRPCClient{
-		clnt:           clnt,
-		url:            target,
-		signer:         signer,
-		useLegacyStore: useLegacyStore,
+		clnt:                clnt,
+		url:                 target,
+		signer:              signer,
+		disableChunkedStore: disableChunkedStore,
 	}
 
 	// Byte arrays are encoded in base64
-	if !useLegacyStore {
+	if !disableChunkedStore {
 		chunkSize := (maxStoreChunkBodySize - len(sendChunkJSONBoilerplate) - 512 /* headers */) / 2
 		if chunkSize <= 0 {
 			return nil, fmt.Errorf("max-store-chunk-body-size %d doesn't leave enough room for chunk payload", maxStoreChunkBodySize)
@@ -89,7 +89,7 @@ func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64
 		rpcClientStoreDurationHistogram.Update(time.Since(start).Nanoseconds())
 	}()
 
-	if c.useLegacyStore {
+	if c.disableChunkedStore {
 		log.Info("Legacy store is being force-used by the DAS client", "url", c.url)
 		return c.legacyStore(ctx, message, timeout)
 	}
diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go
index 6869e140f1..91fdc07b45 100644
--- a/das/rpc_aggregator.go
+++ b/das/rpc_aggregator.go
@@ -110,7 +110,7 @@ func ParseServices(config AggregatorConfig, signer signature.DataSignerFunc) ([]
 		}
 		metricName := metricsutil.CanonicalizeMetricName(url.Hostname())
 
-		service, err := NewDASRPCClient(b.URL, signer, config.MaxStoreChunkBodySize, config.UseLegacyStore)
+		service, err := NewDASRPCClient(b.URL, signer, config.MaxStoreChunkBodySize, config.DisableChunkedStore)
 		if err != nil {
 			return nil, err
 		}
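With patch 2 the option reads as a negative (disable-chunked-store) and defaults to off, so behavior is unchanged unless an operator opts out. A minimal sketch of how the renamed field threads through at this point in the series (the URL is a placeholder; a nil signer is legal because NewDASRPCClient substitutes its internal nilSigner):

```go
package main

import (
	"log"

	"github.com/offchainlabs/nitro/das"
)

func main() {
	cfg := das.AggregatorConfig{
		AssumedHonest:         1,
		MaxStoreChunkBodySize: 512 * 1024,
		DisableChunkedStore:   false, // the default: keep the chunked protocol
	}

	// chunkSize is only computed when chunking is not disabled; with the
	// flag set, Store falls back to the single legacyStore request.
	client, err := das.NewDASRPCClient("http://localhost:9876", nil, cfg.MaxStoreChunkBodySize, cfg.DisableChunkedStore)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```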
f.Bool("disable-chunked-store", false, "force data to always be sent to DAS all at once instead of splitting into chunks. Disabled by default") + f.Bool("disable-chunked-store", false, "force data to always be sent to DAS all at once instead of splitting into chunks") k, err := confighelpers.BeginCommonParse(f, args) if err != nil { diff --git a/das/aggregator.go b/das/aggregator.go index 3797922bb5..44f1568272 100644 --- a/das/aggregator.go +++ b/das/aggregator.go @@ -58,7 +58,7 @@ func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) { f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). If there are N backends, K=N+1-H valid responses are required to consider an Store request to be successful.") f.Var(&parsedBackendsConf, prefix+".backends", "JSON RPC backend configuration. This can be specified on the command line as a JSON array, eg: [{\"url\": \"...\", \"pubkey\": \"...\"},...], or as a JSON array in the config file.") f.Int(prefix+".max-store-chunk-body-size", DefaultAggregatorConfig.MaxStoreChunkBodySize, "maximum HTTP POST body size to use for individual batch chunks, including JSON RPC overhead and an estimated overhead of 512B of headers") - f.Bool(prefix+".disable-chunked-store", DefaultAggregatorConfig.DisableChunkedStore, "force data to always be sent to DAS all at once instead of splitting into chunks. Disabled by default") + f.Bool(prefix+".disable-chunked-store", DefaultAggregatorConfig.DisableChunkedStore, "force data to always be sent to DAS all at once instead of splitting into chunks") } type Aggregator struct { From e89533b377e392ba6aa5b78e90bdf37b921bd7a4 Mon Sep 17 00:00:00 2001 From: Ganesh Vanahalli Date: Mon, 9 Dec 2024 16:25:57 -0600 Subject: [PATCH 4/6] address PR comments --- cmd/datool/datool.go | 6 +++--- das/aggregator.go | 6 +++--- das/dasRpcClient.go | 24 ++++++++++++------------ das/rpc_aggregator.go | 2 +- 4 files changed, 19 insertions(+), 19 deletions(-) diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go index 9cc2f5ebd9..7ff82be229 100644 --- a/cmd/datool/datool.go +++ b/cmd/datool/datool.go @@ -92,7 +92,7 @@ type ClientStoreConfig struct { SigningWallet string `koanf:"signing-wallet"` SigningWalletPassword string `koanf:"signing-wallet-password"` MaxStoreChunkBodySize int `koanf:"max-store-chunk-body-size"` - DisableChunkedStore bool `koanf:"disable-chunked-store"` + EnableChunkedStore bool `koanf:"enable-chunked-store"` } func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) { @@ -105,7 +105,7 @@ func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) { f.String("signing-wallet-password", genericconf.PASSWORD_NOT_SET, "password to unlock the wallet, if not specified the user is prompted for the password") f.Duration("das-retention-period", 24*time.Hour, "The period which DASes are requested to retain the stored batches.") f.Int("max-store-chunk-body-size", 512*1024, "The maximum HTTP POST body size for a chunked store request") - f.Bool("disable-chunked-store", false, "force data to always be sent to DAS all at once instead of splitting into chunks") + f.Bool("enable-chunked-store", true, "force data to always be sent to DAS all at once instead of splitting into chunks") k, err := confighelpers.BeginCommonParse(f, args) if err != nil { @@ -154,7 +154,7 @@ func startClientStore(args []string) error { } } - client, err := das.NewDASRPCClient(config.URL, signer, config.MaxStoreChunkBodySize, config.DisableChunkedStore) + client, err := 
+	client, err := das.NewDASRPCClient(config.URL, signer, config.MaxStoreChunkBodySize, config.EnableChunkedStore)
 	if err != nil {
 		return err
 	}
diff --git a/das/aggregator.go b/das/aggregator.go
index 44f1568272..46ca89415d 100644
--- a/das/aggregator.go
+++ b/das/aggregator.go
@@ -41,14 +41,14 @@ type AggregatorConfig struct {
 	AssumedHonest         int               `koanf:"assumed-honest"`
 	Backends              BackendConfigList `koanf:"backends"`
 	MaxStoreChunkBodySize int               `koanf:"max-store-chunk-body-size"`
-	DisableChunkedStore   bool              `koanf:"disable-chunked-store"`
+	EnableChunkedStore    bool              `koanf:"enable-chunked-store"`
 }
 
 var DefaultAggregatorConfig = AggregatorConfig{
 	AssumedHonest:         0,
 	Backends:              nil,
 	MaxStoreChunkBodySize: 512 * 1024,
-	DisableChunkedStore:   false,
+	EnableChunkedStore:    true,
 }
 
 var parsedBackendsConf BackendConfigList
@@ -58,7 +58,7 @@ func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) {
 	f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). If there are N backends, K=N+1-H valid responses are required to consider an Store request to be successful.")
 	f.Var(&parsedBackendsConf, prefix+".backends", "JSON RPC backend configuration. This can be specified on the command line as a JSON array, eg: [{\"url\": \"...\", \"pubkey\": \"...\"},...], or as a JSON array in the config file.")
 	f.Int(prefix+".max-store-chunk-body-size", DefaultAggregatorConfig.MaxStoreChunkBodySize, "maximum HTTP POST body size to use for individual batch chunks, including JSON RPC overhead and an estimated overhead of 512B of headers")
-	f.Bool(prefix+".disable-chunked-store", DefaultAggregatorConfig.DisableChunkedStore, "force data to always be sent to DAS all at once instead of splitting into chunks")
+	f.Bool(prefix+".enable-chunked-store", DefaultAggregatorConfig.EnableChunkedStore, "force data to always be sent to DAS all at once instead of splitting into chunks")
 }
 
 type Aggregator struct {
diff --git a/das/dasRpcClient.go b/das/dasRpcClient.go
index cd4ed078f4..aaa26a3aa9 100644
--- a/das/dasRpcClient.go
+++ b/das/dasRpcClient.go
@@ -35,11 +35,11 @@ var (
 )
 
 type DASRPCClient struct { // implements DataAvailabilityService
-	clnt                *rpc.Client
-	url                 string
-	signer              signature.DataSignerFunc
-	chunkSize           uint64
-	disableChunkedStore bool
+	clnt               *rpc.Client
+	url                string
+	signer             signature.DataSignerFunc
+	chunkSize          uint64
+	enableChunkedStore bool
 }
 
 func nilSigner(_ []byte) ([]byte, error) {
@@ -48,7 +48,7 @@ func nilSigner(_ []byte) ([]byte, error) {
 
 const sendChunkJSONBoilerplate = "{\"jsonrpc\":\"2.0\",\"id\":4294967295,\"method\":\"das_sendChunked\",\"params\":[\"\"]}"
 
-func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChunkBodySize int, disableChunkedStore bool) (*DASRPCClient, error) {
+func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChunkBodySize int, enableChunkedStore bool) (*DASRPCClient, error) {
 	clnt, err := rpc.Dial(target)
 	if err != nil {
 		return nil, err
@@ -58,14 +58,14 @@ func NewDASRPCClient(target string, signer signature.DataSignerFunc, maxStoreChu
 	}
 
 	client := &DASRPCClient{
-		clnt:                clnt,
-		url:                 target,
-		signer:              signer,
-		disableChunkedStore: disableChunkedStore,
+		clnt:               clnt,
+		url:                target,
+		signer:             signer,
+		enableChunkedStore: enableChunkedStore,
 	}
 
 	// Byte arrays are encoded in base64
-	if !disableChunkedStore {
+	if enableChunkedStore {
 		chunkSize := (maxStoreChunkBodySize - len(sendChunkJSONBoilerplate) - 512 /* headers */) / 2
 		if chunkSize <= 0 {
 			return nil, fmt.Errorf("max-store-chunk-body-size %d doesn't leave enough room for chunk payload", maxStoreChunkBodySize)
@@ -89,7 +89,7 @@ func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64
 		rpcClientStoreDurationHistogram.Update(time.Since(start).Nanoseconds())
 	}()
 
-	if c.disableChunkedStore {
+	if !c.enableChunkedStore {
 		log.Info("Legacy store is being force-used by the DAS client", "url", c.url)
 		return c.legacyStore(ctx, message, timeout)
 	}
diff --git a/das/rpc_aggregator.go b/das/rpc_aggregator.go
index 91fdc07b45..1c9e2eecab 100644
--- a/das/rpc_aggregator.go
+++ b/das/rpc_aggregator.go
@@ -110,7 +110,7 @@ func ParseServices(config AggregatorConfig, signer signature.DataSignerFunc) ([]
 		}
 		metricName := metricsutil.CanonicalizeMetricName(url.Hostname())
 
-		service, err := NewDASRPCClient(b.URL, signer, config.MaxStoreChunkBodySize, config.DisableChunkedStore)
+		service, err := NewDASRPCClient(b.URL, signer, config.MaxStoreChunkBodySize, config.EnableChunkedStore)
 		if err != nil {
 			return nil, err
 		}
From 3f7dd3e035caa9b2847f2bb79197d53df91a68d0 Mon Sep 17 00:00:00 2001
From: Ganesh Vanahalli
Date: Mon, 9 Dec 2024 17:21:46 -0600
Subject: [PATCH 5/6] fix failing tests

---
 das/aggregator_test.go   | 4 ++--
 das/rpc_test.go          | 1 +
 system_tests/das_test.go | 1 +
 3 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/das/aggregator_test.go b/das/aggregator_test.go
index 217315eef0..b14c2961ce 100644
--- a/das/aggregator_test.go
+++ b/das/aggregator_test.go
@@ -50,7 +50,7 @@ func TestDAS_BasicAggregationLocal(t *testing.T) {
 		backends = append(backends, *details)
 	}
 
-	aggregator, err := NewAggregator(ctx, DataAvailabilityConfig{RPCAggregator: AggregatorConfig{AssumedHonest: 1}, ParentChainNodeURL: "none"}, backends)
+	aggregator, err := NewAggregator(ctx, DataAvailabilityConfig{RPCAggregator: AggregatorConfig{AssumedHonest: 1, EnableChunkedStore: true}, ParentChainNodeURL: "none"}, backends)
 	Require(t, err)
 
 	rawMsg := []byte("It's time for you to see the fnords.")
@@ -207,7 +207,7 @@ func testConfigurableStorageFailures(t *testing.T, shouldFailAggregation bool) {
 	aggregator, err := NewAggregator(
 		ctx,
 		DataAvailabilityConfig{
-			RPCAggregator:      AggregatorConfig{AssumedHonest: assumedHonest},
+			RPCAggregator:      AggregatorConfig{AssumedHonest: assumedHonest, EnableChunkedStore: true},
 			ParentChainNodeURL: "none",
 			RequestTimeout:     time.Millisecond * 2000,
 		}, backends)
diff --git a/das/rpc_test.go b/das/rpc_test.go
index ebc4b736d5..c4ee71aa4f 100644
--- a/das/rpc_test.go
+++ b/das/rpc_test.go
@@ -84,6 +84,7 @@ func testRpcImpl(t *testing.T, size, times int, concurrent bool) {
 			AssumedHonest:         1,
 			Backends:              beConfigs,
 			MaxStoreChunkBodySize: (chunkSize * 2) + len(sendChunkJSONBoilerplate),
+			EnableChunkedStore:    true,
 		},
 		RequestTimeout: time.Minute,
 	}
diff --git a/system_tests/das_test.go b/system_tests/das_test.go
index 52703c879d..ba50dcfff2 100644
--- a/system_tests/das_test.go
+++ b/system_tests/das_test.go
@@ -90,6 +90,7 @@ func aggConfigForBackend(backendConfig das.BackendConfig) das.AggregatorConfig {
 		AssumedHonest:         1,
 		Backends:              das.BackendConfigList{backendConfig},
 		MaxStoreChunkBodySize: 512 * 1024,
+		EnableChunkedStore:    true,
 	}
 }
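Patch 5 is needed because these tests build AggregatorConfig as plain struct literals, which never pass through AggregatorConfigAddOptions, so the flag default registered there does not apply: EnableChunkedStore silently falls back to Go's zero value, false, putting every test on the legacy path. A tiny illustration of that pitfall:

```go
package main

import (
	"fmt"

	"github.com/offchainlabs/nitro/das"
)

func main() {
	// Struct literals bypass flag/koanf defaults, so booleans take Go's
	// zero value (false) unless set explicitly, as patch 5 does.
	implicit := das.AggregatorConfig{AssumedHonest: 1}
	explicit := das.AggregatorConfig{AssumedHonest: 1, EnableChunkedStore: true}

	fmt.Println(implicit.EnableChunkedStore) // false -> legacy store path
	fmt.Println(explicit.EnableChunkedStore) // true  -> chunked store path
}
```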
From 296314bf92d7f230ac368c2513a3aa7bdf1331ff Mon Sep 17 00:00:00 2001
From: Ganesh Vanahalli
Date: Tue, 10 Dec 2024 09:19:14 -0600
Subject: [PATCH 6/6] address PR comment

---
 cmd/datool/datool.go | 2 +-
 das/aggregator.go    | 2 +-
 das/dasRpcClient.go  | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/cmd/datool/datool.go b/cmd/datool/datool.go
index 7ff82be229..67998880e0 100644
--- a/cmd/datool/datool.go
+++ b/cmd/datool/datool.go
@@ -105,7 +105,7 @@ func parseClientStoreConfig(args []string) (*ClientStoreConfig, error) {
 	f.String("signing-wallet-password", genericconf.PASSWORD_NOT_SET, "password to unlock the wallet, if not specified the user is prompted for the password")
 	f.Duration("das-retention-period", 24*time.Hour, "The period which DASes are requested to retain the stored batches.")
 	f.Int("max-store-chunk-body-size", 512*1024, "The maximum HTTP POST body size for a chunked store request")
-	f.Bool("enable-chunked-store", true, "force data to always be sent to DAS all at once instead of splitting into chunks")
+	f.Bool("enable-chunked-store", true, "enable data to be sent to DAS in chunks instead of all at once")
 
 	k, err := confighelpers.BeginCommonParse(f, args)
 	if err != nil {
diff --git a/das/aggregator.go b/das/aggregator.go
index 46ca89415d..d6922fced8 100644
--- a/das/aggregator.go
+++ b/das/aggregator.go
@@ -58,7 +58,7 @@ func AggregatorConfigAddOptions(prefix string, f *flag.FlagSet) {
 	f.Int(prefix+".assumed-honest", DefaultAggregatorConfig.AssumedHonest, "Number of assumed honest backends (H). If there are N backends, K=N+1-H valid responses are required to consider an Store request to be successful.")
 	f.Var(&parsedBackendsConf, prefix+".backends", "JSON RPC backend configuration. This can be specified on the command line as a JSON array, eg: [{\"url\": \"...\", \"pubkey\": \"...\"},...], or as a JSON array in the config file.")
 	f.Int(prefix+".max-store-chunk-body-size", DefaultAggregatorConfig.MaxStoreChunkBodySize, "maximum HTTP POST body size to use for individual batch chunks, including JSON RPC overhead and an estimated overhead of 512B of headers")
-	f.Bool(prefix+".enable-chunked-store", DefaultAggregatorConfig.EnableChunkedStore, "force data to always be sent to DAS all at once instead of splitting into chunks")
+	f.Bool(prefix+".enable-chunked-store", DefaultAggregatorConfig.EnableChunkedStore, "enable data to be sent to DAS in chunks instead of all at once")
 }
 
 type Aggregator struct {
diff --git a/das/dasRpcClient.go b/das/dasRpcClient.go
index aaa26a3aa9..5d4ca0dc93 100644
--- a/das/dasRpcClient.go
+++ b/das/dasRpcClient.go
@@ -90,7 +90,7 @@ func (c *DASRPCClient) Store(ctx context.Context, message []byte, timeout uint64
 	}()
 
 	if !c.enableChunkedStore {
-		log.Info("Legacy store is being force-used by the DAS client", "url", c.url)
+		log.Debug("Legacy store is being force-used by the DAS client", "url", c.url)
 		return c.legacyStore(ctx, message, timeout)
 	}
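Net effect of the series: chunked stores stay on by default, and setting enable-chunked-store to false (wherever AggregatorConfigAddOptions is mounted in the flag tree) reverts to the single-shot das_store RPC. A distilled sketch of the resulting dispatch, not the actual client code, and with the rounding of the final partial chunk simplified:

```go
package main

import "fmt"

// describeStore mirrors the branch the series adds to DASRPCClient.Store:
// with chunking off, one legacy das_store call; with chunking on, the
// message is split into chunkSize-byte pieces sent via das_sendChunked.
func describeStore(enableChunkedStore bool, messageLen, chunkSize uint64) string {
	if !enableChunkedStore {
		return "legacy: one das_store request"
	}
	nChunks := messageLen / chunkSize
	if messageLen%chunkSize != 0 {
		nChunks++ // simplified handling of the trailing partial chunk
	}
	return fmt.Sprintf("chunked: %d das_sendChunked request(s)", nChunks)
}

func main() {
	fmt.Println(describeStore(true, 1_000_000, 261851))  // chunked: 4 das_sendChunked request(s)
	fmt.Println(describeStore(false, 1_000_000, 261851)) // legacy: one das_store request
}
```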