Add ds max chann entry config #1597

Merged · 2 commits · Jan 10, 2025
5 changes: 5 additions & 0 deletions cmd/utils/flags.go
@@ -415,6 +415,11 @@ var (
Usage: "L2 datastreamer endpoint",
Value: "",
}
L2DataStreamerMaxEntryChanFlag = cli.Uint64Flag{
Name: "zkevm.l2-datastreamer-max-entrychan",
Usage: "L2 datastreamer max entry channel size",
Value: 1000000,
}
L2DataStreamerUseTLSFlag = cli.BoolFlag{
Name: "zkevm.l2-datastreamer-use-tls",
Usage: "Use TLS connection to L2 datastreamer endpoint",
2 changes: 1 addition & 1 deletion eth/backend.go
@@ -1302,7 +1302,7 @@ func newEtherMan(cfg *ethconfig.Config, l2ChainName, url string) *etherman.Client

 // creates a datastream client with default parameters
 func initDataStreamClient(ctx context.Context, cfg *ethconfig.Zk, latestForkId uint16) *client.StreamClient {
-	return client.NewClient(ctx, cfg.L2DataStreamerUrl, cfg.L2DataStreamerUseTLS, cfg.DatastreamVersion, cfg.L2DataStreamerTimeout, latestForkId)
+	return client.NewClient(ctx, cfg.L2DataStreamerUrl, cfg.L2DataStreamerUseTLS, cfg.DatastreamVersion, cfg.L2DataStreamerTimeout, latestForkId, cfg.L2DataStreamerMaxEntryChan)
 }
 
 func (s *Ethereum) Init(stack *node.Node, config *ethconfig.Config, chainConfig *chain.Config) error {
1 change: 1 addition & 0 deletions eth/ethconfig/config_zkevm.go
@@ -12,6 +12,7 @@ type Zk struct {
 	L2ChainId                     uint64
 	L2RpcUrl                      string
 	L2DataStreamerUrl             string
+	L2DataStreamerMaxEntryChan    uint64
 	L2DataStreamerUseTLS          bool
 	L2DataStreamerTimeout         time.Duration
 	L2ShortCircuitToVerifiedBatch bool
1 change: 1 addition & 0 deletions turbo/cli/default_flags.go
@@ -176,6 +176,7 @@ var DefaultFlags = []cli.Flag{
&utils.L2ChainIdFlag,
&utils.L2RpcUrlFlag,
&utils.L2DataStreamerUrlFlag,
&utils.L2DataStreamerMaxEntryChanFlag,
&utils.L2DataStreamerUseTLSFlag,
&utils.L2DataStreamerTimeout,
&utils.L2ShortCircuitToVerifiedBatchFlag,
Expand Down
1 change: 1 addition & 0 deletions turbo/cli/flags_zkevm.go
@@ -171,6 +171,7 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) {
 		L2ChainId:                     ctx.Uint64(utils.L2ChainIdFlag.Name),
 		L2RpcUrl:                      ctx.String(utils.L2RpcUrlFlag.Name),
 		L2DataStreamerUrl:             ctx.String(utils.L2DataStreamerUrlFlag.Name),
+		L2DataStreamerMaxEntryChan:    ctx.Uint64(utils.L2DataStreamerMaxEntryChanFlag.Name),
 		L2DataStreamerUseTLS:          ctx.Bool(utils.L2DataStreamerUseTLSFlag.Name),
 		L2DataStreamerTimeout:         l2DataStreamTimeout,
 		L2ShortCircuitToVerifiedBatch: l2ShortCircuitToVerifiedBatchVal,
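Taken together, the files above are mechanical wiring: the CLI flag fills ethconfig.Zk.L2DataStreamerMaxEntryChan, which eth/backend.go forwards as the new last argument of client.NewClient. A minimal standalone sketch of that flow — assuming urfave/cli/v2, which the pointer-flag style in default_flags.go suggests; the main function here is hypothetical, not repo code:

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

// Mirrors L2DataStreamerMaxEntryChanFlag from cmd/utils/flags.go.
var maxEntryChanFlag = cli.Uint64Flag{
	Name:  "zkevm.l2-datastreamer-max-entrychan",
	Usage: "L2 datastreamer max entry channel size",
	Value: 1000000, // default used when the flag is omitted
}

func main() {
	app := &cli.App{
		Flags: []cli.Flag{&maxEntryChanFlag},
		Action: func(ctx *cli.Context) error {
			// In the PR, ApplyFlagsForZkConfig copies this value into
			// Zk.L2DataStreamerMaxEntryChan, and initDataStreamClient
			// then hands it to client.NewClient.
			fmt.Println("entry channel capacity:", ctx.Uint64(maxEntryChanFlag.Name))
			return nil
		},
	}
	_ = app.Run(os.Args) // e.g. --zkevm.l2-datastreamer-max-entrychan=2000000
}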
38 changes: 24 additions & 14 deletions zk/datastream/client/stream_client.go
@@ -30,7 +30,8 @@ type EntityDefinition struct {
 const (
 	versionProto         = 2 // converted to proto
 	versionAddedBlockEnd = 3 // Added block end
-	entryChannelSize     = 100000
+
+	DefaultEntryChannelSize = 100000
 )
 
 var (
@@ -58,7 +59,8 @@ type StreamClient struct {
 	stopReadingToChannel atomic.Bool
 
 	// Channels
-	entryChan chan interface{}
+	entryChan        chan interface{}
+	maxEntryChanSize uint64
 
 	// keeps track of the latest fork from the stream to assign to l2 blocks
 	currentFork uint64
@@ -89,18 +91,19 @@ const (

 // Creates a new client for datastream
 // server must be in format "url:port"
-func NewClient(ctx context.Context, server string, useTLS bool, version int, checkTimeout time.Duration, latestDownloadedForkId uint16) *StreamClient {
+func NewClient(ctx context.Context, server string, useTLS bool, version int, checkTimeout time.Duration, latestDownloadedForkId uint16, maxEntryChanSize uint64) *StreamClient {
 	c := &StreamClient{
-		ctx:          ctx,
-		checkTimeout: checkTimeout,
-		server:       server,
-		version:      version,
-		streamType:   StSequencer,
-		entryChan:    make(chan interface{}, 100000),
-		currentFork:  uint64(latestDownloadedForkId),
-		mtxStreaming: &sync.Mutex{},
-		useTLS:       useTLS,
-		tlsConfig:    &tls.Config{},
+		ctx:              ctx,
+		checkTimeout:     checkTimeout,
+		server:           server,
+		version:          version,
+		streamType:       StSequencer,
+		entryChan:        make(chan interface{}, 100000),
+		maxEntryChanSize: maxEntryChanSize,
+		currentFork:      uint64(latestDownloadedForkId),
+		mtxStreaming:     &sync.Mutex{},
+		useTLS:           useTLS,
+		tlsConfig:        &tls.Config{},
 	}
 
 	// Extract hostname from server address (removing port if present)
@@ -430,7 +433,14 @@ func (c *StreamClient) clearEntryCHannel() {
 // close old entry chan and read all elements before opening a new one
 func (c *StreamClient) RenewEntryChannel() {
 	c.clearEntryCHannel()
-	c.entryChan = make(chan interface{}, entryChannelSize)
+	c.entryChan = make(chan interface{}, DefaultEntryChannelSize)
 }
 
+// close old entry chan and read all elements before opening a new one,
+// this time sized by the configured maximum rather than the default
+func (c *StreamClient) RenewMaxEntryChannel() {
+	c.clearEntryCHannel()
+	log.Warn(fmt.Sprintf("[datastream_client] Renewing max entry channel: %v", c.maxEntryChanSize))
+	c.entryChan = make(chan interface{}, c.maxEntryChanSize)
+}
 
 func (c *StreamClient) ReadAllEntriesToChannel() (err error) {
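The two Renew functions share one pattern: drain the old channel so no stale entries survive, then allocate a fresh buffered channel — at DefaultEntryChannelSize in one case, at the configured maximum in the other. A standalone sketch of that pattern; the drain below only approximates what clearEntryCHannel presumably does, and none of this is the PR's code:

package main

import "fmt"

const defaultEntryChannelSize = 100000

type streamClient struct {
	entryChan        chan interface{}
	maxEntryChanSize uint64
}

// drain closes the old channel and discards whatever is still buffered,
// standing in for clearEntryCHannel in the real client.
func (c *streamClient) drain() {
	close(c.entryChan)
	for range c.entryChan {
	}
}

func (c *streamClient) renewEntryChannel() {
	c.drain()
	c.entryChan = make(chan interface{}, defaultEntryChannelSize)
}

func (c *streamClient) renewMaxEntryChannel() {
	c.drain()
	c.entryChan = make(chan interface{}, c.maxEntryChanSize)
}

func main() {
	c := &streamClient{
		entryChan:        make(chan interface{}, defaultEntryChannelSize),
		maxEntryChanSize: 1000000, // from zkevm.l2-datastreamer-max-entrychan
	}
	c.entryChan <- "stale entry"
	c.renewMaxEntryChannel()
	fmt.Println("capacity after renew:", cap(c.entryChan)) // 1000000
}

Worth noting: the constructor above still allocates the initial channel with a hardcoded 100000, so the configured maximum only takes effect once RenewMaxEntryChannel runs.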
12 changes: 6 additions & 6 deletions zk/datastream/client/stream_client_test.go
@@ -51,7 +51,7 @@ func TestStreamClientReadHeaderEntry(t *testing.T) {

 	for _, testCase := range testCases {
 		t.Run(testCase.name, func(t *testing.T) {
-			c := NewClient(context.Background(), "", false, 0, 500*time.Millisecond, 0)
+			c := NewClient(context.Background(), "", false, 0, 500*time.Millisecond, 0, DefaultEntryChannelSize)
 			server, conn := net.Pipe()
 			defer server.Close()
 			defer c.Stop()
@@ -118,7 +118,7 @@ func TestStreamClientReadResultEntry(t *testing.T) {

 	for _, testCase := range testCases {
 		t.Run(testCase.name, func(t *testing.T) {
-			c := NewClient(context.Background(), "", false, 0, 500*time.Millisecond, 0)
+			c := NewClient(context.Background(), "", false, 0, 500*time.Millisecond, 0, DefaultEntryChannelSize)
 			server, conn := net.Pipe()
 			defer server.Close()
 			defer c.Stop()
@@ -190,7 +190,7 @@ func TestStreamClientReadFileEntry(t *testing.T) {
 	}
 	for _, testCase := range testCases {
 		t.Run(testCase.name, func(t *testing.T) {
-			c := NewClient(context.Background(), "", false, 0, 500*time.Millisecond, 0)
+			c := NewClient(context.Background(), "", false, 0, 500*time.Millisecond, 0, DefaultEntryChannelSize)
 			server, conn := net.Pipe()
 			defer server.Close()
 			defer c.Stop()
@@ -213,7 +213,7 @@
 }
 
 func TestStreamClientReadParsedProto(t *testing.T) {
-	c := NewClient(context.Background(), "", false, 0, 500*time.Millisecond, 0)
+	c := NewClient(context.Background(), "", false, 0, 500*time.Millisecond, 0, DefaultEntryChannelSize)
 	serverConn, clientConn := net.Pipe()
 	c.conn = clientConn
 	c.checkTimeout = 1 * time.Second
@@ -285,7 +285,7 @@ func TestStreamClientGetLatestL2Block(t *testing.T) {
 		clientConn.Close()
 	}()
 
-	c := NewClient(context.Background(), "", false, 0, 500*time.Millisecond, 0)
+	c := NewClient(context.Background(), "", false, 0, 500*time.Millisecond, 0, DefaultEntryChannelSize)
 	c.conn = clientConn
 	c.checkTimeout = 1 * time.Second
 	c.allowStops = false
@@ -399,7 +399,7 @@ func TestStreamClientGetL2BlockByNumber(t *testing.T) {
 		clientConn.Close()
 	}()
 
-	c := NewClient(context.Background(), "", false, 0, 500*time.Millisecond, 0)
+	c := NewClient(context.Background(), "", false, 0, 500*time.Millisecond, 0, DefaultEntryChannelSize)
 	c.header = &types.HeaderEntry{
 		TotalEntries: 4,
 	}
@@ -25,8 +25,8 @@ func main() {
flag.StringVar(&stream2, "stream2", "", "the second stream to pull data from")
flag.Parse()

client1 := client.NewClient(ctx, stream1, false, 0, 0, 0)
client2 := client.NewClient(ctx, stream2, false, 0, 0, 0)
client1 := client.NewClient(ctx, stream1, false, 0, 0, 0, client.DefaultEntryChannelSize)
client2 := client.NewClient(ctx, stream2, false, 0, 0, 0, client.DefaultEntryChannelSize)

err := client1.Start()
if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion zk/debug_tools/datastream-correctness-check/main.go
@@ -19,7 +19,7 @@ func main() {
 	}
 
 	// Create client
-	client := client.NewClient(ctx, cfg.Datastream, false, 3, 500, 0)
+	client := client.NewClient(ctx, cfg.Datastream, false, 3, 500, 0, client.DefaultEntryChannelSize)
 
 	// Start client (connect to the server)
 	defer client.Stop()
17 changes: 9 additions & 8 deletions zk/stages/stage_batches.go
@@ -5,28 +5,28 @@ import (
"errors"
"fmt"
"math/big"
"sync/atomic"
"time"
"os"
"sync/atomic"
"syscall"
"time"

"github.com/ledgerwatch/erigon-lib/chain"
"github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/kv"

"github.com/ledgerwatch/erigon/core/rawdb"
"github.com/ledgerwatch/erigon/core/state"
ethTypes "github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/eth/ethconfig"
"github.com/ledgerwatch/erigon/eth/stagedsync"
"github.com/ledgerwatch/erigon/eth/stagedsync/stages"
"github.com/ledgerwatch/erigon/params"
"github.com/ledgerwatch/erigon/zk"
"github.com/ledgerwatch/erigon/zk/datastream/client"
"github.com/ledgerwatch/erigon/zk/datastream/types"
"github.com/ledgerwatch/erigon/zk/erigon_db"
"github.com/ledgerwatch/erigon/zk/hermez_db"
"github.com/ledgerwatch/erigon/zk/sequencer"
"github.com/ledgerwatch/erigon/core/rawdb"
"github.com/ledgerwatch/erigon/core/state"
"github.com/ledgerwatch/erigon/eth/ethconfig"
"github.com/ledgerwatch/erigon/zk/datastream/client"
"github.com/ledgerwatch/log/v3"
)

@@ -60,6 +60,7 @@ type HermezDb interface {

 type DatastreamClient interface {
 	RenewEntryChannel()
+	RenewMaxEntryChannel()
 	ReadAllEntriesToChannel() error
 	StopReadingToChannel()
 	GetEntryChan() *chan interface{}
@@ -281,7 +282,7 @@ func SpawnStageBatches(
 	// start routine to download blocks and push them in a channel
 	errorChan := make(chan struct{})
 	dsClientRunner := NewDatastreamClientRunner(dsQueryClient, logPrefix)
-	dsClientRunner.StartRead(errorChan)
+	dsClientRunner.StartRead(errorChan, highestDSL2Block-stageProgressBlockNo)
 	defer dsClientRunner.StopRead()
 
 	entryChan := dsQueryClient.GetEntryChan()
@@ -853,5 +854,5 @@ func getHighestDSL2Block(ctx context.Context, batchCfg BatchesCfg, latestFork uint16)

 func buildNewStreamClient(ctx context.Context, batchesCfg BatchesCfg, latestFork uint16) *client.StreamClient {
 	cfg := batchesCfg.zkCfg
-	return client.NewClient(ctx, cfg.L2DataStreamerUrl, cfg.L2DataStreamerUseTLS, cfg.DatastreamVersion, cfg.L2DataStreamerTimeout, latestFork)
+	return client.NewClient(ctx, cfg.L2DataStreamerUrl, cfg.L2DataStreamerUseTLS, cfg.DatastreamVersion, cfg.L2DataStreamerTimeout, latestFork, client.DefaultEntryChannelSize)
 }
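SpawnStageBatches now passes highestDSL2Block-stageProgressBlockNo — how far the node lags the stream — into StartRead, which (per the next file) only switches to the max-size channel when that lag exceeds DefaultEntryChannelSize. Note also that buildNewStreamClient passes client.DefaultEntryChannelSize rather than cfg.L2DataStreamerMaxEntryChan, so clients built here keep the default cap; the configured maximum only applies to the client built in eth/backend.go. A hedged sketch of the sizing decision — the helper below is illustrative, not part of the PR:

package main

import "fmt"

const defaultEntryChannelSize = 100000 // client.DefaultEntryChannelSize

// chooseChannelSize reproduces StartRead's branch: if the node is further
// behind the stream than the default buffer can hold, use the configured
// maximum instead.
func chooseChannelSize(diffBlock, maxEntryChanSize uint64) uint64 {
	if diffBlock > defaultEntryChannelSize {
		return maxEntryChanSize // RenewMaxEntryChannel
	}
	return defaultEntryChannelSize // RenewEntryChannel
}

func main() {
	// syncing from scratch: millions of blocks behind
	fmt.Println(chooseChannelSize(2500000, 1000000)) // 1000000
	// near the tip: a few blocks behind
	fmt.Println(chooseChannelSize(42, 1000000)) // 100000
}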
15 changes: 12 additions & 3 deletions zk/stages/stage_batches_datastream.go
@@ -4,9 +4,10 @@ import (
"fmt"
"math/rand"
"sync/atomic"
"time"

"github.com/ledgerwatch/erigon/zk/datastream/client"
"github.com/ledgerwatch/log/v3"
"time"
)

type DatastreamClientRunner struct {
@@ -23,7 +24,12 @@ func NewDatastreamClientRunner(dsClient DatastreamClient, logPrefix string) *DatastreamClientRunner {
 	}
 }
 
-func (r *DatastreamClientRunner) StartRead(errorChan chan struct{}) error {
-	r.dsClient.RenewEntryChannel()
+func (r *DatastreamClientRunner) StartRead(errorChan chan struct{}, diffBlock uint64) error {
+	if diffBlock > client.DefaultEntryChannelSize {
+		r.dsClient.RenewMaxEntryChannel()
+	} else {
+		r.dsClient.RenewEntryChannel()
+	}
 	if r.isReading.Load() {
 		return fmt.Errorf("tried starting datastream client runner thread while another is running")
@@ -41,7 +47,10 @@
 		defer r.isReading.Store(false)
 
 		if err := r.dsClient.ReadAllEntriesToChannel(); err != nil {
-			time.Sleep(1 * time.Second)
+			log.Warn("Waiting for all entries to be processed before stopping...")
+			for len(*r.dsClient.GetEntryChan()) > 0 {
+				time.Sleep(1 * time.Second)
+			}
 			errorChan <- struct{}{}
 			log.Warn(fmt.Sprintf("[%s] Error downloading blocks from datastream", r.logPrefix), "error", err)
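The error path also changes: instead of sleeping one second and signalling immediately, the runner now blocks until the consumer has emptied the entry channel before sending on errorChan, so buffered entries are not thrown away when the stage tears down. A standalone sketch of that drain-then-signal pattern; the producer and consumer here are hypothetical stand-ins for the real read loop:

package main

import (
	"fmt"
	"time"
)

func main() {
	entryChan := make(chan interface{}, 8)
	errorChan := make(chan struct{})
	done := make(chan struct{})

	// stands in for the stage's error handling on the other side of errorChan
	go func() {
		<-errorChan
		fmt.Println("error signalled only after the buffer was drained")
		close(done)
	}()

	// the producer hit an error after buffering some entries
	for i := 0; i < 5; i++ {
		entryChan <- i
	}

	// a consumer keeps pulling entries in the background
	go func() {
		for e := range entryChan {
			fmt.Println("processed entry", e)
		}
	}()

	// drain-then-signal, mirroring the new StartRead error path
	for len(entryChan) > 0 {
		time.Sleep(10 * time.Millisecond)
	}
	errorChan <- struct{}{}
	<-done
}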
3 changes: 3 additions & 0 deletions zk/stages/test_utils.go
@@ -54,6 +54,9 @@ func (c *TestDatastreamClient) ReadAllEntriesToChannel() error {
 func (c *TestDatastreamClient) RenewEntryChannel() {
 }
 
+func (c *TestDatastreamClient) RenewMaxEntryChannel() {
+}
+
 func (c *TestDatastreamClient) StopReadingToChannel() {
 	c.stopReadingToChannel.Store(true)
 }