Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

test: actors: manual CC onboarding and proving integration test #12017

Merged
merged 36 commits into from
Jun 6, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
36 commits
Select commit Hold shift + click to select a range
96b4593
remove client CLI
aarshkshah1992 May 16, 2024
03bd0a5
remove markets CLI from miner
aarshkshah1992 May 16, 2024
74dbabb
remove markets from all CLI
aarshkshah1992 May 16, 2024
2c4d1bd
remove client API
aarshkshah1992 May 16, 2024
ba3e26c
update go mod
aarshkshah1992 May 16, 2024
d1be22a
remove EnableMarkets flag
aarshkshah1992 May 16, 2024
5672c5f
remove market subsystem
aarshkshah1992 May 16, 2024
789848b
remove dagstore
aarshkshah1992 May 16, 2024
ee3a83d
remove index provider
aarshkshah1992 May 16, 2024
aea0b8f
remove graphsync and data-transfer
aarshkshah1992 May 17, 2024
5b09bbc
remove markets
aarshkshah1992 May 17, 2024
13a34c6
go mod tidy
aarshkshah1992 May 17, 2024
96579ff
fix cbor gen deps
aarshkshah1992 May 17, 2024
5596c96
remove deal making from config
aarshkshah1992 May 17, 2024
a0f0d0d
remove eol alert
aarshkshah1992 May 17, 2024
64133ec
Merge remote-tracking branch 'origin/master' into feat/remove-markets…
aarshkshah1992 May 17, 2024
a7728c3
Merge remote-tracking branch 'origin/master' into feat/remove-markets…
aarshkshah1992 May 20, 2024
84ba02d
go mod tidy
aarshkshah1992 May 20, 2024
86824d7
Merge remote-tracking branch 'origin/feat/remove-markets-client' into…
aarshkshah1992 May 20, 2024
0a44fce
Merge remote-tracking branch 'origin/master' into feat/remove-markets…
aarshkshah1992 May 22, 2024
149e0a7
update go mod
aarshkshah1992 May 22, 2024
42a0799
changes as per review
aarshkshah1992 May 30, 2024
31e45ca
merge master
aarshkshah1992 May 30, 2024
3ea92f9
Merge remote-tracking branch 'origin/feat/remove-markets-client' into…
aarshkshah1992 May 30, 2024
42743e7
make jen
aarshkshah1992 May 30, 2024
01b9ab6
changes as per review
aarshkshah1992 May 30, 2024
a6d82ae
test: actors: manual CC onboarding and proving integration test
rvagg May 20, 2024
4134be2
test: actors: manual CC onboarding itest with real proofs
rvagg May 22, 2024
789dea2
test: actors: fix lint issue, require proofs in CI
rvagg May 22, 2024
ebfb5f2
test: actors: rename real proofs test, fix dispute window wait
rvagg May 23, 2024
4134b11
feat: add TestUnmanagedMiner in the itest kit for non-storage managed…
rvagg May 27, 2024
ad651df
feat: test: improve UnmanagedMiner test harness
aarshkshah1992 May 27, 2024
1d5ed51
feat: test: MineBlocksMustPost can watch for >1 miners (#12063)
rvagg Jun 4, 2024
748c03a
Implement snap deals test for manual sector onboarding (#12066)
aarshkshah1992 Jun 6, 2024
119f877
Merge remote-tracking branch 'origin/main' into rvagg/manual_cc_itest
aarshkshah1992 Jun 6, 2024
a7c76a7
fix config
aarshkshah1992 Jun 6, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,7 @@ jobs:
"itest-get_messages_in_ts": ["self-hosted", "linux", "x64", "xlarge"],
"itest-lite_migration": ["self-hosted", "linux", "x64", "xlarge"],
"itest-lookup_robust_address": ["self-hosted", "linux", "x64", "xlarge"],
"itest-manual_onboarding": ["self-hosted", "linux", "x64", "xlarge"],
"itest-mempool": ["self-hosted", "linux", "x64", "xlarge"],
"itest-mpool_msg_uuid": ["self-hosted", "linux", "x64", "xlarge"],
"itest-mpool_push_with_uuid": ["self-hosted", "linux", "x64", "xlarge"],
Expand Down Expand Up @@ -129,6 +130,7 @@ jobs:
"itest-deals",
"itest-direct_data_onboard_verified",
"itest-direct_data_onboard",
"itest-manual_onboarding",
"itest-net",
"itest-path_detach_redeclare",
"itest-path_type_filters",
Expand Down
133 changes: 104 additions & 29 deletions itests/kit/blockminer.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ import (

"github.com/stretchr/testify/require"

"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-bitfield"
"github.com/filecoin-project/go-jsonrpc"
"github.com/filecoin-project/go-state-types/abi"
Expand All @@ -20,6 +21,7 @@ import (
"github.com/filecoin-project/go-state-types/dline"

"github.com/filecoin-project/lotus/api"
"github.com/filecoin-project/lotus/api/v1api"
"github.com/filecoin-project/lotus/chain/types"
"github.com/filecoin-project/lotus/miner"
)
Expand All @@ -29,11 +31,13 @@ type BlockMiner struct {
t *testing.T
miner *TestMiner

nextNulls int64
pause chan struct{}
unpause chan struct{}
wg sync.WaitGroup
cancel context.CancelFunc
nextNulls int64
postWatchMiners []address.Address
postWatchMinersLk sync.Mutex
pause chan struct{}
unpause chan struct{}
wg sync.WaitGroup
cancel context.CancelFunc
}

func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner {
Expand All @@ -46,19 +50,58 @@ func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner {
}
}

// minerDeadline pairs a miner actor address with a snapshot of its proving
// deadline info, so MineBlocksMustPost can decide which miners have a
// deadline closing soon and must be forced to post before mining continues.
type minerDeadline struct {
	addr     address.Address
	deadline dline.Info
}

// minerDeadlines is a list of per-miner deadline snapshots, with helpers for
// logging and for filtering down to the deadlines that are about to close.
type minerDeadlines []minerDeadline

// CloseList returns, for each tracked deadline and in the same order as mds,
// the Last() epoch — the final epoch at which a post for that deadline may be
// submitted. Used for log messages.
func (mds minerDeadlines) CloseList() []abi.ChainEpoch {
	// Pre-size: the output length is known to be len(mds).
	ret := make([]abi.ChainEpoch, 0, len(mds))
	for _, md := range mds {
		ret = append(ret, md.deadline.Last())
	}
	return ret
}

// MinerStringList returns the string form of each tracked miner's address,
// in the same order as mds. Used for log messages.
func (mds minerDeadlines) MinerStringList() []string {
	// Pre-size: the output length is known to be len(mds).
	ret := make([]string, 0, len(mds))
	for _, md := range mds {
		ret = append(ret, md.addr.String())
	}
	return ret
}

// FilterByLast returns a new minerDeadlines containing only the deadlines
// whose Last() epoch is less than or equal to last — i.e. the deadlines that
// will already have closed by epoch last. Callers pass the epoch the chain is
// about to reach so the result is the set of miners that must post now.
//
// NOTE(review): the previous comment said "greater than or equal to last",
// which contradicted the `last >= md.deadline.Last()` condition below; the
// code (keeping deadlines that close at or before last) matches how both
// call sites in MineBlocksMustPost use the result, so the comment was wrong.
func (mds minerDeadlines) FilterByLast(last abi.ChainEpoch) minerDeadlines {
	var ret minerDeadlines
	for _, md := range mds {
		if last >= md.deadline.Last() {
			ret = append(ret, md)
		}
	}
	return ret
}

// partitionTracker tracks, for a single miner and a single deadline, which
// partitions have had a window post submitted, so we can tell when every
// partition of that deadline has been proven.
type partitionTracker struct {
	minerAddr  address.Address // miner whose post messages we are watching for
	partitions []api.Partition // partitions of the tracked deadline
	posted     bitfield.BitField // partition indexes already covered by submitted posts
}

func newPartitionTracker(ctx context.Context, dlIdx uint64, bm *BlockMiner) *partitionTracker {
dlines, err := bm.miner.FullNode.StateMinerDeadlines(ctx, bm.miner.ActorAddr, types.EmptyTSK)
require.NoError(bm.t, err)
// newPartitionTracker creates a new partitionTracker that tracks the deadline index dlIdx for the
// given minerAddr. It uses the BlockMiner bm to interact with the chain.
func newPartitionTracker(ctx context.Context, t *testing.T, client v1api.FullNode, minerAddr address.Address, dlIdx uint64) *partitionTracker {
dlines, err := client.StateMinerDeadlines(ctx, minerAddr, types.EmptyTSK)
require.NoError(t, err)
dl := dlines[dlIdx]

parts, err := bm.miner.FullNode.StateMinerPartitions(ctx, bm.miner.ActorAddr, dlIdx, types.EmptyTSK)
require.NoError(bm.t, err)
parts, err := client.StateMinerPartitions(ctx, minerAddr, dlIdx, types.EmptyTSK)
require.NoError(t, err)
return &partitionTracker{
minerAddr: minerAddr,
partitions: parts,
posted: dl.PostSubmissions,
}
Expand All @@ -74,11 +117,11 @@ func (p *partitionTracker) done(t *testing.T) bool {
return uint64(len(p.partitions)) == p.count(t)
}

func (p *partitionTracker) recordIfPost(t *testing.T, bm *BlockMiner, msg *types.Message) (ret bool) {
func (p *partitionTracker) recordIfPost(t *testing.T, msg *types.Message) (ret bool) {
defer func() {
ret = p.done(t)
}()
if !(msg.To == bm.miner.ActorAddr) {
if !(msg.To == p.minerAddr) {
return
}
if msg.Method != builtin.MethodsMiner.SubmitWindowedPoSt {
Expand All @@ -92,19 +135,18 @@ func (p *partitionTracker) recordIfPost(t *testing.T, bm *BlockMiner, msg *types
return
}

func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *dline.Info) {

tracker := newPartitionTracker(ctx, dlinfo.Index, bm)
func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, minerAddr address.Address, dlinfo dline.Info) {
tracker := newPartitionTracker(ctx, bm.t, bm.miner.FullNode, minerAddr, dlinfo.Index)
if !tracker.done(bm.t) { // need to wait for post
bm.t.Logf("expect %d partitions proved but only see %d", len(tracker.partitions), tracker.count(bm.t))
poolEvts, err := bm.miner.FullNode.MpoolSub(ctx) //subscribe before checking pending so we don't miss any events
poolEvts, err := bm.miner.FullNode.MpoolSub(ctx) // subscribe before checking pending so we don't miss any events
require.NoError(bm.t, err)

// First check pending messages we'll mine this epoch
msgs, err := bm.miner.FullNode.MpoolPending(ctx, types.EmptyTSK)
require.NoError(bm.t, err)
for _, msg := range msgs {
if tracker.recordIfPost(bm.t, bm, &msg.Message) {
if tracker.recordIfPost(bm.t, &msg.Message) {
fmt.Printf("found post in mempool pending\n")
}
}
Expand All @@ -114,13 +156,13 @@ func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *d
msgs, err := bm.miner.FullNode.ChainGetBlockMessages(ctx, bc)
require.NoError(bm.t, err)
for _, msg := range msgs.BlsMessages {
if tracker.recordIfPost(bm.t, bm, msg) {
if tracker.recordIfPost(bm.t, msg) {
fmt.Printf("found post in message of prev tipset\n")
}

}
for _, msg := range msgs.SecpkMessages {
if tracker.recordIfPost(bm.t, bm, &msg.Message) {
if tracker.recordIfPost(bm.t, &msg.Message) {
fmt.Printf("found post in message of prev tipset\n")
}
}
Expand All @@ -139,7 +181,7 @@ func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *d
bm.t.Logf("pool event: %d", evt.Type)
if evt.Type == api.MpoolAdd {
bm.t.Logf("incoming message %v", evt.Message)
if tracker.recordIfPost(bm.t, bm, &evt.Message.Message) {
if tracker.recordIfPost(bm.t, &evt.Message.Message) {
fmt.Printf("found post in mempool evt\n")
break POOL
}
Expand All @@ -151,11 +193,24 @@ func (bm *BlockMiner) forcePoSt(ctx context.Context, ts *types.TipSet, dlinfo *d
}
}

// WatchMinerForPost registers minerAddr so that MineBlocksMustPost will also
// wait on its window post submissions. This matters when miners other than
// the BlockMiner's own are expected to post — in particular UnmanagedMiners,
// which do not take part in block mining. Safe for concurrent use.
func (bm *BlockMiner) WatchMinerForPost(minerAddr address.Address) {
	bm.postWatchMinersLk.Lock()
	defer bm.postWatchMinersLk.Unlock()
	bm.postWatchMiners = append(bm.postWatchMiners, minerAddr)
}

// Like MineBlocks but refuses to mine until the window post scheduler has wdpost messages in the mempool
// and everything shuts down if a post fails. It also enforces that every block mined succeeds
func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Duration) {
time.Sleep(time.Second)

// watch for our own window posts
bm.WatchMinerForPost(bm.miner.ActorAddr)

// wrap context in a cancellable context.
ctx, bm.cancel = context.WithCancel(ctx)
bm.wg.Add(1)
Expand All @@ -182,11 +237,25 @@ func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Dur
ts, err := bm.miner.FullNode.ChainHead(ctx)
require.NoError(bm.t, err)

dlinfo, err := bm.miner.FullNode.StateMinerProvingDeadline(ctx, bm.miner.ActorAddr, ts.Key())
require.NoError(bm.t, err)
if ts.Height()+5+abi.ChainEpoch(nulls) >= dlinfo.Last() { // Next block brings us past the last epoch in dline, we need to wait for miner to post
bm.t.Logf("forcing post to get in before deadline closes at %d", dlinfo.Last())
bm.forcePoSt(ctx, ts, dlinfo)
// Get current deadline information for all miners, then filter by the ones that are about to
// close so we can force a post for them.
bm.postWatchMinersLk.Lock()
var impendingDeadlines minerDeadlines
for _, minerAddr := range bm.postWatchMiners {
dlinfo, err := bm.miner.FullNode.StateMinerProvingDeadline(ctx, minerAddr, ts.Key())
require.NoError(bm.t, err)
require.NotNil(bm.t, dlinfo, "no deadline info for miner %s", minerAddr)
impendingDeadlines = append(impendingDeadlines, minerDeadline{addr: minerAddr, deadline: *dlinfo})
}
bm.postWatchMinersLk.Unlock()
impendingDeadlines = impendingDeadlines.FilterByLast(ts.Height() + 5 + abi.ChainEpoch(nulls))

if len(impendingDeadlines) > 0 {
// Next block brings us too close for at least one deadline, we need to wait for miners to post
bm.t.Logf("forcing post to get in if due before deadline closes at %v for %v", impendingDeadlines.CloseList(), impendingDeadlines.MinerStringList())
for _, md := range impendingDeadlines {
bm.forcePoSt(ctx, ts, md.addr, md.deadline)
}
}

var target abi.ChainEpoch
Expand Down Expand Up @@ -216,10 +285,13 @@ func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Dur
return
}
if !success {
// if we are mining a new null block and it brings us past deadline boundary we need to wait for miner to post
if ts.Height()+5+abi.ChainEpoch(nulls+i) >= dlinfo.Last() {
bm.t.Logf("forcing post to get in before deadline closes at %d", dlinfo.Last())
bm.forcePoSt(ctx, ts, dlinfo)
// if we are mining a new null block and it brings us past deadline boundary we need to wait for miners to post
impendingDeadlines = impendingDeadlines.FilterByLast(ts.Height() + 5 + abi.ChainEpoch(nulls+i))
if len(impendingDeadlines) > 0 {
bm.t.Logf("forcing post to get in if due before deadline closes at %v for %v", impendingDeadlines.CloseList(), impendingDeadlines.MinerStringList())
for _, md := range impendingDeadlines {
bm.forcePoSt(ctx, ts, md.addr, md.deadline)
}
}
}
}
Expand Down Expand Up @@ -378,4 +450,7 @@ func (bm *BlockMiner) Stop() {
close(bm.pause)
bm.pause = nil
}
bm.postWatchMinersLk.Lock()
bm.postWatchMiners = nil
bm.postWatchMinersLk.Unlock()
}
Loading
Loading