Skip to content

Commit

Permalink
DKG Safer transition of phases (#22)
Browse files Browse the repository at this point in the history
* better transition

* further cleaning up

* comment on phase sync

* test with one absent node

* allowing new nodes to transition
  • Loading branch information
nikkolasg authored Dec 10, 2020
1 parent 0893a25 commit 0c3784c
Show file tree
Hide file tree
Showing 3 changed files with 204 additions and 53 deletions.
32 changes: 16 additions & 16 deletions share/dkg/dkg.go
Original file line number Diff line number Diff line change
Expand Up @@ -377,21 +377,19 @@ func (d *DistKeyGenerator) Deals() (*DealBundle, error) {
// missing deals. It returns an error if the node is not in the right state, or
// if there is not enough valid shares, i.e. the dkg is failing already.
func (d *DistKeyGenerator) ProcessDeals(bundles []*DealBundle) (*ResponseBundle, error) {
if !d.canReceive {
// a node that is only in the group node should not process deals
// XXX it was an error before, but it simplifies the higher level logic
// if the library itself takes care of ignoring irrelevant messages. It
// means higher level library can simply broadcast to all nodes (old and
// new) without looking at which nodes should a packet be sent.
return nil, nil
}

if d.canIssue && d.state != DealPhase {
// oldnode member is not in the right state
return nil, fmt.Errorf("processdeals can only be called after producing shares")
return nil, fmt.Errorf("processdeals can only be called after producing shares - state %s", d.state.String())
}
if d.canReceive && !d.canIssue && d.state != InitPhase {
// newnode member which is not in the old group is not in the right state
return nil, fmt.Errorf("processdeals can only be called once after creating the dkg for a new member")
return nil, fmt.Errorf("processdeals can only be called once after creating the dkg for a new member - state %s", d.state.String())
}
if !d.canReceive {
// a node that is only in the old group should not process deals
d.state = ResponsePhase // he moves on to the next phase silently
return nil, nil
}
seenIndex := make(map[uint32]bool)
for _, bundle := range bundles {
Expand Down Expand Up @@ -542,7 +540,7 @@ func (d *DistKeyGenerator) ProcessResponses(bundles []*ResponseBundle) (*Result,
// if we are an old node that will leave
return nil, nil, fmt.Errorf("leaving node can process responses only after creating shares")
} else if d.state != ResponsePhase {
return nil, nil, fmt.Errorf("can only process responses after processing shares")
return nil, nil, fmt.Errorf("can only process responses after processing shares - current state %s", d.state)
}

if !d.c.FastSync && len(bundles) == 0 && d.canReceive && d.statuses.CompleteSuccess() {
Expand Down Expand Up @@ -614,13 +612,15 @@ func (d *DistKeyGenerator) ProcessResponses(bundles []*ResponseBundle) (*Result,
}
}

if !foundComplaint {
// there is no complaint !
if d.canReceive && d.statuses.CompleteSuccess() {
// there is no complaint in the responses received and the status matrix
// is all filled with success that means we can finish the protocol -
// regardless of the mode chosen (fast sync or not).
if !foundComplaint && d.statuses.CompleteSuccess() {
d.state = FinishPhase
if d.canReceive {
res, err := d.computeResult()
return res, nil, err
} else {
d.state = FinishPhase
// old nodes that are not present in the new group
return nil, nil, nil
}
Expand Down Expand Up @@ -690,7 +690,7 @@ func (d *DistKeyGenerator) ProcessJustifications(bundles []*JustificationBundle)
return nil, nil
}
if d.state != JustifPhase {
return nil, fmt.Errorf("node can only process justifications after processing responses")
return nil, fmt.Errorf("node can only process justifications after processing responses - current state %s", d.state.String())
}

seen := make(map[uint32]bool)
Expand Down
153 changes: 150 additions & 3 deletions share/dkg/proto_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ import (

// TestNetwork routes protocol packets between the in-memory boards of a test
// run, one board per node.
type TestNetwork struct {
	boards []*TestBoard
	// noops lists board indices that must NOT receive broadcasts — it
	// simulates nodes that are registered in the group but offline.
	noops []uint32
}

func NewTestNetwork(n int) *TestNetwork {
Expand All @@ -25,6 +26,10 @@ func NewTestNetwork(n int) *TestNetwork {
return t
}

// SetNoop marks the board at the given index as offline: subsequent
// broadcasts through this network are silently dropped for that board.
func (n *TestNetwork) SetNoop(index uint32) {
	n.noops = append(n.noops, index)
}

func (n *TestNetwork) BoardFor(index uint32) *TestBoard {
for _, b := range n.boards {
if b.index == index {
Expand All @@ -34,21 +39,36 @@ func (n *TestNetwork) BoardFor(index uint32) *TestBoard {
panic("no such indexes")
}

// isNoop reports whether the board at index i has been registered as
// offline via SetNoop, i.e. whether broadcasts to it must be dropped.
func (n *TestNetwork) isNoop(i uint32) bool {
	for k := 0; k < len(n.noops); k++ {
		if n.noops[k] == i {
			return true
		}
	}
	return false
}

// BroadcastDeal delivers a copy of the deal bundle to every board in the
// network, except boards marked offline via SetNoop.
func (n *TestNetwork) BroadcastDeal(a *DealBundle) {
	for _, board := range n.boards {
		// Only deliver to online boards; offline (noop) boards silently
		// miss the packet, as a crashed-but-registered node would.
		if !n.isNoop(board.index) {
			board.newDeals <- (*a)
		}
	}
}

// BroadcastResponse delivers a copy of the response bundle to every board in
// the network, except boards marked offline via SetNoop.
func (n *TestNetwork) BroadcastResponse(a *ResponseBundle) {
	for _, board := range n.boards {
		// Skip offline boards so they never observe the response phase.
		if !n.isNoop(board.index) {
			board.newResps <- *a
		}
	}
}

// BroadcastJustification delivers a copy of the justification bundle to every
// board in the network, except boards marked offline via SetNoop.
func (n *TestNetwork) BroadcastJustification(a *JustificationBundle) {
	for _, board := range n.boards {
		// Skip offline boards so they never observe the justification phase.
		if !n.isNoop(board.index) {
			board.newJusts <- *a
		}
	}
}

Expand Down Expand Up @@ -290,6 +310,7 @@ func TestProtoResharing(t *testing.T) {
}
}
testResults(t, suite, newT, newN, results)

}

func TestProtoThreshold(t *testing.T) {
Expand Down Expand Up @@ -384,6 +405,132 @@ func TestProtoFullFast(t *testing.T) {
testResults(t, suite, thr, n, results)
}

// TestProtoResharingAbsent runs a full DKG among n nodes, then reshares to a
// new group (n-1 old members plus two fresh nodes) in which one member is
// registered but offline: the test network drops every packet addressed to
// it. All online members are expected to finish successfully; the offline
// member is expected to end with an error.
func TestProtoResharingAbsent(t *testing.T) {
	n := 4
	thr := 3
	// The second group keeps n-1 nodes from the old group and adds two
	// fresh nodes, so it has one more member and a higher threshold.
	newN := n + 1
	newT := thr + 1

	period := 1 * time.Second
	suite := edwards25519.NewBlakeSHA256Ed25519()
	tns := GenerateTestNodes(suite, n)
	list := NodesFromTest(tns)
	network := NewTestNetwork(n)
	dkgConf := Config{
		Suite:     suite,
		NewNodes:  list,
		Threshold: thr,
		Auth:      schnorr.NewScheme(suite),
	}
	SetupNodes(tns, &dkgConf)
	SetupProto(tns, &dkgConf, period, network)

	var resCh = make(chan OptionResult, 1)
	// Launch one collector goroutine per node; each blocks until its node's
	// protocol run ends and then forwards the result on resCh.
	for _, node := range tns {
		go func(n *TestNode) {
			optRes := <-n.proto.WaitEnd()
			n.res = optRes.Result
			resCh <- optRes
		}(node)

	}
	// start the phasers driving the phase transitions of each node
	for _, node := range tns {
		go node.phaser.Start()
	}
	time.Sleep(100 * time.Millisecond)
	// move two periods:
	// nodes already sent their deals, so they need to receive them after one
	// period, then they send their responses. Second period to receive the
	// responses, and then they send the justifications, if any.
	// Since there are no faults we expect to receive the result only after
	// two periods.
	for i := 0; i < 2; i++ {
		moveTime(tns, period)
		time.Sleep(100 * time.Millisecond)
	}

	// expect a successful result from every one of the n nodes
	var results []*Result
	for optRes := range resCh {
		require.NoError(t, optRes.Error)
		results = append(results, optRes.Result)
		if len(results) == n {
			break
		}
	}
	testResults(t, suite, thr, n, results)

	fmt.Printf("\n\n ----- RESHARING ----\n\n")
	// RESHARING: keep the first n-1 old nodes and append two fresh ones.
	var newTns = make([]*TestNode, newN)
	copy(newTns, tns[:n-1])
	// The fresh node at index n-1 reuses the index of the departed old node;
	// presumably old/new membership is distinguished by key, not index —
	// NOTE(review): confirm against NodesFromTest/Config semantics.
	newTns[n-1] = NewTestNode(suite, n-1)
	newTns[n] = NewTestNode(suite, n)
	network = NewTestNetwork(newN)
	newList := NodesFromTest(newTns)
	newConf := &Config{
		Suite:        suite,
		NewNodes:     newList,
		OldNodes:     list,
		Threshold:    newT,
		OldThreshold: thr,
		Auth:         schnorr.NewScheme(suite),
	}

	SetupReshareNodes(newTns, newConf, tns[0].res.Key.Commits)
	SetupProto(newTns, newConf, period, network)
	///
	/// Mark one member (an old node carried over into the new group) as
	/// registered but offline: the network drops all packets sent to it.
	///
	network.SetNoop(newTns[0].Index)
	resCh = make(chan OptionResult, 1)
	// Launch one collector goroutine per node of the new group.
	for _, node := range newTns {
		go func(n *TestNode) {
			optRes := <-n.proto.WaitEnd()
			n.res = optRes.Result
			resCh <- optRes
		}(node)
	}
	// start the phasers of the new group
	for _, node := range newTns {
		go node.phaser.Start()
	}
	time.Sleep(100 * time.Millisecond)
	// move three periods:
	// nodes already sent their deals, so they need to receive them after one
	// period, then they send their responses. Second period to receive the
	// responses, and then they send the justifications, if any. A third period
	// is needed to receive all justifications.
	for i := 0; i < 3; i++ {
		moveTime(newTns, period)
		time.Sleep(100 * time.Millisecond)
	}

	// expect newN-1 successful results and exactly one error (offline node)
	results = nil
	var errNode error
	for optRes := range resCh {
		if optRes.Error != nil {
			fmt.Printf("GOT ONE ERROR\n")
			require.Nil(t, errNode, "already an error saved!?")
			errNode = optRes.Error
			continue
		}
		results = append(results, optRes.Result)
		fmt.Printf("GOT %d RESULTS\n", len(results))
		if len(results) == newN-1 {
			break
		}
	}
	testResults(t, suite, newT, newN, results)
}

func TestProtoThresholdFast(t *testing.T) {
n := 5
thr := 4
Expand Down
Loading

0 comments on commit 0c3784c

Please sign in to comment.