This repository has been archived by the owner on Jan 11, 2024. It is now read-only.

New proposal logic #447

Merged
merged 9 commits into from
Nov 29, 2023
6 changes: 6 additions & 0 deletions fendermint/app/settings/src/lib.rs
@@ -112,6 +112,12 @@ pub struct TopDownConfig {
/// conservative and avoid others from rejecting the proposal because they don't see the
/// height as final yet.
pub chain_head_delay: BlockHeight,
/// The number of blocks on top of `chain_head_delay` to wait before proposing a height
/// as final on the parent chain, to avoid slight disagreements between validators over
/// whether a block is final yet.
pub proposal_delay: BlockHeight,
/// The maximum number of blocks a single topdown proposal may cover
pub max_proposal_range: BlockHeight,
/// Parent syncing cron period, in seconds
#[serde_as(as = "DurationSeconds<u64>")]
pub polling_interval: Duration,
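The three settings above interact: `chain_head_delay` and `proposal_delay` keep the proposal behind the parent chain head, while `max_proposal_range` caps how far a single proposal may reach. A minimal sketch of how such a bound could be computed, assuming hypothetical inputs `parent_head` and `last_committed` (the actual proposal logic lives in `fendermint/vm/topdown` and may differ):

```rust
// Illustrative sketch only; not the fendermint implementation.
type BlockHeight = u64;

fn candidate_proposal_height(
    parent_head: BlockHeight,    // hypothetical: latest height seen on the parent chain
    last_committed: BlockHeight, // hypothetical: height of the last committed finality
    chain_head_delay: BlockHeight,
    proposal_delay: BlockHeight,
    max_proposal_range: BlockHeight,
) -> Option<BlockHeight> {
    // Only heights this far behind the parent head are treated as final enough to propose.
    let finalizable = parent_head.checked_sub(chain_head_delay + proposal_delay)?;
    // Cap the proposal so it never spans more than `max_proposal_range` blocks.
    let capped = finalizable.min(last_committed + max_proposal_range);
    (capped > last_committed).then_some(capped)
}

fn main() {
    // Parent head at 1_000, 10 + 2 blocks of delay, range capped at 100 blocks:
    // a node that last committed height 850 would propose at most height 950.
    assert_eq!(candidate_proposal_height(1_000, 850, 10, 2, 100), Some(950));
}
```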
4 changes: 3 additions & 1 deletion fendermint/app/src/cmd/run.rs
@@ -167,7 +167,9 @@ async fn run(settings: Settings) -> anyhow::Result<()> {
topdown_config.polling_interval,
topdown_config.exponential_back_off,
topdown_config.exponential_retry_limit,
);
)
.with_proposal_delay(topdown_config.proposal_delay)
.with_max_proposal_range(topdown_config.max_proposal_range);
let ipc_provider = Arc::new(create_ipc_provider_proxy(&settings)?);
let finality_provider =
CachedFinalityProvider::uninitialized(config.clone(), ipc_provider.clone()).await?;
23 changes: 13 additions & 10 deletions fendermint/vm/interpreter/src/chain.rs
@@ -264,18 +264,21 @@ where
"chain interpreter committed topdown finality",
);

// Committing the finality for block `N` triggers the execution
// of all side effects up to `N-1`: for deferred-execution chains,
// this is the latest state we know for sure is available.
let execution_fr = prev_height;
let execution_to = finality.height - 1;

// error happens if we cannot get the validator set from ipc agent after retries
let validator_changes = provider
.validator_changes_from(
prev_height + 1,
finality.height,
&finality.block_hash,
)
.validator_changes_from(execution_fr, execution_to)
.await
.context("failed to fetch validator changes")?;
tracing::debug!(
from = prev_height + 1,
to = finality.height,
from = execution_fr,
to = execution_to,
msgs = validator_changes.len(),
"chain interpreter received total validator changes"
);
@@ -286,13 +289,13 @@

// error happens if we cannot get the cross messages from ipc agent after retries
let msgs = provider
.top_down_msgs_from(prev_height + 1, p.height as u64, &finality.block_hash)
.top_down_msgs_from(execution_fr, execution_to)
.await
.context("failed to fetch top down messages")?;
tracing::debug!(
number_of_messages = msgs.len(),
start = prev_height + 1,
end = p.height,
start = execution_fr,
end = execution_to,
"chain interpreter received topdown msgs",
);

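For a concrete sense of the window change (hypothetical heights; the arithmetic mirrors the diff above): committing the finality for block 106 with a previous committed height of 100 now fetches side effects for the inclusive range 100..=105, where the old code used 101..=106.

```rust
// Hypothetical heights illustrating the new execution window.
fn main() {
    let prev_height: u64 = 100;
    let finality_height: u64 = 106;

    let execution_fr = prev_height;          // was `prev_height + 1` before this change
    let execution_to = finality_height - 1;  // was `finality.height` before this change

    // The fetch helpers now iterate `from..=to`, so both endpoints are included.
    let heights: Vec<u64> = (execution_fr..=execution_to).collect();
    assert_eq!(heights, vec![100, 101, 102, 103, 104, 105]);
}
```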
191 changes: 10 additions & 181 deletions fendermint/vm/topdown/src/finality/fetch.rs
@@ -5,10 +5,9 @@ use crate::finality::null::FinalityWithNull;
use crate::finality::ParentViewPayload;
use crate::proxy::ParentQueryProxy;
use crate::{
handle_null_round, is_null_round_error, BlockHash, BlockHeight, Config, Error,
IPCParentFinality, ParentFinalityProvider, ParentViewProvider,
handle_null_round, BlockHash, BlockHeight, Config, Error, IPCParentFinality,
ParentFinalityProvider, ParentViewProvider,
};
use anyhow::anyhow;
use async_stm::{Stm, StmResult};
use ipc_sdk::cross::CrossMsg;
use ipc_sdk::staking::StakingChangeRequest;
@@ -72,10 +71,9 @@ impl<T: ParentQueryProxy + Send + Sync + 'static> ParentViewProvider for CachedF
&self,
from: BlockHeight,
to: BlockHeight,
block_hash: &BlockHash,
) -> anyhow::Result<Vec<StakingChangeRequest>> {
let mut v = vec![];
for h in from..to {
for h in from..=to {
let mut r = self.validator_changes(h).await?;
tracing::debug!(
number_of_messages = r.len(),
@@ -85,31 +83,19 @@
v.append(&mut r);
}

// now we query the validator changes at height `to` and we also checks the block hash,
// so that we know the parent node is reliable enough.
let mut r = self.validator_changes_with_check(to, block_hash).await?;
tracing::debug!(
number_of_messages = r.len(),
height = to,
"fetched validator change set",
);
v.append(&mut r);

Ok(v)
}

/// Get top down message in the range `from` to `to`, both inclusive. It also validates the block
/// hash at height `to` must equal to the input `block_hash`. For the check to be valid, one
/// Get top down message in the range `from` to `to`, both inclusive. For the check to be valid, one
/// should not pass a height `to` that is a null block, otherwise the check is useless. In debug
/// mode, it will throw an error.
async fn top_down_msgs_from(
&self,
from: BlockHeight,
to: BlockHeight,
block_hash: &BlockHash,
) -> anyhow::Result<Vec<CrossMsg>> {
let mut v = vec![];
for h in from..to {
for h in from..=to {
let mut r = self.top_down_msgs(h).await?;
tracing::debug!(
number_of_top_down_messages = r.len(),
@@ -118,16 +104,6 @@ impl<T: ParentQueryProxy + Send + Sync + 'static> CachedFinalityProvider<T> {
);
v.append(&mut r);
}

// now we query the topdown messages at height `to` and we also checks the block hash,
// so that we know the parent node is reliable enough.
let mut r = self.top_down_msgs_with_check(to, block_hash).await?;
tracing::debug!(
number_of_messages = r.len(),
height = to,
"obtained topdown messages",
);
v.append(&mut r);
Ok(v)
}
}
@@ -186,42 +162,6 @@ impl<T: ParentQueryProxy + Send + Sync + 'static> CachedFinalityProvider<T> {
handle_null_round(r, Vec::new)
}

/// Queries the validator change set, but also performs a check on the block hash
async fn validator_changes_with_check(
&self,
height: BlockHeight,
block_hash: &BlockHash,
) -> anyhow::Result<Vec<StakingChangeRequest>> {
let r = self.inner.validator_changes(height).await?;

if let Some(v) = r {
return Ok(v);
}

let r = retry!(
self.config.exponential_back_off,
self.config.exponential_retry_limit,
self.parent_client
.get_validator_changes(height)
.await
.and_then(|r| {
if r.block_hash != *block_hash {
Err(anyhow!("block hash not equal"))
} else {
Ok(r.value)
}
})
);
if let Err(e) = &r {
debug_assert!(
!is_null_round_error(e),
"no way of checking block hash if block is null, report bug"
)
}

handle_null_round(r, Vec::new)
}

/// Should always return the top down messages; it fails only when the ipc parent_client is still
/// down after exponential retries
async fn top_down_msgs(&self, height: BlockHeight) -> anyhow::Result<Vec<CrossMsg>> {
@@ -242,42 +182,6 @@ impl<T: ParentQueryProxy + Send + Sync + 'static> CachedFinalityProvider<T> {

handle_null_round(r, Vec::new)
}

/// Queries the top down messages but also checks the block hash actually matches
async fn top_down_msgs_with_check(
&self,
height: BlockHeight,
block_hash: &BlockHash,
) -> anyhow::Result<Vec<CrossMsg>> {
let r = self.inner.top_down_msgs(height).await?;

if let Some(v) = r {
return Ok(v);
}

let r = retry!(
self.config.exponential_back_off,
self.config.exponential_retry_limit,
self.parent_client
.get_top_down_msgs(height)
.await
.and_then(|r| {
if r.block_hash != *block_hash {
Err(anyhow!("block hash not equal"))
} else {
Ok(r.value)
}
})
);

if let Err(e) = &r {
debug_assert!(
!is_null_round_error(e),
"no way of checking block hash if block is null, report bug"
)
}
handle_null_round(r, Vec::new)
}
}

impl<T> CachedFinalityProvider<T> {
@@ -287,11 +191,7 @@ impl<T> CachedFinalityProvider<T> {
committed_finality: Option<IPCParentFinality>,
parent_client: Arc<T>,
) -> Self {
let inner = FinalityWithNull::new(
config.min_proposal_interval(),
genesis_epoch,
committed_finality,
);
let inner = FinalityWithNull::new(config.clone(), genesis_epoch, committed_finality);
Self {
inner,
config,
@@ -440,8 +340,9 @@ mod tests {
polling_interval: Default::default(),
exponential_back_off: Default::default(),
exponential_retry_limit: 0,
min_proposal_interval: Some(1),
max_proposal_range: Some(1),
max_cache_blocks: None,
proposal_delay: None,
};
let genesis_epoch = blocks.lower_bound().unwrap();
let proxy = Arc::new(TestParentProxy { blocks });
@@ -516,10 +417,7 @@ mod tests {
106 => Some((vec![6; 32], vec![], vec![new_cross_msg(6)]))
);
let provider = new_provider(parent_blocks);
let messages = provider
.top_down_msgs_from(100, 106, &vec![6; 32])
.await
.unwrap();
let messages = provider.top_down_msgs_from(100, 106).await.unwrap();

assert_eq!(
messages,
@@ -533,38 +431,6 @@
)
}

/// This test should panic with "no way of checking block hash if block is null"
/// In our implementation, `top_down_msgs_from` should not be called when `to` is a null block
#[tokio::test]
#[should_panic]
async fn test_query_topdown_msgs_boundary_null() {
let parent_blocks = new_parent_blocks!(
100 => Some((vec![0; 32], vec![], vec![new_cross_msg(0)])), // genesis block
101 => None
);
let provider = new_provider(parent_blocks);
provider
.top_down_msgs_from(100, 101, &vec![6; 32])
.await
.unwrap();
}

#[tokio::test]
async fn test_query_topdown_msgs_invalid_hash() {
let parent_blocks = new_parent_blocks!(
100 => Some((vec![0; 32], vec![], vec![new_cross_msg(0)])), // genesis block
101 => Some((vec![1; 32], vec![], vec![new_cross_msg(1)])),
102 => Some((vec![2; 32], vec![], vec![new_cross_msg(2)])),
103 => Some((vec![3; 32], vec![], vec![new_cross_msg(3)])),
104 => None,
105 => None,
106 => Some((vec![6; 32], vec![], vec![new_cross_msg(6)]))
);
let provider = new_provider(parent_blocks);
let r = provider.top_down_msgs_from(100, 106, &vec![7; 32]).await;
assert!(r.is_err());
}

#[tokio::test]
async fn test_query_validator_changes() {
let parent_blocks = new_parent_blocks!(
@@ -577,45 +443,8 @@
106 => Some((vec![6; 32], vec![new_validator_changes(6)], vec![]))
);
let provider = new_provider(parent_blocks);
let messages = provider
.validator_changes_from(100, 106, &vec![6; 32])
.await
.unwrap();
let messages = provider.validator_changes_from(100, 106).await.unwrap();

assert_eq!(messages.len(), 4)
}

/// This test should panic with "no way of checking block hash if block is null"
/// In our implementation, `validator_changes_from` should not be called when `to` is a null block
#[tokio::test]
#[should_panic]
async fn test_query_validator_changes_boundary_null() {
let parent_blocks = new_parent_blocks!(
100 => Some((vec![0; 32], vec![new_validator_changes(0)], vec![new_cross_msg(0)])), // genesis block
101 => None
);
let provider = new_provider(parent_blocks);
provider
.validator_changes_from(100, 101, &vec![6; 32])
.await
.unwrap();
}

#[tokio::test]
async fn test_query_validator_changes_invalid_hash() {
let parent_blocks = new_parent_blocks!(
100 => Some((vec![0; 32], vec![new_validator_changes(0)], vec![new_cross_msg(0)])), // genesis block
101 => Some((vec![1; 32], vec![new_validator_changes(1)], vec![new_cross_msg(1)])),
102 => Some((vec![2; 32], vec![new_validator_changes(2)], vec![new_cross_msg(2)])),
103 => Some((vec![3; 32], vec![new_validator_changes(3)], vec![new_cross_msg(3)])),
104 => None,
105 => None,
106 => Some((vec![6; 32], vec![new_validator_changes(6)], vec![new_cross_msg(6)]))
);
let provider = new_provider(parent_blocks);
let r = provider
.validator_changes_from(100, 106, &vec![7; 32])
.await;
assert!(r.is_err());
}
}