leader: do not perform blockstore check in next_leader_slot
AshwinSekar committed Aug 5, 2024
1 parent f10bb0e commit 64a36b2
Showing 3 changed files with 41 additions and 132 deletions.
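
For orientation, a minimal before/after sketch of the call-site change, assembled from the hunks below; current_slot is just an illustrative binding, and this is a sketch rather than a new API:

// Before: next_leader_slot took an optional blockstore so it could skip
// slots for which shreds had already been produced.
cache.next_leader_slot(&pubkey, current_slot, &bank, Some(&blockstore), u64::MAX);
// After: the blockstore parameter is gone. The "shreds already exist" check is
// instead made at leader-start time in PohRecorder::reached_leader_slot, which
// now takes &mut self and is therefore called under a write lock.
cache.next_leader_slot(&pubkey, current_slot, &bank, u64::MAX);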
12 changes: 5 additions & 7 deletions core/src/replay_stage.rs
@@ -644,7 +644,6 @@ impl ReplayStage {

Self::reset_poh_recorder(
&my_pubkey,
&blockstore,
working_bank,
&poh_recorder,
&leader_schedule_cache,
@@ -1046,7 +1045,6 @@ impl ReplayStage {

Self::reset_poh_recorder(
&my_pubkey,
&blockstore,
reset_bank.clone(),
&poh_recorder,
&leader_schedule_cache,
@@ -2020,7 +2018,7 @@ impl ReplayStage {
assert!(!poh_recorder.read().unwrap().has_bank());

let (poh_slot, parent_slot) =
match poh_recorder.read().unwrap().reached_leader_slot(my_pubkey) {
match poh_recorder.write().unwrap().reached_leader_slot(my_pubkey) {
PohLeaderStatus::Reached {
poh_slot,
parent_slot,
@@ -2717,7 +2715,6 @@ impl ReplayStage {

fn reset_poh_recorder(
my_pubkey: &Pubkey,
blockstore: &Blockstore,
bank: Arc<Bank>,
poh_recorder: &RwLock<PohRecorder>,
leader_schedule_cache: &LeaderScheduleCache,
@@ -2729,7 +2726,6 @@ impl ReplayStage {
my_pubkey,
slot,
&bank,
Some(blockstore),
GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS,
);

@@ -9148,7 +9144,6 @@ pub(crate) mod tests {
// Reset PoH recorder to the completed bank to ensure consistent state
ReplayStage::reset_poh_recorder(
&my_pubkey,
&blockstore,
working_bank.clone(),
&poh_recorder,
&leader_schedule_cache,
@@ -9176,7 +9171,10 @@ pub(crate) mod tests {

// We should not attempt to start leader for the dummy_slot
assert_matches!(
poh_recorder.read().unwrap().reached_leader_slot(&my_pubkey),
poh_recorder
.write()
.unwrap()
.reached_leader_slot(&my_pubkey),
PohLeaderStatus::NotReached
);
assert!(!ReplayStage::maybe_start_leader(
109 changes: 5 additions & 104 deletions ledger/src/leader_schedule_cache.rs
@@ -1,6 +1,5 @@
use {
crate::{
blockstore::Blockstore,
leader_schedule::{FixedSchedule, LeaderSchedule},
leader_schedule_utils,
},
@@ -111,7 +110,6 @@ impl LeaderScheduleCache {
pubkey: &Pubkey,
current_slot: Slot,
bank: &Bank,
blockstore: Option<&Blockstore>,
max_slot_range: u64,
) -> Option<(Slot, Slot)> {
let (epoch, start_index) = bank.get_epoch_and_slot_index(current_slot + 1);
@@ -137,16 +135,6 @@ impl LeaderScheduleCache {
.get_indices(pubkey, offset)
.take_while(move |i| *i < num_slots)
.map(move |i| i as Slot + first_slot)
})
.skip_while(|slot| {
match blockstore {
None => false,
// Skip slots we have already sent a shred for.
Some(blockstore) => match blockstore.meta(*slot).unwrap() {
Some(meta) => meta.received > 0,
None => false,
},
}
});
let first_slot = schedule.next()?;
let max_slot = first_slot.saturating_add(max_slot_range);
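
For reference, the predicate removed by this hunk amounts to the following standalone sketch; it reuses the blockstore.meta call and the received field from the deleted lines above, and the helper name is illustrative rather than part of the crate:

// A slot counted as "already started" once any shred for it had been received,
// so next_leader_slot would skip past it.
fn slot_already_has_shreds(blockstore: &Blockstore, slot: Slot) -> bool {
    match blockstore.meta(slot).unwrap() {
        Some(meta) => meta.received > 0,
        None => false,
    }
}

A check on existing shreds is now only performed at leader-start time in PohRecorder::reached_leader_slot (see the poh_recorder.rs diff below).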
@@ -251,12 +239,10 @@ mod tests {
use {
super::*,
crate::{
blockstore::make_slot_entries,
genesis_utils::{
bootstrap_validator_stake_lamports, create_genesis_config,
create_genesis_config_with_leader, GenesisConfigInfo,
},
get_tmp_ledger_path_auto_delete,
staking_utils::tests::setup_vote_and_stake_accounts,
},
crossbeam_channel::unbounded,
@@ -393,19 +379,18 @@ mod tests {
pubkey
);
assert_eq!(
cache.next_leader_slot(&pubkey, 0, &bank, None, u64::MAX),
cache.next_leader_slot(&pubkey, 0, &bank, u64::MAX),
Some((1, 863_999))
);
assert_eq!(
cache.next_leader_slot(&pubkey, 1, &bank, None, u64::MAX),
cache.next_leader_slot(&pubkey, 1, &bank, u64::MAX),
Some((2, 863_999))
);
assert_eq!(
cache.next_leader_slot(
&pubkey,
2 * genesis_config.epoch_schedule.slots_per_epoch - 1, // no schedule generated for epoch 2
&bank,
None,
u64::MAX
),
None
@@ -416,84 +401,6 @@
&solana_sdk::pubkey::new_rand(), // not in leader_schedule
0,
&bank,
None,
u64::MAX
),
None
);
}

#[test]
fn test_next_leader_slot_blockstore() {
let pubkey = solana_sdk::pubkey::new_rand();
let mut genesis_config =
create_genesis_config_with_leader(42, &pubkey, bootstrap_validator_stake_lamports())
.genesis_config;
genesis_config.epoch_schedule.warmup = false;

let bank = Bank::new_for_tests(&genesis_config);
let cache = Arc::new(LeaderScheduleCache::new_from_bank(&bank));
let ledger_path = get_tmp_ledger_path_auto_delete!();

let blockstore = Blockstore::open(ledger_path.path())
.expect("Expected to be able to open database ledger");

assert_eq!(
cache.slot_leader_at(bank.slot(), Some(&bank)).unwrap(),
pubkey
);
// Check that the next leader slot after 0 is slot 1
assert_eq!(
cache
.next_leader_slot(&pubkey, 0, &bank, Some(&blockstore), u64::MAX)
.unwrap()
.0,
1
);

// Write a shred into slot 2 that chains to slot 1,
// but slot 1 is empty so should not be skipped
let (shreds, _) = make_slot_entries(2, 1, 1, /*merkle_variant:*/ true);
blockstore.insert_shreds(shreds, None, false).unwrap();
assert_eq!(
cache
.next_leader_slot(&pubkey, 0, &bank, Some(&blockstore), u64::MAX)
.unwrap()
.0,
1
);

// Write a shred into slot 1
let (shreds, _) = make_slot_entries(1, 0, 1, /*merkle_variant:*/ true);

// Check that slot 1 and 2 are skipped
blockstore.insert_shreds(shreds, None, false).unwrap();
assert_eq!(
cache
.next_leader_slot(&pubkey, 0, &bank, Some(&blockstore), u64::MAX)
.unwrap()
.0,
3
);

// Integrity checks
assert_eq!(
cache.next_leader_slot(
&pubkey,
2 * genesis_config.epoch_schedule.slots_per_epoch - 1, // no schedule generated for epoch 2
&bank,
Some(&blockstore),
u64::MAX
),
None
);

assert_eq!(
cache.next_leader_slot(
&solana_sdk::pubkey::new_rand(), // not in leader_schedule
0,
&bank,
Some(&blockstore),
u64::MAX
),
None
@@ -554,25 +461,19 @@ mod tests {

// If the max root isn't set, we'll get None
assert!(cache
.next_leader_slot(&node_pubkey, 0, &bank, None, u64::MAX)
.next_leader_slot(&node_pubkey, 0, &bank, u64::MAX)
.is_none());

cache.set_root(&bank);
let res = cache
.next_leader_slot(&node_pubkey, 0, &bank, None, u64::MAX)
.next_leader_slot(&node_pubkey, 0, &bank, u64::MAX)
.unwrap();

assert_eq!(res.0, expected_slot);
assert!(res.1 >= expected_slot + NUM_CONSECUTIVE_LEADER_SLOTS - 1);

let res = cache
.next_leader_slot(
&node_pubkey,
0,
&bank,
None,
NUM_CONSECUTIVE_LEADER_SLOTS - 1,
)
.next_leader_slot(&node_pubkey, 0, &bank, NUM_CONSECUTIVE_LEADER_SLOTS - 1)
.unwrap();

assert_eq!(res.0, expected_slot);
52 changes: 31 additions & 21 deletions poh/src/poh_recorder.rs
@@ -315,23 +315,7 @@ impl PohRecorder {
fn clear_bank(&mut self) {
if let Some(WorkingBank { bank, start, .. }) = self.working_bank.take() {
self.leader_bank_notifier.set_completed(bank.slot());
let next_leader_slot = self.leader_schedule_cache.next_leader_slot(
bank.collector_id(),
bank.slot(),
&bank,
Some(&self.blockstore),
GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS,
);
assert_eq!(self.ticks_per_slot, bank.ticks_per_slot());
let (
leader_first_tick_height_including_grace_ticks,
leader_last_tick_height,
grace_ticks,
) = Self::compute_leader_slot_tick_heights(next_leader_slot, self.ticks_per_slot);
self.grace_ticks = grace_ticks;
self.leader_first_tick_height_including_grace_ticks =
leader_first_tick_height_including_grace_ticks;
self.leader_last_tick_height = leader_last_tick_height;
self.set_next_leader_tick_height_from_cache(bank.collector_id(), bank.slot(), &bank);

datapoint_info!(
"leader-slot-start-to-cleared-elapsed-ms",
@@ -353,6 +337,22 @@ impl PohRecorder {
}
}

fn set_next_leader_tick_height_from_cache(&mut self, pubkey: &Pubkey, slot: Slot, bank: &Bank) {
let next_leader_slot = self.leader_schedule_cache.next_leader_slot(
pubkey,
slot,
bank,
GRACE_TICKS_FACTOR * MAX_GRACE_SLOTS,
);
assert_eq!(self.ticks_per_slot, bank.ticks_per_slot());
let (leader_first_tick_height_including_grace_ticks, leader_last_tick_height, grace_ticks) =
Self::compute_leader_slot_tick_heights(next_leader_slot, self.ticks_per_slot);
self.grace_ticks = grace_ticks;
self.leader_first_tick_height_including_grace_ticks =
leader_first_tick_height_including_grace_ticks;
self.leader_last_tick_height = leader_last_tick_height;
}

pub fn would_be_leader(&self, within_next_n_ticks: u64) -> bool {
self.has_bank()
|| self.leader_first_tick_height_including_grace_ticks.map_or(
@@ -552,7 +552,7 @@ impl PohRecorder {
/// Returns if the leader slot has been reached along with the current poh
/// slot and the parent slot (could be a few slots ago if any previous
/// leaders needed to be skipped).
pub fn reached_leader_slot(&self, my_pubkey: &Pubkey) -> PohLeaderStatus {
pub fn reached_leader_slot(&mut self, my_pubkey: &Pubkey) -> PohLeaderStatus {
trace!(
"tick_height {}, start_tick_height {}, leader_first_tick_height_including_grace_ticks {:?}, grace_ticks {}, has_bank {}",
self.tick_height,
@@ -564,6 +564,18 @@

let next_tick_height = self.tick_height + 1;
let next_poh_slot = self.slot_for_tick_height(next_tick_height);

if next_tick_height > self.leader_last_tick_height {
// We have ticked through all our leader slots. We need to update to our
// next leader slots.
let last_slot = self.slot_for_tick_height(self.leader_last_tick_height);
self.set_next_leader_tick_height_from_cache(
my_pubkey,
last_slot,
&self.start_bank.clone(),
);
}

let Some(leader_first_tick_height_including_grace_ticks) =
self.leader_first_tick_height_including_grace_ticks
else {
@@ -574,9 +586,7 @@
if self.blockstore.has_existing_shreds_for_slot(next_poh_slot) {
// We already have existing shreds for this slot. This can happen when this block was previously
// created and added to BankForks, however a recent PoH reset caused this bank to be removed
// as it was not part of the rooted fork. If this slot is not the first slot for this leader,
// and the first slot was previously ticked over, the check in `leader_schedule_cache::next_leader_slot`
// will not suffice, as it only checks if there are shreds for the first slot.
// as it was not part of the rooted fork.
return PohLeaderStatus::NotReached;
}
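
Taken together, a rough caller-side sketch of the new flow; the names mirror the diffs above (PohLeaderStatus, reached_leader_slot, the write lock in replay_stage.rs), but this is an illustration under those assumptions, not the complete API:

// reached_leader_slot now takes &mut self, so callers hold the write lock.
// Once the cached leader tick range has been ticked past, the call refreshes
// the leader first/last tick heights from the leader schedule cache before
// deciding whether this validator's slot has been reached.
let mut poh = poh_recorder.write().unwrap();
match poh.reached_leader_slot(&my_pubkey) {
    PohLeaderStatus::Reached { poh_slot, parent_slot } => {
        // Our leader slot: build the leader bank for poh_slot on top of
        // parent_slot (which may be a few slots back if prior leaders were
        // skipped).
        info!("reached leader slot {poh_slot}, parent {parent_slot}");
    }
    PohLeaderStatus::NotReached => {
        // Not our slot yet, or shreds for the next PoH slot already exist in
        // the blockstore; that check now lives only here, not in
        // next_leader_slot.
    }
}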

Expand Down
