use crate::{
metrics::{MakeCanonicalAction, MakeCanonicalDurationsRecorder, TreeMetrics},
state::{SidechainId, TreeState},
AppendableChain, BlockIndices, BlockchainTreeConfig, ExecutionData, TreeExternals,
};
use alloy_eips::{BlockNumHash, ForkBlock};
use alloy_primitives::{BlockHash, BlockNumber, B256, U256};
use reth_blockchain_tree_api::{
error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind},
BlockAttachment, BlockStatus, BlockValidationKind, CanonicalOutcome, InsertPayloadOk,
};
use reth_consensus::{Consensus, ConsensusError};
use reth_evm::execute::BlockExecutorProvider;
use reth_execution_errors::{BlockExecutionError, BlockValidationError};
use reth_execution_types::{Chain, ExecutionOutcome};
use reth_node_types::NodeTypesWithDB;
use reth_primitives::{
EthereumHardfork, GotExpected, Hardforks, Receipt, SealedBlock, SealedBlockWithSenders,
SealedHeader, StaticFileSegment,
};
use reth_provider::{
providers::ProviderNodeTypes, BlockExecutionWriter, BlockNumReader, BlockWriter,
CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications,
ChainSpecProvider, ChainSplit, ChainSplitTarget, DBProvider, DisplayBlocksChain,
HeaderProvider, ProviderError, StaticFileProviderFactory,
};
use reth_stages_api::{MetricEvent, MetricEventsSender};
use reth_storage_errors::provider::{ProviderResult, RootMismatch};
use reth_trie::{hashed_cursor::HashedPostStateCursorFactory, StateRoot};
use reth_trie_db::{DatabaseHashedCursorFactory, DatabaseStateRoot};
use std::{
collections::{btree_map::Entry, BTreeMap, HashSet},
sync::Arc,
};
use tracing::{debug, error, info, instrument, trace, warn};
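/// A tree of chains that tracks forks of the canonical chain.
///
/// Side chains are kept fully in memory together with their execution outcomes, and blocks
/// whose parent is not yet known are buffered. When a block hash is made canonical via
/// [`BlockchainTree::make_canonical`], the corresponding chain is split off, committed to the
/// database, and subscribers are notified through the canon state notification channel.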
#[cfg_attr(doc, aquamarine::aquamarine)]
#[derive(Debug)]
pub struct BlockchainTree<N: NodeTypesWithDB, E> {
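    /// In-memory tree state: side chains, block indices and the buffer of unconnected blocks.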
state: TreeState,
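    /// External components: provider factory, consensus, and block executor.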
externals: TreeExternals<N, E>,
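    /// Tree configuration, e.g. max reorg depth and the number of canonical hashes to keep.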
config: BlockchainTreeConfig,
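    /// Broadcast channel used to notify subscribers of new canonical state (commits and reorgs).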
canon_state_notification_sender: CanonStateNotificationSender,
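    /// Optional sender for sync metric events (e.g. the canonical sync height).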
sync_metrics_tx: Option<MetricEventsSender>,
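    /// Metrics for the blockchain tree.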
metrics: TreeMetrics,
}
impl<N: NodeTypesWithDB, E> BlockchainTree<N, E> {
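    /// Subscribes to new canonical state notifications.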
pub fn subscribe_canon_state(&self) -> CanonStateNotifications {
self.canon_state_notification_sender.subscribe()
}
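    /// Returns a clone of the canonical state notification sender.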
pub fn canon_state_notification_sender(&self) -> CanonStateNotificationSender {
self.canon_state_notification_sender.clone()
}
}
impl<N, E> BlockchainTree<N, E>
where
N: ProviderNodeTypes,
E: BlockExecutorProvider,
{
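    /// Builds the blockchain tree.
    ///
    /// Creates the canon state notification channel (sized at twice the configured max reorg
    /// depth) and initializes the in-memory tree state from the latest canonical hashes and the
    /// last finalized block number read from the database.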
pub fn new(
externals: TreeExternals<N, E>,
config: BlockchainTreeConfig,
) -> ProviderResult<Self> {
let max_reorg_depth = config.max_reorg_depth() as usize;
let (canon_state_notification_sender, _receiver) =
tokio::sync::broadcast::channel(max_reorg_depth * 2);
let last_canonical_hashes =
externals.fetch_latest_canonical_hashes(config.num_of_canonical_hashes() as usize)?;
let last_finalized_block_number =
externals.fetch_latest_finalized_block_number()?.unwrap_or_default();
Ok(Self {
externals,
state: TreeState::new(
last_finalized_block_number,
last_canonical_hashes,
config.max_unconnected_blocks(),
),
config,
canon_state_notification_sender,
sync_metrics_tx: None,
metrics: Default::default(),
})
}
#[doc(hidden)]
pub fn with_canon_state_notification_sender(
mut self,
canon_state_notification_sender: CanonStateNotificationSender,
) -> Self {
self.canon_state_notification_sender = canon_state_notification_sender;
self
}
pub fn with_sync_metrics_tx(mut self, metrics_tx: MetricEventsSender) -> Self {
self.sync_metrics_tx = Some(metrics_tx);
self
}
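    /// Checks whether the block is already known, either as part of the canonical chain, a side
    /// chain, or the buffer of unconnected blocks.
    ///
    /// Returns `Ok(Some(status))` if the block is known and `Ok(None)` otherwise. Returns an
    /// error if the block number is at or below the last finalized block but the hash cannot be
    /// found in the database.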
pub(crate) fn is_block_known(
&self,
block: BlockNumHash,
) -> Result<Option<BlockStatus>, InsertBlockErrorKind> {
if self.is_block_hash_canonical(&block.hash)? {
return Ok(Some(BlockStatus::Valid(BlockAttachment::Canonical)));
}
let last_finalized_block = self.block_indices().last_finalized_block();
if block.number <= last_finalized_block {
if self.externals.provider_factory.provider()?.block_number(block.hash)?.is_some() {
return Ok(Some(BlockStatus::Valid(BlockAttachment::Canonical)));
}
return Err(BlockchainTreeError::PendingBlockIsFinalized {
last_finalized: last_finalized_block,
}
.into())
}
if let Some(attachment) = self.is_block_inside_sidechain(&block) {
return Ok(Some(BlockStatus::Valid(attachment)));
}
if let Some(block) = self.state.buffered_blocks.block(&block.hash) {
return Ok(Some(BlockStatus::Disconnected {
head: self.state.block_indices.canonical_tip(),
missing_ancestor: block.parent_num_hash(),
}))
}
Ok(None)
}
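    /// Returns the block indices that track which blocks belong to which chains.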
#[inline]
pub const fn block_indices(&self) -> &BlockIndices {
self.state.block_indices()
}
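    /// Returns the block with the matching hash from any side chain, if it is in the tree.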
#[inline]
pub fn sidechain_block_by_hash(&self, block_hash: BlockHash) -> Option<&SealedBlock> {
self.state.block_by_hash(block_hash)
}
#[inline]
pub fn block_with_senders_by_hash(
&self,
block_hash: BlockHash,
) -> Option<&SealedBlockWithSenders> {
self.state.block_with_senders_by_hash(block_hash)
}
pub fn receipts_by_block_hash(&self, block_hash: BlockHash) -> Option<Vec<&Receipt>> {
self.state.receipts_by_block_hash(block_hash)
}
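    /// Returns the pending block, i.e. the tree block indexed as extending the canonical tip,
    /// if any.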
pub fn pending_block(&self) -> Option<&SealedBlock> {
let b = self.block_indices().pending_block_num_hash()?;
self.sidechain_block_by_hash(b.hash)
}
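    /// Returns the execution data required to execute on top of the given block hash, if the
    /// hash belongs to the tree or to the tracked canonical chain.
    ///
    /// For a side-chain block this includes the chain's execution outcome up to that block, the
    /// parent block hashes, and the block at which the chain forks off the canonical chain. For
    /// a canonical block, only the fork point and the canonical hashes are returned.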
pub fn post_state_data(&self, block_hash: BlockHash) -> Option<ExecutionData> {
trace!(target: "blockchain_tree", ?block_hash, "Searching for post state data");
let canonical_chain = self.state.block_indices.canonical_chain();
if let Some(chain_id) = self.block_indices().get_side_chain_id(&block_hash) {
trace!(target: "blockchain_tree", ?block_hash, "Constructing post state data based on non-canonical chain");
let Some(chain) = self.state.chains.get(&chain_id) else {
debug!(target: "blockchain_tree", ?chain_id, "Chain with ID not present");
return None;
};
let block_number = chain.block_number(block_hash)?;
let execution_outcome = chain.execution_outcome_at_block(block_number)?;
let mut parent_block_hashes = self.all_chain_hashes(chain_id);
let Some((first_pending_block_number, _)) = parent_block_hashes.first_key_value()
else {
debug!(target: "blockchain_tree", ?chain_id, "No block hashes stored");
return None;
};
let canonical_chain = canonical_chain
.iter()
.filter(|&(key, _)| &key < first_pending_block_number)
.collect::<Vec<_>>();
parent_block_hashes.extend(canonical_chain);
let canonical_fork = self.canonical_fork(chain_id)?;
return Some(ExecutionData { execution_outcome, parent_block_hashes, canonical_fork });
}
if let Some(canonical_number) = canonical_chain.canonical_number(&block_hash) {
trace!(target: "blockchain_tree", %block_hash, "Constructing post state data based on canonical chain");
return Some(ExecutionData {
canonical_fork: ForkBlock { number: canonical_number, hash: block_hash },
execution_outcome: ExecutionOutcome::default(),
parent_block_hashes: canonical_chain.inner().clone(),
});
}
None
}
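    /// Tries to insert a block that already passed consensus validation.
    ///
    /// Depending on where the parent is found, the block is appended to an existing side chain,
    /// attached as a new fork of the canonical chain, or buffered as an unconnected block, in
    /// which case [`BlockStatus::Disconnected`] with the lowest missing ancestor is returned.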
#[instrument(level = "trace", skip_all, fields(block = ?block.num_hash()), target = "blockchain_tree", ret)]
fn try_insert_validated_block(
&mut self,
block: SealedBlockWithSenders,
block_validation_kind: BlockValidationKind,
) -> Result<BlockStatus, InsertBlockErrorKind> {
debug_assert!(self.validate_block(&block).is_ok(), "Block must be validated");
let parent = block.parent_num_hash();
if let Some(chain_id) = self.block_indices().get_side_chain_id(&parent.hash) {
return self.try_insert_block_into_side_chain(block, chain_id, block_validation_kind);
}
if self.is_block_hash_canonical(&parent.hash)? {
            return self.try_append_canonical_chain(block, block_validation_kind);
}
if let Some(canonical_parent_number) =
self.block_indices().canonical_number(&block.parent_hash)
{
if canonical_parent_number != parent.number {
return Err(ConsensusError::ParentBlockNumberMismatch {
parent_block_number: canonical_parent_number,
block_number: block.number,
}
.into())
}
}
if let Some(buffered_parent) = self.state.buffered_blocks.block(&parent.hash) {
self.externals.consensus.validate_header_against_parent(&block, buffered_parent)?;
}
self.state.buffered_blocks.insert_block(block.clone());
let block_hash = block.hash();
let lowest_ancestor = self
.state
.buffered_blocks
.lowest_ancestor(&block_hash)
.ok_or(BlockchainTreeError::BlockBufferingFailed { block_hash })?;
Ok(BlockStatus::Disconnected {
head: self.state.block_indices.canonical_tip(),
missing_ancestor: lowest_ancestor.parent_num_hash(),
})
}
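    /// Tries to append the block to the canonical chain by creating a new side chain that forks
    /// off a canonical block.
    ///
    /// Blocks whose parent total difficulty is still pre-merge are rejected. The attachment is
    /// [`BlockAttachment::Canonical`] when the parent is the canonical tip, otherwise it is a
    /// historical fork.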
#[instrument(level = "trace", skip_all, target = "blockchain_tree")]
fn try_append_canonical_chain(
&mut self,
block: SealedBlockWithSenders,
block_validation_kind: BlockValidationKind,
) -> Result<BlockStatus, InsertBlockErrorKind> {
let parent = block.parent_num_hash();
let block_num_hash = block.num_hash();
debug!(target: "blockchain_tree", head = ?block_num_hash.hash, ?parent, "Appending block to canonical chain");
let provider = self.externals.provider_factory.provider()?;
let parent_td = provider
.header_td(&block.parent_hash)?
.ok_or_else(|| BlockchainTreeError::CanonicalChain { block_hash: block.parent_hash })?;
if !self
.externals
.provider_factory
.chain_spec()
.fork(EthereumHardfork::Paris)
.active_at_ttd(parent_td, U256::ZERO)
{
return Err(BlockExecutionError::Validation(BlockValidationError::BlockPreMerge {
hash: block.hash(),
})
.into())
}
let parent_header = provider
.header(&block.parent_hash)?
.ok_or_else(|| BlockchainTreeError::CanonicalChain { block_hash: block.parent_hash })?;
let parent_sealed_header = SealedHeader::new(parent_header, block.parent_hash);
let canonical_chain = self.state.block_indices.canonical_chain();
let block_attachment = if block.parent_hash == canonical_chain.tip().hash {
BlockAttachment::Canonical
} else {
BlockAttachment::HistoricalFork
};
let chain = AppendableChain::new_canonical_fork(
block,
&parent_sealed_header,
canonical_chain.inner(),
parent,
&self.externals,
block_attachment,
block_validation_kind,
)?;
self.insert_chain(chain);
self.try_connect_buffered_blocks(block_num_hash);
Ok(BlockStatus::Valid(block_attachment))
}
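    /// Tries to insert the block into an existing side chain.
    ///
    /// If the block extends the side chain tip it is appended to that chain; otherwise a new
    /// chain is forked off the side chain at the block's parent.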
#[instrument(level = "trace", skip_all, target = "blockchain_tree")]
fn try_insert_block_into_side_chain(
&mut self,
block: SealedBlockWithSenders,
chain_id: SidechainId,
block_validation_kind: BlockValidationKind,
) -> Result<BlockStatus, InsertBlockErrorKind> {
let block_num_hash = block.num_hash();
debug!(target: "blockchain_tree", ?block_num_hash, ?chain_id, "Inserting block into side chain");
let block_hashes = self.all_chain_hashes(chain_id);
let canonical_fork = self.canonical_fork(chain_id).ok_or_else(|| {
BlockchainTreeError::BlockSideChainIdConsistency { chain_id: chain_id.into() }
})?;
let parent_chain = self.state.chains.get_mut(&chain_id).ok_or_else(|| {
BlockchainTreeError::BlockSideChainIdConsistency { chain_id: chain_id.into() }
})?;
let chain_tip = parent_chain.tip().hash();
let canonical_chain = self.state.block_indices.canonical_chain();
let block_attachment = if chain_tip == block.parent_hash {
let block_attachment = if canonical_fork.hash == canonical_chain.tip().hash {
BlockAttachment::Canonical
} else {
BlockAttachment::HistoricalFork
};
let block_hash = block.hash();
let block_number = block.number;
debug!(target: "blockchain_tree", ?block_hash, ?block_number, "Appending block to side chain");
parent_chain.append_block(
block,
block_hashes,
canonical_chain.inner(),
&self.externals,
canonical_fork,
block_attachment,
block_validation_kind,
)?;
self.state.block_indices.insert_non_fork_block(block_number, block_hash, chain_id);
block_attachment
} else {
debug!(target: "blockchain_tree", ?canonical_fork, "Starting new fork from side chain");
let chain = parent_chain.new_chain_fork(
block,
block_hashes,
canonical_chain.inner(),
canonical_fork,
&self.externals,
block_validation_kind,
)?;
self.insert_chain(chain);
BlockAttachment::HistoricalFork
};
self.try_connect_buffered_blocks(block_num_hash);
Ok(BlockStatus::Valid(block_attachment))
}
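    /// Collects block number to hash mappings for the given chain and all of its parent chains,
    /// walking fork blocks until a chain that forks directly off the canonical chain is reached.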
fn all_chain_hashes(&self, chain_id: SidechainId) -> BTreeMap<BlockNumber, BlockHash> {
let mut chain_id = chain_id;
let mut hashes = BTreeMap::new();
loop {
let Some(chain) = self.state.chains.get(&chain_id) else { return hashes };
let latest_block_number = hashes
.last_key_value()
.map(|(number, _)| *number)
.unwrap_or_else(|| chain.tip().number);
for block in chain.blocks().values().filter(|b| b.number <= latest_block_number) {
if let Entry::Vacant(e) = hashes.entry(block.number) {
e.insert(block.hash());
}
}
let fork_block = chain.fork_block();
if let Some(next_chain_id) = self.block_indices().get_side_chain_id(&fork_block.hash) {
chain_id = next_chain_id;
} else {
break
}
}
hashes
}
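    /// Walks the parent chains of the given chain and returns the canonical block at which the
    /// whole lineage forks off the canonical chain, or `None` if that fork block is not part of
    /// the tracked canonical chain.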
fn canonical_fork(&self, chain_id: SidechainId) -> Option<ForkBlock> {
let mut chain_id = chain_id;
let mut fork;
loop {
fork = self.state.chains.get(&chain_id)?.fork_block();
if let Some(fork_chain_id) = self.block_indices().get_side_chain_id(&fork.hash) {
chain_id = fork_chain_id;
continue
}
break
}
(self.block_indices().canonical_hash(&fork.number) == Some(fork.hash)).then_some(fork)
}
fn insert_chain(&mut self, chain: AppendableChain) -> Option<SidechainId> {
self.state.insert_chain(chain)
}
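    /// Returns the ids of all side chains that, directly or transitively, fork off the given
    /// block hash.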
fn find_all_dependent_chains(&self, block: &BlockHash) -> HashSet<SidechainId> {
let mut dependent_block =
self.block_indices().fork_to_child().get(block).cloned().unwrap_or_default();
let mut dependent_chains = HashSet::default();
while let Some(block) = dependent_block.pop_back() {
let Some(chain_id) = self.block_indices().get_side_chain_id(&block) else {
debug!(target: "blockchain_tree", ?block, "Block not in tree");
return Default::default();
};
let Some(chain) = self.state.chains.get(&chain_id) else {
debug!(target: "blockchain_tree", ?chain_id, "Chain not in tree");
return Default::default();
};
for chain_block in chain.blocks().values() {
if let Some(forks) = self.block_indices().fork_to_child().get(&chain_block.hash()) {
dependent_block.extend(forks);
}
}
dependent_chains.insert(chain_id);
}
dependent_chains
}
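    /// Inserts an unwound (formerly canonical) chain into the tree.
    ///
    /// For every block in the chain, the execution outcome reverted to that block is prepended
    /// to all side chains forking off it, so their state remains complete on top of the new
    /// canonical state.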
fn insert_unwound_chain(&mut self, chain: AppendableChain) -> Option<SidechainId> {
for (number, block) in chain.blocks() {
let hash = block.hash();
let chains_to_bump = self.find_all_dependent_chains(&hash);
if !chains_to_bump.is_empty() {
let mut cloned_execution_outcome = chain.execution_outcome().clone();
cloned_execution_outcome.revert_to(*number);
for chain_id in chains_to_bump {
let Some(chain) = self.state.chains.get_mut(&chain_id) else {
debug!(target: "blockchain_tree", ?chain_id, "Chain not in tree");
return None;
};
debug!(target: "blockchain_tree",
unwound_block= ?block.num_hash(),
chain_id = ?chain_id,
chain_tip = ?chain.tip().num_hash(),
"Prepend unwound block state to blockchain tree chain");
chain.prepend_state(cloned_execution_outcome.state().clone())
}
}
}
self.insert_chain(chain)
}
pub fn get_buffered_block(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> {
self.state.get_buffered_block(hash)
}
pub fn lowest_buffered_ancestor(&self, hash: &BlockHash) -> Option<&SealedBlockWithSenders> {
self.state.lowest_buffered_ancestor(hash)
}
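    /// Recovers the transaction senders for the block and inserts it into the tree using
    /// [`BlockValidationKind::Exhaustive`] validation.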
pub fn insert_block_without_senders(
&mut self,
block: SealedBlock,
) -> Result<InsertPayloadOk, InsertBlockError> {
match block.try_seal_with_senders() {
Ok(block) => self.insert_block(block, BlockValidationKind::Exhaustive),
Err(block) => Err(InsertBlockError::sender_recovery_error(block)),
}
}
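    /// Validates the block against consensus rules and, on success, adds it to the buffer of
    /// blocks that are not yet connected to the tree.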
pub fn buffer_block(&mut self, block: SealedBlockWithSenders) -> Result<(), InsertBlockError> {
if let Err(err) = self.validate_block(&block) {
return Err(InsertBlockError::consensus_error(err, block.block));
}
self.state.buffered_blocks.insert_block(block);
Ok(())
}
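    /// Validates the block against pre-execution consensus rules: total difficulty, standalone
    /// header checks, and pre-execution block checks.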
fn validate_block(&self, block: &SealedBlockWithSenders) -> Result<(), ConsensusError> {
if let Err(e) =
self.externals.consensus.validate_header_with_total_difficulty(block, U256::MAX)
{
error!(
?block,
"Failed to validate total difficulty for block {}: {e}",
block.header.hash()
);
return Err(e);
}
if let Err(e) = self.externals.consensus.validate_header(block) {
error!(?block, "Failed to validate header {}: {e}", block.header.hash());
return Err(e);
}
if let Err(e) = self.externals.consensus.validate_block_pre_execution(block) {
error!(?block, "Failed to validate block {}: {e}", block.header.hash());
return Err(e);
}
Ok(())
}
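    /// Checks whether the block hash belongs to a side chain and, if so, returns how that chain
    /// attaches to the canonical chain (canonical tip fork or historical fork).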
#[track_caller]
fn is_block_inside_sidechain(&self, block: &BlockNumHash) -> Option<BlockAttachment> {
if let Some(chain_id) = self.block_indices().get_side_chain_id(&block.hash) {
let Some(canonical_fork) = self.canonical_fork(chain_id) else {
debug!(target: "blockchain_tree", chain_id=?chain_id, block=?block.hash, "Chain id not valid");
return None;
};
return if canonical_fork == self.block_indices().canonical_tip() {
Some(BlockAttachment::Canonical)
} else {
Some(BlockAttachment::HistoricalFork)
};
}
None
}
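    /// Inserts a block into the tree.
    ///
    /// Returns [`InsertPayloadOk::AlreadySeen`] if the block is already known; otherwise the
    /// block is validated against consensus rules and inserted, and the resulting
    /// [`BlockStatus`] is returned. Blocks whose parent is unknown are buffered and reported as
    /// disconnected.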
pub fn insert_block(
&mut self,
block: SealedBlockWithSenders,
block_validation_kind: BlockValidationKind,
) -> Result<InsertPayloadOk, InsertBlockError> {
match self.is_block_known(block.num_hash()) {
Ok(Some(status)) => return Ok(InsertPayloadOk::AlreadySeen(status)),
Err(err) => return Err(InsertBlockError::new(block.block, err)),
_ => {}
}
if let Err(err) = self.validate_block(&block) {
return Err(InsertBlockError::consensus_error(err, block.block));
}
let status = self
.try_insert_validated_block(block.clone(), block_validation_kind)
.map_err(|kind| InsertBlockError::new(block.block, kind))?;
Ok(InsertPayloadOk::Inserted(status))
}
pub fn remove_old_blocks(&mut self, block: BlockNumber) {
self.state.buffered_blocks.remove_old_blocks(block);
}
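    /// Finalizes blocks up to the given block number.
    ///
    /// Removes side chains that become obsolete, drops buffered blocks older than the finalized
    /// block, and persists the finalized block number.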
pub fn finalize_block(&mut self, finalized_block: BlockNumber) -> ProviderResult<()> {
let mut remove_chains = self.state.block_indices.finalize_canonical_blocks(
finalized_block,
self.config.num_of_additional_canonical_block_hashes(),
);
while let Some(chain_id) = remove_chains.pop_first() {
if let Some(chain) = self.state.chains.remove(&chain_id) {
remove_chains.extend(self.state.block_indices.remove_chain(&chain));
}
}
self.remove_old_blocks(finalized_block);
self.externals.save_finalized_block_number(finalized_block)?;
Ok(())
}
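    /// Finalizes blocks up to `last_finalized_block`, refreshes the tracked canonical hashes
    /// from the database, and tries to connect buffered blocks to them.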
pub fn connect_buffered_blocks_to_canonical_hashes_and_finalize(
&mut self,
last_finalized_block: BlockNumber,
) -> ProviderResult<()> {
self.finalize_block(last_finalized_block)?;
let last_canonical_hashes = self.update_block_hashes()?;
self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?;
Ok(())
}
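    /// Refreshes the tracked canonical block hashes from the database and removes side chains
    /// that no longer attach to the canonical chain. Returns the fetched canonical hashes.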
pub fn update_block_hashes(&mut self) -> ProviderResult<BTreeMap<BlockNumber, B256>> {
let last_canonical_hashes = self
.externals
.fetch_latest_canonical_hashes(self.config.num_of_canonical_hashes() as usize)?;
let (mut remove_chains, _) =
self.state.block_indices.update_block_hashes(last_canonical_hashes.clone());
        while let Some(chain_id) = remove_chains.pop_first() {
            if let Some(chain) = self.state.chains.remove(&chain_id) {
remove_chains.extend(self.state.block_indices.remove_chain(&chain));
}
}
Ok(last_canonical_hashes)
}
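    /// Same as [`Self::update_block_hashes`], but additionally drops buffered blocks older than
    /// the highest fetched canonical block.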
pub fn update_block_hashes_and_clear_buffered(
&mut self,
) -> ProviderResult<BTreeMap<BlockNumber, BlockHash>> {
let chain = self.update_block_hashes()?;
if let Some((block, _)) = chain.last_key_value() {
self.remove_old_blocks(*block);
}
Ok(chain)
}
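    /// Reads the last canonical hashes from the database and tries to connect buffered blocks
    /// to them.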
pub fn connect_buffered_blocks_to_canonical_hashes(&mut self) -> ProviderResult<()> {
let last_canonical_hashes = self
.externals
.fetch_latest_canonical_hashes(self.config.num_of_canonical_hashes() as usize)?;
self.connect_buffered_blocks_to_hashes(last_canonical_hashes)?;
Ok(())
}
fn connect_buffered_blocks_to_hashes(
&mut self,
hashes: impl IntoIterator<Item = impl Into<BlockNumHash>>,
) -> ProviderResult<()> {
for added_block in hashes {
self.try_connect_buffered_blocks(added_block.into())
}
let mut all_chain_blocks = Vec::new();
for chain in self.state.chains.values() {
all_chain_blocks.reserve_exact(chain.blocks().len());
for (&number, block) in chain.blocks() {
all_chain_blocks.push(BlockNumHash { number, hash: block.hash() })
}
}
for block in all_chain_blocks {
self.try_connect_buffered_blocks(block)
}
Ok(())
}
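    /// Tries to insert buffered blocks that descend from the newly added block, skipping state
    /// root validation. Failures are logged and ignored.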
fn try_connect_buffered_blocks(&mut self, new_block: BlockNumHash) {
trace!(target: "blockchain_tree", ?new_block, "try_connect_buffered_blocks");
let include_blocks = self.state.buffered_blocks.remove_block_with_children(&new_block.hash);
for block in include_blocks {
let _ = self
.try_insert_validated_block(block, BlockValidationKind::SkipStateRootValidation)
.map_err(|err| {
debug!(target: "blockchain_tree", %err, "Failed to insert buffered block");
err
});
}
}
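    /// Removes the chain from the tree and splits it at the given target.
    ///
    /// The canonical part is returned; the pending part, if any, is re-inserted into the tree
    /// under the same chain id.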
fn remove_and_split_chain(
&mut self,
chain_id: SidechainId,
split_at: ChainSplitTarget,
) -> Option<Chain> {
let chain = self.state.chains.remove(&chain_id)?;
match chain.into_inner().split(split_at) {
ChainSplit::Split { canonical, pending } => {
trace!(target: "blockchain_tree", ?canonical, ?pending, "Split chain");
self.state.block_indices.insert_chain(chain_id, &pending);
self.state.chains.insert(chain_id, AppendableChain::new(pending));
Some(canonical)
}
ChainSplit::NoSplitCanonical(canonical) => {
trace!(target: "blockchain_tree", "No split on canonical chain");
Some(canonical)
}
ChainSplit::NoSplitPending(_) => {
unreachable!("Should not happen as block indices guarantee structure of blocks")
}
}
}
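    /// Returns the canonical header for the given hash, checking the tracked canonical indices
    /// first and the database second. Returns `Ok(None)` if the hash belongs to a side chain
    /// block.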
pub fn find_canonical_header(
&self,
hash: &BlockHash,
) -> Result<Option<SealedHeader>, ProviderError> {
let provider = self.externals.provider_factory.provider()?;
let mut header = None;
if let Some(num) = self.block_indices().canonical_number(hash) {
header = provider.header_by_number(num)?;
}
if header.is_none() && self.sidechain_block_by_hash(*hash).is_some() {
return Ok(None)
}
if header.is_none() {
header = provider.header(hash)?
}
Ok(header.map(|header| SealedHeader::new(header, *hash)))
}
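    /// Returns `true` if the block hash is part of the canonical chain.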
pub fn is_block_hash_canonical(&self, hash: &BlockHash) -> Result<bool, ProviderError> {
self.find_canonical_header(hash).map(|header| header.is_some())
}
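    /// Makes the block with the given hash the new canonical head.
    ///
    /// If the hash is already canonical, [`CanonicalOutcome::AlreadyCanonical`] is returned
    /// (pre-merge hashes are rejected). Otherwise the containing side chain and all of its
    /// parent chains are split at their fork points, merged into a single chain, and committed
    /// to the database. If the new chain does not extend the current tip, the old canonical
    /// blocks above the fork point are reverted from the database, re-inserted into the tree,
    /// and a reorg notification is emitted; otherwise a commit notification is emitted.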
#[track_caller]
#[instrument(level = "trace", skip(self), target = "blockchain_tree")]
pub fn make_canonical(
&mut self,
block_hash: BlockHash,
) -> Result<CanonicalOutcome, CanonicalError> {
let mut durations_recorder = MakeCanonicalDurationsRecorder::default();
let old_block_indices = self.block_indices().clone();
let old_buffered_blocks = self.state.buffered_blocks.parent_to_child.clone();
durations_recorder.record_relative(MakeCanonicalAction::CloneOldBlocks);
let canonical_header = self.find_canonical_header(&block_hash)?;
durations_recorder.record_relative(MakeCanonicalAction::FindCanonicalHeader);
if let Some(header) = canonical_header {
info!(target: "blockchain_tree", %block_hash, "Block is already canonical, ignoring.");
let td =
self.externals.provider_factory.provider()?.header_td(&block_hash)?.ok_or_else(
|| {
CanonicalError::from(BlockValidationError::MissingTotalDifficulty {
hash: block_hash,
})
},
)?;
if !self
.externals
.provider_factory
.chain_spec()
.fork(EthereumHardfork::Paris)
.active_at_ttd(td, U256::ZERO)
{
return Err(CanonicalError::from(BlockValidationError::BlockPreMerge {
hash: block_hash,
}))
}
let head = self.state.block_indices.canonical_tip();
return Ok(CanonicalOutcome::AlreadyCanonical { header, head });
}
let Some(chain_id) = self.block_indices().get_side_chain_id(&block_hash) else {
debug!(target: "blockchain_tree", ?block_hash, "Block hash not found in block indices");
return Err(CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain {
block_hash,
}))
};
let Some(canonical) = self.remove_and_split_chain(chain_id, block_hash.into()) else {
debug!(target: "blockchain_tree", ?block_hash, ?chain_id, "Chain not present");
return Err(CanonicalError::from(BlockchainTreeError::BlockSideChainIdConsistency {
chain_id: chain_id.into(),
}))
};
trace!(target: "blockchain_tree", chain = ?canonical, "Found chain to make canonical");
durations_recorder.record_relative(MakeCanonicalAction::SplitChain);
let mut fork_block = canonical.fork_block();
let mut chains_to_promote = vec![canonical];
while let Some(chain_id) = self.block_indices().get_side_chain_id(&fork_block.hash) {
let Some(canonical) =
self.remove_and_split_chain(chain_id, ChainSplitTarget::Number(fork_block.number))
else {
debug!(target: "blockchain_tree", ?fork_block, ?chain_id, "Fork not present");
return Err(CanonicalError::from(
BlockchainTreeError::BlockSideChainIdConsistency { chain_id: chain_id.into() },
));
};
fork_block = canonical.fork_block();
chains_to_promote.push(canonical);
}
durations_recorder.record_relative(MakeCanonicalAction::SplitChainForks);
let old_tip = self.block_indices().canonical_tip();
let Some(mut new_canon_chain) = chains_to_promote.pop() else {
debug!(target: "blockchain_tree", "No blocks in the chain to make canonical");
return Err(CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain {
block_hash: fork_block.hash,
}))
};
trace!(target: "blockchain_tree", ?new_canon_chain, "Merging chains");
let mut chain_appended = false;
for chain in chains_to_promote.into_iter().rev() {
trace!(target: "blockchain_tree", ?chain, "Appending chain");
let block_hash = chain.fork_block().hash;
new_canon_chain.append_chain(chain).map_err(|_| {
CanonicalError::from(BlockchainTreeError::BlockHashNotFoundInChain { block_hash })
})?;
chain_appended = true;
}
durations_recorder.record_relative(MakeCanonicalAction::MergeAllChains);
if chain_appended {
trace!(target: "blockchain_tree", ?new_canon_chain, "Canonical chain appended");
}
self.state.block_indices.canonicalize_blocks(new_canon_chain.blocks());
durations_recorder.record_relative(MakeCanonicalAction::UpdateCanonicalIndex);
debug!(
target: "blockchain_tree",
"Committing new canonical chain: {}", DisplayBlocksChain(new_canon_chain.blocks())
);
let chain_notification = if new_canon_chain.fork_block().hash == old_tip.hash {
self.commit_canonical_to_database(new_canon_chain.clone(), &mut durations_recorder)?;
CanonStateNotification::Commit { new: Arc::new(new_canon_chain) }
} else {
let canon_fork: BlockNumHash = new_canon_chain.fork_block();
if self.block_indices().canonical_hash(&canon_fork.number) != Some(canon_fork.hash) {
error!(
target: "blockchain_tree",
?canon_fork,
block_indices=?self.block_indices(),
"All chains should point to canonical chain"
);
unreachable!("all chains should point to canonical chain.");
}
let old_canon_chain =
self.revert_canonical_from_database(canon_fork.number).inspect_err(|error| {
error!(
target: "blockchain_tree",
"Reverting canonical chain failed with error: {:?}\n\
Old BlockIndices are:{:?}\n\
New BlockIndices are: {:?}\n\
Old BufferedBlocks are:{:?}",
error, old_block_indices, self.block_indices(), old_buffered_blocks
);
})?;
durations_recorder
.record_relative(MakeCanonicalAction::RevertCanonicalChainFromDatabase);
self.commit_canonical_to_database(new_canon_chain.clone(), &mut durations_recorder)?;
if let Some(old_canon_chain) = old_canon_chain {
self.update_reorg_metrics(old_canon_chain.len() as f64);
self.insert_unwound_chain(AppendableChain::new(old_canon_chain.clone()));
durations_recorder.record_relative(MakeCanonicalAction::InsertOldCanonicalChain);
CanonStateNotification::Reorg {
old: Arc::new(old_canon_chain),
new: Arc::new(new_canon_chain),
}
} else {
error!(target: "blockchain_tree", %block_hash, "Nothing was removed from database");
CanonStateNotification::Commit { new: Arc::new(new_canon_chain) }
}
};
debug!(
target: "blockchain_tree",
actions = ?durations_recorder.actions,
"Canonicalization finished"
);
self.block_indices()
.fork_to_child()
.get(&old_tip.hash)
.cloned()
.unwrap_or_default()
.into_iter()
.for_each(|child| {
if let Some(chain_id) = self.block_indices().get_side_chain_id(&child) {
if let Some(chain) = self.state.chains.get_mut(&chain_id) {
chain.clear_trie_updates();
}
}
});
durations_recorder.record_relative(MakeCanonicalAction::ClearTrieUpdatesForOtherChildren);
let outcome = CanonicalOutcome::Committed { head: chain_notification.tip().header.clone() };
let _ = self.canon_state_notification_sender.send(chain_notification);
Ok(outcome)
}
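    /// Writes the given chain to the database as the new canonical chain.
    ///
    /// Uses cached trie updates when the chain carries them; otherwise the state root is
    /// recomputed against the database and checked against the chain tip before blocks, state,
    /// and trie updates are appended and committed in a single write transaction.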
fn commit_canonical_to_database(
&self,
chain: Chain,
recorder: &mut MakeCanonicalDurationsRecorder,
) -> Result<(), CanonicalError> {
let (blocks, state, chain_trie_updates) = chain.into_inner();
let hashed_state = state.hash_state_slow();
let prefix_sets = hashed_state.construct_prefix_sets().freeze();
let hashed_state_sorted = hashed_state.into_sorted();
let block_hash_numbers =
blocks.iter().map(|(number, b)| (number, b.hash())).collect::<Vec<_>>();
let trie_updates = match chain_trie_updates {
Some(updates) => {
debug!(target: "blockchain_tree", blocks = ?block_hash_numbers, "Using cached trie updates");
self.metrics.trie_updates_insert_cached.increment(1);
updates
}
None => {
debug!(target: "blockchain_tree", blocks = ?block_hash_numbers, "Recomputing state root for insert");
let provider = self
.externals
.provider_factory
.provider()?
.disable_long_read_transaction_safety();
let (state_root, trie_updates) = StateRoot::from_tx(provider.tx_ref())
.with_hashed_cursor_factory(HashedPostStateCursorFactory::new(
DatabaseHashedCursorFactory::new(provider.tx_ref()),
&hashed_state_sorted,
))
.with_prefix_sets(prefix_sets)
.root_with_updates()
.map_err(Into::<BlockValidationError>::into)?;
let tip = blocks.tip();
if state_root != tip.state_root {
return Err(ProviderError::StateRootMismatch(Box::new(RootMismatch {
root: GotExpected { got: state_root, expected: tip.state_root },
block_number: tip.number,
block_hash: tip.hash(),
}))
.into())
}
self.metrics.trie_updates_insert_recomputed.increment(1);
trie_updates
}
};
recorder.record_relative(MakeCanonicalAction::RetrieveStateTrieUpdates);
let provider_rw = self.externals.provider_factory.provider_rw()?;
provider_rw
.append_blocks_with_state(
blocks.into_blocks().collect(),
state,
hashed_state_sorted,
trie_updates,
)
.map_err(|e| CanonicalError::CanonicalCommit(e.to_string()))?;
provider_rw.commit()?;
recorder.record_relative(MakeCanonicalAction::CommitCanonicalChainToDatabase);
Ok(())
}
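    /// Unwinds the canonical chain in the database down to the given block number and moves the
    /// reverted blocks back into the tree as an unwound chain. This is a no-op if the canonical
    /// tip is already at or below `unwind_to`.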
pub fn unwind(&mut self, unwind_to: BlockNumber) -> Result<(), CanonicalError> {
if self.block_indices().canonical_tip().number <= unwind_to {
return Ok(());
}
let old_canon_chain = self.revert_canonical_from_database(unwind_to)?;
if let Some(old_canon_chain) = old_canon_chain {
self.state.block_indices.unwind_canonical_chain(unwind_to);
self.insert_unwound_chain(AppendableChain::new(old_canon_chain));
}
Ok(())
}
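    /// Reverts canonical blocks above `revert_until` from the database and returns them as a
    /// [`Chain`], or `None` if nothing was removed.
    ///
    /// Returns [`CanonicalError::OptimisticTargetRevert`] if the headers static file already
    /// extends beyond the revert target, since that part of the chain cannot be reverted here.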
fn revert_canonical_from_database(
&self,
revert_until: BlockNumber,
) -> Result<Option<Chain>, CanonicalError> {
if self
.externals
.provider_factory
.static_file_provider()
.get_highest_static_file_block(StaticFileSegment::Headers)
.unwrap_or_default() >
revert_until
{
trace!(
target: "blockchain_tree",
"Reverting optimistic canonical chain to block {}",
revert_until
);
return Err(CanonicalError::OptimisticTargetRevert(revert_until));
}
let provider_rw = self.externals.provider_factory.provider_rw()?;
let tip = provider_rw.last_block_number()?;
let revert_range = (revert_until + 1)..=tip;
info!(target: "blockchain_tree", "REORG: revert canonical from database by unwinding chain blocks {:?}", revert_range);
let blocks_and_execution = provider_rw
.take_block_and_execution_range(revert_range)
.map_err(|e| CanonicalError::CanonicalRevert(e.to_string()))?;
provider_rw.commit()?;
if blocks_and_execution.is_empty() {
Ok(None)
} else {
Ok(Some(blocks_and_execution))
}
}
fn update_reorg_metrics(&self, reorg_depth: f64) {
self.metrics.reorgs.increment(1);
self.metrics.latest_reorg_depth.set(reorg_depth);
}
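    /// Updates the tree metrics (canonical height, number and height of side chains) and
    /// forwards the canonical sync height to the sync metrics listener, if configured.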
pub(crate) fn update_chains_metrics(&mut self) {
let height = self.state.block_indices.canonical_tip().number;
let longest_sidechain_height =
self.state.chains.values().map(|chain| chain.tip().number).max();
if let Some(longest_sidechain_height) = longest_sidechain_height {
self.metrics.longest_sidechain_height.set(longest_sidechain_height as f64);
}
self.metrics.sidechains.set(self.state.chains.len() as f64);
self.metrics.canonical_chain_height.set(height as f64);
if let Some(metrics_tx) = self.sync_metrics_tx.as_mut() {
let _ = metrics_tx.send(MetricEvent::SyncHeight { height });
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use alloy_consensus::{Header, TxEip1559, EMPTY_ROOT_HASH};
use alloy_eips::{eip1559::INITIAL_BASE_FEE, eip4895::Withdrawals};
use alloy_genesis::{Genesis, GenesisAccount};
use alloy_primitives::{keccak256, Address, PrimitiveSignature as Signature, B256};
use assert_matches::assert_matches;
use linked_hash_set::LinkedHashSet;
use reth_chainspec::{ChainSpecBuilder, MAINNET, MIN_TRANSACTION_GAS};
use reth_consensus::test_utils::TestConsensus;
use reth_db::tables;
use reth_db_api::transaction::DbTxMut;
use reth_evm::test_utils::MockExecutorProvider;
use reth_evm_ethereum::execute::EthExecutorProvider;
use reth_primitives::{
proofs::{calculate_receipt_root, calculate_transaction_root},
revm_primitives::AccountInfo,
Account, BlockBody, Transaction, TransactionSigned, TransactionSignedEcRecovered,
};
use reth_provider::{
test_utils::{
blocks::BlockchainTestData, create_test_provider_factory_with_chain_spec,
MockNodeTypesWithDB,
},
ProviderFactory,
};
use reth_stages_api::StageCheckpoint;
use reth_trie::{root::state_root_unhashed, StateRoot};
use std::collections::HashMap;
fn setup_externals(
exec_res: Vec<ExecutionOutcome>,
) -> TreeExternals<MockNodeTypesWithDB, MockExecutorProvider> {
let chain_spec = Arc::new(
ChainSpecBuilder::default()
.chain(MAINNET.chain)
.genesis(MAINNET.genesis.clone())
.shanghai_activated()
.build(),
);
let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec);
let consensus = Arc::new(TestConsensus::default());
let executor_factory = MockExecutorProvider::default();
executor_factory.extend(exec_res);
TreeExternals::new(provider_factory, consensus, executor_factory)
}
fn setup_genesis<N: ProviderNodeTypes>(factory: &ProviderFactory<N>, mut genesis: SealedBlock) {
genesis.header.set_block_number(10);
genesis.header.set_state_root(EMPTY_ROOT_HASH);
let provider = factory.provider_rw().unwrap();
provider
.insert_historical_block(
genesis.try_seal_with_senders().expect("invalid tx signature in genesis"),
)
.unwrap();
for i in 0..10 {
provider
.tx_ref()
.put::<tables::CanonicalHeaders>(i, B256::new([100 + i as u8; 32]))
.unwrap();
}
provider
.tx_ref()
.put::<tables::StageCheckpoints>("Finish".to_string(), StageCheckpoint::new(10))
.unwrap();
provider.commit().unwrap();
}
#[derive(Default, Debug)]
struct TreeTester {
chain_num: Option<usize>,
block_to_chain: Option<HashMap<BlockHash, SidechainId>>,
fork_to_child: Option<HashMap<BlockHash, HashSet<BlockHash>>>,
pending_blocks: Option<(BlockNumber, HashSet<BlockHash>)>,
buffered_blocks: Option<HashMap<BlockHash, SealedBlockWithSenders>>,
}
impl TreeTester {
const fn with_chain_num(mut self, chain_num: usize) -> Self {
self.chain_num = Some(chain_num);
self
}
fn with_block_to_chain(mut self, block_to_chain: HashMap<BlockHash, SidechainId>) -> Self {
self.block_to_chain = Some(block_to_chain);
self
}
fn with_fork_to_child(
mut self,
fork_to_child: HashMap<BlockHash, HashSet<BlockHash>>,
) -> Self {
self.fork_to_child = Some(fork_to_child);
self
}
fn with_buffered_blocks(
mut self,
buffered_blocks: HashMap<BlockHash, SealedBlockWithSenders>,
) -> Self {
self.buffered_blocks = Some(buffered_blocks);
self
}
fn with_pending_blocks(
mut self,
pending_blocks: (BlockNumber, HashSet<BlockHash>),
) -> Self {
self.pending_blocks = Some(pending_blocks);
self
}
fn assert<N: NodeTypesWithDB, E: BlockExecutorProvider>(self, tree: &BlockchainTree<N, E>) {
if let Some(chain_num) = self.chain_num {
assert_eq!(tree.state.chains.len(), chain_num);
}
if let Some(block_to_chain) = self.block_to_chain {
assert_eq!(*tree.state.block_indices.blocks_to_chain(), block_to_chain);
}
if let Some(fork_to_child) = self.fork_to_child {
let mut x: HashMap<BlockHash, LinkedHashSet<BlockHash>> =
HashMap::with_capacity(fork_to_child.len());
for (key, hash_set) in fork_to_child {
x.insert(key, hash_set.into_iter().collect());
}
assert_eq!(*tree.state.block_indices.fork_to_child(), x);
}
if let Some(pending_blocks) = self.pending_blocks {
let (num, hashes) = tree.state.block_indices.pending_blocks();
let hashes = hashes.into_iter().collect::<HashSet<_>>();
assert_eq!((num, hashes), pending_blocks);
}
if let Some(buffered_blocks) = self.buffered_blocks {
assert_eq!(*tree.state.buffered_blocks.blocks(), buffered_blocks);
}
}
}
#[test]
fn consecutive_reorgs() {
let signer = Address::random();
let initial_signer_balance = U256::from(10).pow(U256::from(18));
let chain_spec = Arc::new(
ChainSpecBuilder::default()
.chain(MAINNET.chain)
.genesis(Genesis {
alloc: BTreeMap::from([(
signer,
GenesisAccount { balance: initial_signer_balance, ..Default::default() },
)]),
..MAINNET.genesis.clone()
})
.shanghai_activated()
.build(),
);
let provider_factory = create_test_provider_factory_with_chain_spec(chain_spec.clone());
let consensus = Arc::new(TestConsensus::default());
let executor_provider = EthExecutorProvider::ethereum(chain_spec.clone());
{
let provider_rw = provider_factory.provider_rw().unwrap();
provider_rw
.insert_block(
SealedBlock::new(chain_spec.sealed_genesis_header(), Default::default())
.try_seal_with_senders()
.unwrap(),
)
.unwrap();
let account = Account { balance: initial_signer_balance, ..Default::default() };
provider_rw.tx_ref().put::<tables::PlainAccountState>(signer, account).unwrap();
provider_rw.tx_ref().put::<tables::HashedAccounts>(keccak256(signer), account).unwrap();
provider_rw.commit().unwrap();
}
let single_tx_cost = U256::from(INITIAL_BASE_FEE * MIN_TRANSACTION_GAS);
let mock_tx = |nonce: u64| -> TransactionSignedEcRecovered {
TransactionSigned::from_transaction_and_signature(
Transaction::Eip1559(TxEip1559 {
chain_id: chain_spec.chain.id(),
nonce,
gas_limit: MIN_TRANSACTION_GAS,
to: Address::ZERO.into(),
max_fee_per_gas: INITIAL_BASE_FEE as u128,
..Default::default()
}),
Signature::test_signature(),
)
.with_signer(signer)
};
let mock_block = |number: u64,
parent: Option<B256>,
body: Vec<TransactionSignedEcRecovered>,
num_of_signer_txs: u64|
-> SealedBlockWithSenders {
let transactions_root = calculate_transaction_root(&body);
let receipts = body
.iter()
.enumerate()
.map(|(idx, tx)| {
Receipt {
tx_type: tx.tx_type(),
success: true,
cumulative_gas_used: (idx as u64 + 1) * MIN_TRANSACTION_GAS,
..Default::default()
}
.with_bloom()
})
.collect::<Vec<_>>();
let receipts_root = calculate_receipt_root(&receipts);
let header = Header {
number,
parent_hash: parent.unwrap_or_default(),
gas_used: body.len() as u64 * MIN_TRANSACTION_GAS,
gas_limit: chain_spec.max_gas_limit,
mix_hash: B256::random(),
base_fee_per_gas: Some(INITIAL_BASE_FEE),
transactions_root,
receipts_root,
state_root: state_root_unhashed(HashMap::from([(
signer,
(
AccountInfo {
balance: initial_signer_balance -
(single_tx_cost * U256::from(num_of_signer_txs)),
nonce: num_of_signer_txs,
..Default::default()
},
EMPTY_ROOT_HASH,
),
)])),
..Default::default()
};
SealedBlockWithSenders::new(
SealedBlock {
header: SealedHeader::seal(header),
body: BlockBody {
transactions: body.clone().into_iter().map(|tx| tx.into_signed()).collect(),
ommers: Vec::new(),
withdrawals: Some(Withdrawals::default()),
},
},
body.iter().map(|tx| tx.signer()).collect(),
)
.unwrap()
};
let fork_block = mock_block(1, Some(chain_spec.genesis_hash()), Vec::from([mock_tx(0)]), 1);
let canonical_block_1 =
mock_block(2, Some(fork_block.hash()), Vec::from([mock_tx(1), mock_tx(2)]), 3);
let canonical_block_2 = mock_block(3, Some(canonical_block_1.hash()), Vec::new(), 3);
let canonical_block_3 =
mock_block(4, Some(canonical_block_2.hash()), Vec::from([mock_tx(3)]), 4);
let sidechain_block_1 = mock_block(2, Some(fork_block.hash()), Vec::from([mock_tx(1)]), 2);
let sidechain_block_2 =
mock_block(3, Some(sidechain_block_1.hash()), Vec::from([mock_tx(2)]), 3);
let mut tree = BlockchainTree::new(
TreeExternals::new(provider_factory, consensus, executor_provider),
BlockchainTreeConfig::default(),
)
.expect("failed to create tree");
tree.insert_block(fork_block.clone(), BlockValidationKind::Exhaustive).unwrap();
assert_eq!(
tree.make_canonical(fork_block.hash()).unwrap(),
CanonicalOutcome::Committed { head: fork_block.header.clone() }
);
assert_eq!(
tree.insert_block(canonical_block_1.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
);
assert_eq!(
tree.make_canonical(canonical_block_1.hash()).unwrap(),
CanonicalOutcome::Committed { head: canonical_block_1.header.clone() }
);
assert_eq!(
tree.insert_block(canonical_block_2, BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
);
assert_eq!(
tree.insert_block(sidechain_block_1.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork))
);
assert_eq!(
tree.make_canonical(sidechain_block_1.hash()).unwrap(),
CanonicalOutcome::Committed { head: sidechain_block_1.header.clone() }
);
assert_eq!(
tree.make_canonical(canonical_block_1.hash()).unwrap(),
CanonicalOutcome::Committed { head: canonical_block_1.header.clone() }
);
assert_eq!(
tree.insert_block(sidechain_block_2.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork))
);
assert_eq!(
tree.make_canonical(sidechain_block_2.hash()).unwrap(),
CanonicalOutcome::Committed { head: sidechain_block_2.header.clone() }
);
assert_eq!(
tree.insert_block(canonical_block_3.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork))
);
assert_eq!(
tree.make_canonical(canonical_block_3.hash()).unwrap(),
CanonicalOutcome::Committed { head: canonical_block_3.header.clone() }
);
}
#[test]
fn sidechain_block_hashes() {
let data = BlockchainTestData::default_from_number(11);
let (block1, exec1) = data.blocks[0].clone();
let (block2, exec2) = data.blocks[1].clone();
let (block3, exec3) = data.blocks[2].clone();
let (block4, exec4) = data.blocks[3].clone();
let genesis = data.genesis;
let externals =
setup_externals(vec![exec3.clone(), exec2.clone(), exec4, exec3, exec2, exec1]);
setup_genesis(&externals.provider_factory, genesis);
let config = BlockchainTreeConfig::new(1, 2, 3, 2);
let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree");
tree.make_canonical(B256::ZERO).unwrap();
tree.finalize_block(10).unwrap();
assert_eq!(
tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
);
assert_eq!(
tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
);
assert_eq!(
tree.insert_block(block3.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
);
assert_eq!(
tree.insert_block(block4, BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
);
let mut block2a = block2;
let block2a_hash = B256::new([0x34; 32]);
block2a.set_hash(block2a_hash);
assert_eq!(
tree.insert_block(block2a.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork))
);
let mut block3a = block3;
let block3a_hash = B256::new([0x35; 32]);
block3a.set_hash(block3a_hash);
block3a.set_parent_hash(block2a.hash());
assert_eq!(
tree.insert_block(block3a.clone(), BlockValidationKind::Exhaustive).unwrap(),
            InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
        );
let block3a_chain_id = tree.state.block_indices.get_side_chain_id(&block3a.hash()).unwrap();
assert_eq!(
tree.all_chain_hashes(block3a_chain_id),
BTreeMap::from([
(block1.number, block1.hash()),
(block2a.number, block2a.hash()),
(block3a.number, block3a.hash()),
])
);
}
#[test]
fn cached_trie_updates() {
let data = BlockchainTestData::default_from_number(11);
let (block1, exec1) = data.blocks[0].clone();
let (block2, exec2) = data.blocks[1].clone();
let (block3, exec3) = data.blocks[2].clone();
let (block4, exec4) = data.blocks[3].clone();
let (block5, exec5) = data.blocks[4].clone();
let genesis = data.genesis;
let externals = setup_externals(vec![exec5.clone(), exec4, exec3, exec2, exec1]);
setup_genesis(&externals.provider_factory, genesis);
let config = BlockchainTreeConfig::new(1, 2, 3, 2);
let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree");
tree.make_canonical(B256::ZERO).unwrap();
tree.finalize_block(10).unwrap();
assert_eq!(
tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
);
let block1_chain_id = tree.state.block_indices.get_side_chain_id(&block1.hash()).unwrap();
let block1_chain = tree.state.chains.get(&block1_chain_id).unwrap();
assert!(block1_chain.trie_updates().is_some());
assert_eq!(
tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
);
let block2_chain_id = tree.state.block_indices.get_side_chain_id(&block2.hash()).unwrap();
let block2_chain = tree.state.chains.get(&block2_chain_id).unwrap();
assert!(block2_chain.trie_updates().is_none());
assert_eq!(
tree.make_canonical(block2.hash()).unwrap(),
CanonicalOutcome::Committed { head: block2.header.clone() }
);
assert_eq!(
tree.insert_block(block3.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
);
let block3_chain_id = tree.state.block_indices.get_side_chain_id(&block3.hash()).unwrap();
let block3_chain = tree.state.chains.get(&block3_chain_id).unwrap();
assert!(block3_chain.trie_updates().is_some());
assert_eq!(
tree.make_canonical(block3.hash()).unwrap(),
CanonicalOutcome::Committed { head: block3.header.clone() }
);
assert_eq!(
tree.insert_block(block4.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
);
let block4_chain_id = tree.state.block_indices.get_side_chain_id(&block4.hash()).unwrap();
let block4_chain = tree.state.chains.get(&block4_chain_id).unwrap();
assert!(block4_chain.trie_updates().is_some());
assert_eq!(
tree.insert_block(block5.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
);
let block5_chain_id = tree.state.block_indices.get_side_chain_id(&block5.hash()).unwrap();
let block5_chain = tree.state.chains.get(&block5_chain_id).unwrap();
assert!(block5_chain.trie_updates().is_none());
assert_eq!(
tree.make_canonical(block5.hash()).unwrap(),
CanonicalOutcome::Committed { head: block5.header.clone() }
);
let provider = tree.externals.provider_factory.provider().unwrap();
let prefix_sets = exec5.hash_state_slow().construct_prefix_sets().freeze();
let state_root =
StateRoot::from_tx(provider.tx_ref()).with_prefix_sets(prefix_sets).root().unwrap();
assert_eq!(state_root, block5.state_root);
}
#[test]
fn test_side_chain_fork() {
let data = BlockchainTestData::default_from_number(11);
let (block1, exec1) = data.blocks[0].clone();
let (block2, exec2) = data.blocks[1].clone();
let genesis = data.genesis;
let externals = setup_externals(vec![exec2.clone(), exec2, exec1]);
setup_genesis(&externals.provider_factory, genesis);
let config = BlockchainTreeConfig::new(1, 2, 3, 2);
let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree");
tree.make_canonical(B256::ZERO).unwrap();
tree.finalize_block(10).unwrap();
assert_eq!(
tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
);
assert_eq!(
tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
);
TreeTester::default()
.with_chain_num(1)
.with_block_to_chain(HashMap::from([
(block1.hash(), 0.into()),
(block2.hash(), 0.into()),
]))
.with_fork_to_child(HashMap::from([(
block1.parent_hash,
HashSet::from([block1.hash()]),
)]))
.assert(&tree);
let mut block2a = block2.clone();
let block2a_hash = B256::new([0x34; 32]);
block2a.set_hash(block2a_hash);
assert_eq!(
tree.insert_block(block2a.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork))
);
TreeTester::default()
.with_chain_num(2)
.with_block_to_chain(HashMap::from([
(block1.hash(), 0.into()),
(block2.hash(), 0.into()),
(block2a.hash(), 1.into()),
]))
.with_fork_to_child(HashMap::from([
(block1.parent_hash, HashSet::from([block1.hash()])),
(block2a.parent_hash, HashSet::from([block2a.hash()])),
]))
.assert(&tree);
let chain0 = tree.state.chains.get(&0.into()).unwrap().execution_outcome();
assert_eq!(chain0.receipts().len(), 2);
assert_eq!(chain0.state().reverts.len(), 2);
assert_eq!(chain0.first_block(), block1.number);
let chain1 = tree.state.chains.get(&1.into()).unwrap().execution_outcome();
assert_eq!(chain1.receipts().len(), 1);
assert_eq!(chain1.state().reverts.len(), 1);
assert_eq!(chain1.first_block(), block2.number);
}
#[test]
fn sanity_path() {
let data = BlockchainTestData::default_from_number(11);
let (block1, exec1) = data.blocks[0].clone();
let (block2, exec2) = data.blocks[1].clone();
let genesis = data.genesis;
let externals = setup_externals(vec![exec2.clone(), exec1.clone(), exec2, exec1]);
setup_genesis(&externals.provider_factory, genesis);
let config = BlockchainTreeConfig::new(1, 2, 3, 2);
let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree");
let mut canon_notif = tree.subscribe_canon_state();
let head = BlockNumHash::new(10, B256::ZERO);
tree.make_canonical(head.hash).unwrap();
tree.is_block_hash_canonical(&B256::ZERO).unwrap();
tree.finalize_block(head.number).unwrap();
assert_eq!(
tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Disconnected {
head,
missing_ancestor: block2.parent_num_hash()
})
);
TreeTester::default()
.with_buffered_blocks(HashMap::from([(block2.hash(), block2.clone())]))
.assert(&tree);
assert_eq!(
tree.is_block_known(block2.num_hash()).unwrap(),
Some(BlockStatus::Disconnected { head, missing_ancestor: block2.parent_num_hash() })
);
let old_block = BlockNumHash::new(1, B256::new([32; 32]));
let err = BlockchainTreeError::PendingBlockIsFinalized { last_finalized: 10 };
assert_eq!(tree.is_block_known(old_block).unwrap_err().as_tree_error(), Some(err));
assert_eq!(
tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
);
TreeTester::default()
.with_chain_num(1)
.with_block_to_chain(HashMap::from([
(block1.hash(), 0.into()),
(block2.hash(), 0.into()),
]))
.with_fork_to_child(HashMap::from([(
block1.parent_hash,
HashSet::from([block1.hash()]),
)]))
.with_pending_blocks((block1.number, HashSet::from([block1.hash()])))
.assert(&tree);
assert_eq!(
tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::AlreadySeen(BlockStatus::Valid(BlockAttachment::Canonical))
);
assert_eq!(
tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::AlreadySeen(BlockStatus::Valid(BlockAttachment::Canonical))
);
tree.make_canonical(block1.hash()).unwrap();
assert_matches!(canon_notif.try_recv(), Ok(CanonStateNotification::Commit{ new}) if *new.blocks() == BTreeMap::from([(block1.number,block1.clone())]));
tree.make_canonical(block2.hash()).unwrap();
assert_matches!(canon_notif.try_recv(), Ok(CanonStateNotification::Commit{ new}) if *new.blocks() == BTreeMap::from([(block2.number,block2.clone())]));
TreeTester::default()
.with_chain_num(0)
.with_block_to_chain(HashMap::from([]))
.with_fork_to_child(HashMap::from([]))
.assert(&tree);
let mut block1a = block1.clone();
let block1a_hash = B256::new([0x33; 32]);
block1a.set_hash(block1a_hash);
let mut block2a = block2.clone();
let block2a_hash = B256::new([0x34; 32]);
block2a.set_hash(block2a_hash);
assert_eq!(
tree.insert_block(block1a.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork))
);
TreeTester::default()
.with_chain_num(1)
.with_block_to_chain(HashMap::from([(block1a_hash, 1.into())]))
.with_fork_to_child(HashMap::from([(
block1.parent_hash,
HashSet::from([block1a_hash]),
)]))
.with_pending_blocks((block2.number + 1, HashSet::from([])))
.assert(&tree);
assert_eq!(
tree.insert_block(block2a.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork))
);
TreeTester::default()
.with_chain_num(2)
.with_block_to_chain(HashMap::from([
(block1a_hash, 1.into()),
(block2a_hash, 2.into()),
]))
.with_fork_to_child(HashMap::from([
(block1.parent_hash, HashSet::from([block1a_hash])),
(block1.hash(), HashSet::from([block2a_hash])),
]))
.with_pending_blocks((block2.number + 1, HashSet::from([])))
.assert(&tree);
assert!(tree.make_canonical(block2a_hash).is_ok());
assert_matches!(canon_notif.try_recv(),
Ok(CanonStateNotification::Reorg{ old, new})
if *old.blocks() == BTreeMap::from([(block2.number,block2.clone())])
&& *new.blocks() == BTreeMap::from([(block2a.number,block2a.clone())]));
TreeTester::default()
.with_chain_num(2)
.with_block_to_chain(HashMap::from([
(block1a_hash, 1.into()),
(block2.hash(), 3.into()),
]))
.with_fork_to_child(HashMap::from([
(block1.parent_hash, HashSet::from([block1a_hash])),
(block1.hash(), HashSet::from([block2.hash()])),
]))
.with_pending_blocks((block2.number + 1, HashSet::default()))
.assert(&tree);
assert_matches!(tree.make_canonical(block1a_hash), Ok(_));
TreeTester::default()
.with_chain_num(2)
.with_block_to_chain(HashMap::from([
(block1.hash(), 4.into()),
(block2a_hash, 4.into()),
(block2.hash(), 3.into()),
]))
.with_fork_to_child(HashMap::from([
(block1.parent_hash, HashSet::from([block1.hash()])),
(block1.hash(), HashSet::from([block2.hash()])),
]))
.with_pending_blocks((block1a.number + 1, HashSet::default()))
.assert(&tree);
assert_matches!(canon_notif.try_recv(),
Ok(CanonStateNotification::Reorg{ old, new})
if *old.blocks() == BTreeMap::from([(block1.number,block1.clone()),(block2a.number,block2a.clone())])
&& *new.blocks() == BTreeMap::from([(block1a.number,block1a.clone())]));
assert!(!tree.is_block_hash_canonical(&block2.hash()).unwrap());
assert!(!tree.is_block_hash_canonical(&block1.hash()).unwrap());
assert!(tree.is_block_hash_canonical(&block1a.hash()).unwrap());
tree.make_canonical(block2.hash()).unwrap();
TreeTester::default()
.with_chain_num(2)
.with_block_to_chain(HashMap::from([
(block1a_hash, 5.into()),
(block2a_hash, 4.into()),
]))
.with_fork_to_child(HashMap::from([
(block1.parent_hash, HashSet::from([block1a_hash])),
(block1.hash(), HashSet::from([block2a_hash])),
]))
.with_pending_blocks((block2.number + 1, HashSet::default()))
.assert(&tree);
assert_matches!(canon_notif.try_recv(),
Ok(CanonStateNotification::Reorg{ old, new})
if *old.blocks() == BTreeMap::from([(block1a.number,block1a.clone())])
&& *new.blocks() == BTreeMap::from([(block1.number,block1.clone()),(block2.number,block2.clone())]));
assert!(tree.is_block_hash_canonical(&block2.hash()).unwrap());
tree.finalize_block(11).unwrap();
TreeTester::default()
.with_chain_num(1)
.with_block_to_chain(HashMap::from([(block2a_hash, 4.into())]))
.with_fork_to_child(HashMap::from([(block1.hash(), HashSet::from([block2a_hash]))]))
.with_pending_blocks((block2.number + 1, HashSet::from([])))
.assert(&tree);
assert!(tree.unwind(block1.number).is_ok());
TreeTester::default()
.with_chain_num(2)
.with_block_to_chain(HashMap::from([
(block2a_hash, 4.into()),
(block2.hash(), 6.into()),
]))
.with_fork_to_child(HashMap::from([(
block1.hash(),
HashSet::from([block2a_hash, block2.hash()]),
)]))
.with_pending_blocks((block2.number, HashSet::from([block2.hash(), block2a.hash()])))
.assert(&tree);
tree.make_canonical(block2.hash()).unwrap();
TreeTester::default()
.with_chain_num(1)
.with_block_to_chain(HashMap::from([(block2a_hash, 4.into())]))
.with_fork_to_child(HashMap::from([(block1.hash(), HashSet::from([block2a_hash]))]))
.with_pending_blocks((block2.number + 1, HashSet::default()))
.assert(&tree);
assert_matches!(canon_notif.try_recv(),
Ok(CanonStateNotification::Commit{ new })
if *new.blocks() == BTreeMap::from([(block2.number,block2.clone())]));
let mut block2b = block2a.clone();
block2b.set_hash(B256::new([0x99; 32]));
block2b.set_parent_hash(B256::new([0x88; 32]));
assert_eq!(
tree.insert_block(block2b.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Disconnected {
head: block2.header.num_hash(),
missing_ancestor: block2b.parent_num_hash()
})
);
TreeTester::default()
.with_buffered_blocks(HashMap::from([(block2b.hash(), block2b.clone())]))
.assert(&tree);
assert!(tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(12).is_ok());
assert_eq!(
tree.is_block_known(block2.num_hash()).unwrap(),
Some(BlockStatus::Valid(BlockAttachment::Canonical))
);
TreeTester::default()
.with_chain_num(0)
.with_block_to_chain(HashMap::default())
.with_fork_to_child(HashMap::default())
.with_pending_blocks((block2.number + 1, HashSet::default()))
.with_buffered_blocks(HashMap::default())
.assert(&tree);
}
#[test]
fn last_finalized_block_initialization() {
let data = BlockchainTestData::default_from_number(11);
let (block1, exec1) = data.blocks[0].clone();
let (block2, exec2) = data.blocks[1].clone();
let (block3, exec3) = data.blocks[2].clone();
let genesis = data.genesis;
let externals =
setup_externals(vec![exec3.clone(), exec2.clone(), exec1.clone(), exec3, exec2, exec1]);
let cloned_externals_1 = TreeExternals {
provider_factory: externals.provider_factory.clone(),
executor_factory: externals.executor_factory.clone(),
consensus: externals.consensus.clone(),
};
let cloned_externals_2 = TreeExternals {
provider_factory: externals.provider_factory.clone(),
executor_factory: externals.executor_factory.clone(),
consensus: externals.consensus.clone(),
};
setup_genesis(&externals.provider_factory, genesis);
let config = BlockchainTreeConfig::new(1, 2, 3, 2);
let mut tree = BlockchainTree::new(externals, config).expect("failed to create tree");
assert_eq!(
tree.insert_block(block1.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
);
assert_eq!(
tree.insert_block(block2.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
);
assert_eq!(
tree.insert_block(block3, BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::Canonical))
);
tree.make_canonical(block2.hash()).unwrap();
let mut tree =
BlockchainTree::new(cloned_externals_1, config).expect("failed to create tree");
assert_eq!(tree.block_indices().last_finalized_block(), 0);
let mut block1a = block1;
let block1a_hash = B256::new([0x33; 32]);
block1a.set_hash(block1a_hash);
assert_eq!(
tree.insert_block(block1a.clone(), BlockValidationKind::Exhaustive).unwrap(),
InsertPayloadOk::Inserted(BlockStatus::Valid(BlockAttachment::HistoricalFork))
);
tree.make_canonical(block1a.hash()).unwrap();
tree.finalize_block(block1a.number).unwrap();
let tree = BlockchainTree::new(cloned_externals_2, config).expect("failed to create tree");
assert_eq!(tree.block_indices().last_finalized_block(), block1a.number);
}
}