Skip to main content

reth_engine_tree/tree/payload_processor/
multiproof.rs

1//! Multiproof task related functionality.
2
3use metrics::{Gauge, Histogram};
4use reth_metrics::Metrics;
5
6pub use reth_trie_parallel::state_root_task::{
7    evm_state_to_hashed_post_state, Source, StateHookSender, StateRootComputeOutcome,
8    StateRootHandle, StateRootMessage,
9};
10
/// The default max targets, for limiting the number of account and storage proof targets to be
/// fetched by a single worker. If exceeded, chunking is forced regardless of worker availability.
///
/// Intended as the default value for the `max_targets_for_chunking` argument of
/// [`dispatch_with_chunking`].
pub(crate) const DEFAULT_MAX_TARGETS_FOR_CHUNKING: usize = 300;
14
/// Metrics recorded by the multiproof / state-root task, emitted under the `tree.root`
/// scope (per the `#[metrics(scope = ...)]` attribute; individual metric names are
/// presumably derived from the field names by the `Metrics` derive — confirm against
/// `reth_metrics`).
#[derive(Metrics, Clone)]
#[metrics(scope = "tree.root")]
pub(crate) struct MultiProofTaskMetrics {
    /// Histogram of durations spent revealing multiproof results into the sparse trie.
    pub sparse_trie_reveal_multiproof_duration_histogram: Histogram,
    /// Histogram of durations spent coalescing multiple proof results from the channel.
    pub sparse_trie_proof_coalesce_duration_histogram: Histogram,
    /// Histogram of durations the event loop spent blocked waiting on channels.
    pub sparse_trie_channel_wait_duration_histogram: Histogram,
    /// Histogram of durations spent processing trie updates and promoting pending accounts.
    pub sparse_trie_process_updates_duration_histogram: Histogram,
    /// Histogram of sparse trie final update durations.
    pub sparse_trie_final_update_duration_histogram: Histogram,
    /// Histogram of sparse trie total durations.
    pub sparse_trie_total_duration_histogram: Histogram,
    /// Time spent preparing the sparse trie for reuse after state root computation.
    pub into_trie_for_reuse_duration_histogram: Histogram,
    /// Time spent waiting for preserved sparse trie cache to become available.
    pub sparse_trie_cache_wait_duration_histogram: Histogram,
    /// Histogram for sparse trie task idle time in seconds (waiting for updates or proof
    /// results). Excludes the final wait after the channel is closed.
    pub sparse_trie_idle_time_seconds: Histogram,
    /// Histogram for hashing task idle time in seconds (waiting for messages from execution).
    /// Excludes the final wait after the channel is closed.
    pub hashing_task_idle_time_seconds: Histogram,

    // Leaf-update cache effectiveness counters (recorded as histograms of per-batch counts).
    /// Number of account leaf updates applied without needing a new proof (cache hits).
    pub sparse_trie_account_cache_hits: Histogram,
    /// Number of account leaf updates that required a new proof (cache misses).
    pub sparse_trie_account_cache_misses: Histogram,
    /// Number of storage leaf updates applied without needing a new proof (cache hits).
    pub sparse_trie_storage_cache_hits: Histogram,
    /// Number of storage leaf updates that required a new proof (cache misses).
    pub sparse_trie_storage_cache_misses: Histogram,

    // Gauges describing the size of the preserved (reused) sparse trie cache.
    /// Retained memory of the preserved sparse trie cache in bytes.
    pub sparse_trie_retained_memory_bytes: Gauge,
    /// Number of storage tries retained in the preserved sparse trie cache.
    pub sparse_trie_retained_storage_tries: Gauge,
}
55
/// Dispatches work items as a single unit or in chunks based on target size and worker
/// availability.
///
/// Chunking is considered when either spare (idle) account or storage workers could pick
/// up extra chunks, or when `chunking_len` exceeds `max_targets_for_chunking`. Even then,
/// the items are only split if they actually span more than one chunk
/// (`chunking_len > chunk_size`); otherwise everything is dispatched as a single unit.
#[expect(clippy::too_many_arguments)]
pub(crate) fn dispatch_with_chunking<T, I>(
    items: T,
    chunking_len: usize,
    chunk_size: usize,
    max_targets_for_chunking: usize,
    has_multiple_idle_account_workers: bool,
    has_multiple_idle_storage_workers: bool,
    chunker: impl FnOnce(T, usize) -> I,
    mut dispatch: impl FnMut(T),
) where
    I: IntoIterator<Item = T>,
{
    // Either kind of spare worker — or exceeding the hard target cap — makes splitting
    // worthwhile in principle.
    let chunking_wanted = has_multiple_idle_account_workers ||
        has_multiple_idle_storage_workers ||
        chunking_len > max_targets_for_chunking;

    if chunking_wanted && chunking_len > chunk_size {
        // Split into `chunk_size`-sized pieces and hand each one to the dispatcher.
        chunker(items, chunk_size).into_iter().for_each(dispatch);
    } else {
        // Too small to split (or no reason to): dispatch everything as one unit.
        dispatch(items);
    }
}