reth_trie_db/changesets.rs

//! Trie changeset computation and caching utilities.
//!
//! This module provides functionality to compute trie changesets for a given block,
//! which represent the old trie node values before the block was processed.
//!
//! It also provides an efficient in-memory cache for these changesets, which is essential for:
//! - **Reorg support**: Quickly access changesets to revert blocks during chain reorganizations
//! - **Memory efficiency**: Explicit eviction after persistence keeps memory usage bounded

use crate::{DatabaseHashedPostState, DatabaseStateRoot, DatabaseTrieCursorFactory};
use alloy_primitives::{map::B256Map, BlockNumber, B256};
use parking_lot::RwLock;
use reth_storage_api::{
    BlockNumReader, ChangeSetReader, DBProvider, StageCheckpointReader, StorageChangeSetReader,
};
use reth_storage_errors::provider::{ProviderError, ProviderResult};
use reth_trie::{
    changesets::compute_trie_changesets,
    trie_cursor::{InMemoryTrieCursorFactory, TrieCursor, TrieCursorFactory},
    HashedPostStateSorted, KeccakKeyHasher, StateRoot, TrieInputSorted,
};
use reth_trie_common::updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted};
use std::{
    collections::{BTreeMap, HashMap},
    ops::RangeInclusive,
    sync::Arc,
    time::Instant,
};
use tracing::debug;

#[cfg(feature = "metrics")]
use reth_metrics::{
    metrics::{Counter, Gauge},
    Metrics,
};

/// Computes trie changesets for a block.
///
/// # Algorithm
///
/// For block N:
/// 1. Query cumulative `HashedPostState` revert for block N-1 (from db tip to after N-1)
/// 2. Use that to calculate cumulative `TrieUpdates` revert for block N-1
/// 3. Query per-block `HashedPostState` revert for block N
/// 4. Create prefix sets from the per-block revert (step 3)
/// 5. Create an overlay from the cumulative trie updates for block N-1 and the cumulative
///    state revert for block N
/// 6. Calculate trie updates for block N using that overlay and the prefix sets from step 4
/// 7. Compute changesets using the N-1 overlay and the newly calculated trie updates for N
///
/// # Arguments
///
/// * `provider` - Database provider with changeset access
/// * `block_number` - Block number to compute changesets for
///
/// # Returns
///
/// Changesets (old trie node values) for the specified block
///
/// # Errors
///
/// Returns an error if:
/// - Block number exceeds database tip (based on Finish stage checkpoint)
/// - Database access fails
/// - State root computation fails
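///
/// # Example
///
/// A minimal usage sketch (`provider` is assumed to be any provider type satisfying the trait
/// bounds below):
///
/// ```ignore
/// let changesets = compute_block_trie_changesets(&provider, 100)?;
/// // Old values of every account trie node touched by block 100.
/// let reverted_account_nodes = changesets.account_nodes_ref().len();
/// ```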
pub fn compute_block_trie_changesets<Provider>(
    provider: &Provider,
    block_number: BlockNumber,
) -> Result<TrieUpdatesSorted, ProviderError>
where
    Provider: DBProvider
        + StageCheckpointReader
        + ChangeSetReader
        + StorageChangeSetReader
        + BlockNumReader,
{
    debug!(
        target: "trie::changeset_cache",
        block_number,
        "Computing block trie changesets from database state"
    );

    // Step 1: Collect/calculate state reverts

    // This is just the changes from this specific block
    let individual_state_revert = HashedPostStateSorted::from_reverts::<KeccakKeyHasher>(
        provider,
        block_number..=block_number,
    )?;

    // This reverts all changes from db tip back to just after the block was processed
    let cumulative_state_revert =
        HashedPostStateSorted::from_reverts::<KeccakKeyHasher>(provider, (block_number + 1)..)?;

    // This reverts all changes from db tip back to just after block-1 was processed
    let mut cumulative_state_revert_prev = cumulative_state_revert.clone();
    cumulative_state_revert_prev.extend_ref_and_sort(&individual_state_revert);

    // Step 2: Calculate cumulative trie updates revert for block-1
    // This gives us the trie state as it was after block-1 was processed
    let prefix_sets_prev = cumulative_state_revert_prev.construct_prefix_sets();
    let input_prev = TrieInputSorted::new(
        Arc::default(),
        Arc::new(cumulative_state_revert_prev),
        prefix_sets_prev,
    );

    let cumulative_trie_updates_prev =
        StateRoot::overlay_root_from_nodes_with_updates(provider.tx_ref(), input_prev)
            .map_err(ProviderError::other)?
            .1
            .into_sorted();

    // Step 3: Create prefix sets from individual revert (only paths changed by this block)
    let prefix_sets = individual_state_revert.construct_prefix_sets();

    // Step 4: Calculate trie updates for block
    // Use cumulative trie updates for block-1 as the node overlay and cumulative state for block
    let input = TrieInputSorted::new(
        Arc::new(cumulative_trie_updates_prev.clone()),
        Arc::new(cumulative_state_revert),
        prefix_sets,
    );

    let trie_updates = StateRoot::overlay_root_from_nodes_with_updates(provider.tx_ref(), input)
        .map_err(ProviderError::other)?
        .1
        .into_sorted();

    // Step 5: Compute changesets using cumulative trie updates for block-1 as overlay
    // Create an overlay cursor factory that has the trie state from after block-1
    let db_cursor_factory = DatabaseTrieCursorFactory::new(provider.tx_ref());
    let overlay_factory =
        InMemoryTrieCursorFactory::new(db_cursor_factory, &cumulative_trie_updates_prev);

    let changesets =
        compute_trie_changesets(&overlay_factory, &trie_updates).map_err(ProviderError::other)?;

    debug!(
        target: "trie::changeset_cache",
        block_number,
        num_account_nodes = changesets.account_nodes_ref().len(),
        num_storage_tries = changesets.storage_tries_ref().len(),
        "Computed block trie changesets successfully"
    );

    Ok(changesets)
}

/// Computes block trie updates using the changeset cache.
///
/// # Algorithm
///
/// For block N:
/// 1. Get cumulative trie reverts from block N+1 to db tip using the cache
/// 2. Create an overlay cursor factory with these reverts (representing trie state after block N)
/// 3. Walk through account trie changesets for block N
/// 4. For each changed path, look up the current value using the overlay cursor
/// 5. Walk through storage trie changesets for block N
/// 6. For each changed path, look up the current value using the overlay cursor
/// 7. Return the collected trie updates
///
/// # Arguments
///
/// * `cache` - Handle to the changeset cache for retrieving trie reverts
/// * `provider` - Database provider for accessing changesets and block data
/// * `block_number` - Block number to compute trie updates for
///
/// # Returns
///
/// Trie updates representing the state of trie nodes after the block was processed
///
/// # Errors
///
/// Returns an error if:
/// - Block number exceeds database tip
/// - Database access fails
/// - Cache retrieval fails
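///
/// # Example
///
/// A minimal usage sketch (`provider` is assumed to satisfy the trait bounds below; the cache
/// may be empty, since missing entries are computed on demand):
///
/// ```ignore
/// let cache = ChangesetCache::new();
/// let trie_updates = compute_block_trie_updates(&cache, &provider, 100)?;
/// ```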
pub fn compute_block_trie_updates<Provider>(
    cache: &ChangesetCache,
    provider: &Provider,
    block_number: BlockNumber,
) -> ProviderResult<TrieUpdatesSorted>
where
    Provider: DBProvider
        + StageCheckpointReader
        + ChangeSetReader
        + StorageChangeSetReader
        + BlockNumReader,
{
    let tx = provider.tx_ref();

    // Get the database tip block number
    let db_tip_block = provider
        .get_stage_checkpoint(reth_stages_types::StageId::Finish)?
        .as_ref()
        .map(|chk| chk.block_number)
        .ok_or_else(|| ProviderError::InsufficientChangesets {
            requested: block_number,
            available: 0..=0,
        })?;

    // Step 1: Get the block hash for the target block
    let block_hash = provider.block_hash(block_number)?.ok_or_else(|| {
        ProviderError::other(std::io::Error::new(
            std::io::ErrorKind::NotFound,
            format!("block hash not found for block number {block_number}"),
        ))
    })?;

    // Step 2: Get the trie changesets for the target block from cache
    let changesets = cache.get_or_compute(block_hash, block_number, provider)?;

    // Step 3: Get the trie reverts for the state after the target block using the cache
    let reverts = cache.get_or_compute_range(provider, (block_number + 1)..=db_tip_block)?;

    // Step 4: Create an InMemoryTrieCursorFactory with the reverts
    // This gives us the trie state as it was after the target block was processed
    let db_cursor_factory = DatabaseTrieCursorFactory::new(tx);
    let cursor_factory = InMemoryTrieCursorFactory::new(db_cursor_factory, &reverts);

    // Step 5: Collect all account trie nodes that changed in the target block
    let mut account_nodes = Vec::new();
    let mut account_cursor = cursor_factory.account_trie_cursor()?;

    // Iterate over the account nodes from the changesets
    for (nibbles, _old_node) in changesets.account_nodes_ref() {
        // Look up the current value of this trie node using the overlay cursor
        let node_value = account_cursor.seek_exact(*nibbles)?.map(|(_, node)| node);
        account_nodes.push((*nibbles, node_value));
    }

    // Step 6: Collect all storage trie nodes that changed in the target block
    let mut storage_tries = B256Map::default();

    // Iterate over the storage tries from the changesets
    for (hashed_address, storage_changeset) in changesets.storage_tries_ref() {
        let mut storage_cursor = cursor_factory.storage_trie_cursor(*hashed_address)?;
        let mut storage_nodes = Vec::new();

        // Iterate over the storage nodes for this account
        for (nibbles, _old_node) in storage_changeset.storage_nodes_ref() {
            // Look up the current value of this storage trie node
            let node_value = storage_cursor.seek_exact(*nibbles)?.map(|(_, node)| node);
            storage_nodes.push((*nibbles, node_value));
        }

        storage_tries.insert(
            *hashed_address,
            StorageTrieUpdatesSorted { storage_nodes, is_deleted: storage_changeset.is_deleted },
        );
    }

    Ok(TrieUpdatesSorted::new(account_nodes, storage_tries))
}

/// Thread-safe changeset cache.
///
/// Wraps the shared cache state in an `Arc<RwLock<_>>`: the `RwLock` enables concurrent reads
/// while ensuring exclusive access for writes, and the `Arc` makes the handle cheap to clone.
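///
/// # Example
///
/// A minimal sketch of the intended insert/lookup/evict flow (`block_hash` and `changesets` are
/// assumed to come from elsewhere, e.g. `changesets` via [`compute_block_trie_changesets`]):
///
/// ```ignore
/// let cache = ChangesetCache::new();
/// cache.insert(block_hash, 100, changesets);
/// assert!(cache.get(&block_hash).is_some());
///
/// // After block 100 is persisted, drop everything below block 101.
/// cache.evict(101);
/// assert!(cache.get(&block_hash).is_none());
/// ```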
#[derive(Debug, Clone)]
pub struct ChangesetCache {
    inner: Arc<RwLock<ChangesetCacheInner>>,
}

impl Default for ChangesetCache {
    fn default() -> Self {
        Self::new()
    }
}

impl ChangesetCache {
    /// Creates a new cache.
    ///
    /// The cache has no capacity limit and relies on explicit eviction
    /// via the `evict()` method to manage memory usage.
    pub fn new() -> Self {
        Self { inner: Arc::new(RwLock::new(ChangesetCacheInner::new())) }
    }

    /// Retrieves changesets for a block by hash.
    ///
    /// Returns `None` if the block is not in the cache (either evicted or never computed).
    /// Updates hit/miss metrics accordingly.
    pub fn get(&self, block_hash: &B256) -> Option<Arc<TrieUpdatesSorted>> {
        self.inner.read().get(block_hash)
    }

    /// Inserts changesets for a block into the cache.
    ///
    /// This method does not perform any eviction. Eviction must be explicitly
    /// triggered by calling `evict()`.
    ///
    /// # Arguments
    ///
    /// * `block_hash` - Hash of the block
    /// * `block_number` - Block number for tracking and eviction
    /// * `changesets` - Trie changesets to cache
    pub fn insert(&self, block_hash: B256, block_number: u64, changesets: Arc<TrieUpdatesSorted>) {
        self.inner.write().insert(block_hash, block_number, changesets)
    }

    /// Evicts changesets for blocks below the given block number.
    ///
    /// This should be called after blocks are persisted to the database to free
    /// memory for changesets that are no longer needed in the cache.
    ///
    /// # Arguments
    ///
    /// * `up_to_block` - Evict blocks with number < this value. Blocks with number >= this value
    ///   are retained.
    pub fn evict(&self, up_to_block: BlockNumber) {
        self.inner.write().evict(up_to_block)
    }

    /// Gets changesets from cache, or computes them on-the-fly if missing.
    ///
    /// This is the primary API for retrieving changesets. On cache miss,
    /// it computes changesets from the database state and populates the cache.
    ///
    /// # Arguments
    ///
    /// * `block_hash` - Hash of the block to get changesets for
    /// * `block_number` - Block number (for cache insertion and logging)
    /// * `provider` - Database provider for DB access
    ///
    /// # Returns
    ///
    /// Changesets for the block, either from cache or computed on-the-fly
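    ///
    /// # Example
    ///
    /// A minimal sketch (`block_hash` and `provider` are assumed to come from the caller):
    ///
    /// ```ignore
    /// let changesets = cache.get_or_compute(block_hash, 100, &provider)?;
    /// ```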
    pub fn get_or_compute<P>(
        &self,
        block_hash: B256,
        block_number: u64,
        provider: &P,
    ) -> ProviderResult<Arc<TrieUpdatesSorted>>
    where
        P: DBProvider
            + StageCheckpointReader
            + ChangeSetReader
            + StorageChangeSetReader
            + BlockNumReader,
    {
        // Try cache first (with read lock)
        {
            let cache = self.inner.read();
            if let Some(changesets) = cache.get(&block_hash) {
                debug!(
                    target: "trie::changeset_cache",
                    ?block_hash,
                    block_number,
                    "Changeset cache HIT"
                );
                return Ok(changesets);
            }
        }

        // Cache miss - compute from database
        debug!(
            target: "trie::changeset_cache",
            ?block_hash,
            block_number,
            "Changeset cache MISS, computing from database"
        );

        let start = Instant::now();

        // Compute changesets
        let changesets = compute_block_trie_changesets(provider, block_number)?;

        let changesets = Arc::new(changesets);
        let elapsed = start.elapsed();

        debug!(
            target: "trie::changeset_cache",
            ?elapsed,
            block_number,
            ?block_hash,
            "Changeset computed from database and inserting into cache"
        );

        // Store in cache (with write lock)
        {
            let mut cache = self.inner.write();
            cache.insert(block_hash, block_number, Arc::clone(&changesets));
        }

        debug!(
            target: "trie::changeset_cache",
            ?block_hash,
            block_number,
            "Changeset successfully cached"
        );

        Ok(changesets)
    }

    /// Gets or computes accumulated trie reverts for a range of blocks.
    ///
    /// This method retrieves and accumulates all trie changesets (reverts) for the specified
    /// block range (inclusive). The changesets are accumulated in reverse order (newest to oldest)
    /// so that older values take precedence when there are conflicts.
    ///
    /// # Arguments
    ///
    /// * `provider` - Database provider for DB access and block lookups
    /// * `range` - Block range to accumulate reverts for (inclusive)
    ///
    /// # Returns
    ///
    /// Accumulated trie reverts for all blocks in the specified range
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Any block in the range is beyond the database tip
    /// - Database access fails
    /// - Block hash lookup fails
    /// - Changeset computation fails
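    ///
    /// # Example
    ///
    /// A minimal sketch: with the database tip at block 105 (an assumed value), this accumulates
    /// the reverts needed to view the trie as it was just after block 100.
    ///
    /// ```ignore
    /// let reverts = cache.get_or_compute_range(&provider, 101..=105)?;
    /// ```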
    pub fn get_or_compute_range<P>(
        &self,
        provider: &P,
        range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<TrieUpdatesSorted>
    where
        P: DBProvider
            + StageCheckpointReader
            + ChangeSetReader
            + StorageChangeSetReader
            + BlockNumReader,
    {
        // Get the database tip block number
        let db_tip_block = provider
            .get_stage_checkpoint(reth_stages_types::StageId::Finish)?
            .as_ref()
            .map(|chk| chk.block_number)
            .ok_or_else(|| ProviderError::InsufficientChangesets {
                requested: *range.start(),
                available: 0..=0,
            })?;

        let start_block = *range.start();
        let end_block = *range.end();

        // If range end is beyond the tip, return an error
        if end_block > db_tip_block {
            return Err(ProviderError::InsufficientChangesets {
                requested: end_block,
                available: 0..=db_tip_block,
            });
        }

        let timer = Instant::now();

        debug!(
            target: "trie::changeset_cache",
            start_block,
            end_block,
            db_tip_block,
            "Starting get_or_compute_range"
        );

        // Use changeset cache to retrieve and accumulate reverts block by block.
        // Iterate in reverse order (newest to oldest) so that older changesets
        // take precedence when there are conflicting updates.
        let mut accumulated_reverts = TrieUpdatesSorted::default();

        for block_number in range.rev() {
            // Get the block hash for this block number
            let block_hash = provider.block_hash(block_number)?.ok_or_else(|| {
                ProviderError::other(std::io::Error::new(
                    std::io::ErrorKind::NotFound,
                    format!("block hash not found for block number {block_number}"),
                ))
            })?;

            debug!(
                target: "trie::changeset_cache",
                block_number,
                ?block_hash,
                "Looked up block hash for block number in range"
            );

            // Get changesets from cache (or compute on-the-fly)
            let changesets = self.get_or_compute(block_hash, block_number, provider)?;

            // Overlay this block's changesets on top of accumulated reverts.
            // Since we iterate newest to oldest, older values are added last
            // and overwrite any conflicting newer values (oldest changeset values take
            // precedence).
            accumulated_reverts.extend_ref_and_sort(&changesets);
        }

        let elapsed = timer.elapsed();

        let num_account_nodes = accumulated_reverts.account_nodes_ref().len();
        let num_storage_tries = accumulated_reverts.storage_tries_ref().len();

        debug!(
            target: "trie::changeset_cache",
            ?elapsed,
            start_block,
            end_block,
            num_blocks = end_block.saturating_sub(start_block).saturating_add(1),
            num_account_nodes,
            num_storage_tries,
            "Finished accumulating trie reverts for block range"
        );

        Ok(accumulated_reverts)
    }
}

/// In-memory cache for trie changesets with explicit eviction policy.
///
/// Holds changesets for blocks that have been validated but not yet persisted.
/// Keyed by block hash for fast lookup during reorgs. Eviction is controlled
/// explicitly by the engine API tree handler when persistence completes.
///
/// ## Eviction Policy
///
/// Unlike traditional caches with automatic eviction, this cache requires explicit
/// eviction calls. The engine API tree handler calls `evict(block_number)` after
/// blocks are persisted to the database, ensuring changesets remain available
/// until their corresponding blocks are safely on disk.
///
/// ## Metrics
///
/// The cache maintains several metrics for observability:
/// - `hits`: Number of successful cache lookups
/// - `misses`: Number of failed cache lookups
/// - `evictions`: Number of blocks evicted
/// - `size`: Current number of cached blocks
#[derive(Debug)]
struct ChangesetCacheInner {
    /// Cache entries: block hash -> (block number, changesets)
    entries: HashMap<B256, (u64, Arc<TrieUpdatesSorted>)>,

    /// Block number to hashes mapping for eviction
    block_numbers: BTreeMap<u64, Vec<B256>>,

    /// Metrics for monitoring cache behavior
    #[cfg(feature = "metrics")]
    metrics: ChangesetCacheMetrics,
}

/// Metrics for the changeset cache.
///
/// These metrics provide visibility into cache performance and help identify
/// potential issues like high miss rates.
#[cfg(feature = "metrics")]
#[derive(Metrics, Clone)]
#[metrics(scope = "trie.changeset_cache")]
struct ChangesetCacheMetrics {
    /// Cache hit counter
    hits: Counter,

    /// Cache miss counter
    misses: Counter,

    /// Eviction counter
    evictions: Counter,

    /// Current cache size (number of entries)
    size: Gauge,
}

impl Default for ChangesetCacheInner {
    fn default() -> Self {
        Self::new()
    }
}

impl ChangesetCacheInner {
    /// Creates a new empty changeset cache.
    ///
    /// The cache has no capacity limit and relies on explicit eviction
    /// via the `evict()` method to manage memory usage.
    fn new() -> Self {
        Self {
            entries: HashMap::new(),
            block_numbers: BTreeMap::new(),
            #[cfg(feature = "metrics")]
            metrics: Default::default(),
        }
    }

    fn get(&self, block_hash: &B256) -> Option<Arc<TrieUpdatesSorted>> {
        match self.entries.get(block_hash) {
            Some((_, changesets)) => {
                #[cfg(feature = "metrics")]
                self.metrics.hits.increment(1);
                Some(Arc::clone(changesets))
            }
            None => {
                #[cfg(feature = "metrics")]
                self.metrics.misses.increment(1);
                None
            }
        }
    }

    fn insert(&mut self, block_hash: B256, block_number: u64, changesets: Arc<TrieUpdatesSorted>) {
        debug!(
            target: "trie::changeset_cache",
            ?block_hash,
            block_number,
            cache_size_before = self.entries.len(),
            "Inserting changeset into cache"
        );

        // Insert the entry
        self.entries.insert(block_hash, (block_number, changesets));

        // Add block hash to block_numbers mapping
        self.block_numbers.entry(block_number).or_default().push(block_hash);

        // Update size metric
        #[cfg(feature = "metrics")]
        self.metrics.size.set(self.entries.len() as f64);

        debug!(
            target: "trie::changeset_cache",
            ?block_hash,
            block_number,
            cache_size_after = self.entries.len(),
            "Changeset inserted into cache"
        );
    }

    fn evict(&mut self, up_to_block: BlockNumber) {
        debug!(
            target: "trie::changeset_cache",
            up_to_block,
            cache_size_before = self.entries.len(),
            "Starting cache eviction"
        );

        // Find all block numbers that should be evicted (< up_to_block)
        let blocks_to_evict: Vec<u64> =
            self.block_numbers.range(..up_to_block).map(|(num, _)| *num).collect();

        // Remove entries for each block number below threshold
        let mut evicted_count = 0;

        for block_number in &blocks_to_evict {
            if let Some(hashes) = self.block_numbers.remove(block_number) {
                debug!(
                    target: "trie::changeset_cache",
                    block_number,
                    num_hashes = hashes.len(),
                    "Evicting block from cache"
                );
                for hash in hashes {
                    if self.entries.remove(&hash).is_some() {
                        evicted_count += 1;
                    }
                }
            }
        }

        debug!(
            target: "trie::changeset_cache",
            up_to_block,
            evicted_count,
            cache_size_after = self.entries.len(),
            "Finished cache eviction"
        );

        // Update metrics if we evicted anything
        #[cfg(feature = "metrics")]
        if evicted_count > 0 {
            self.metrics.evictions.increment(evicted_count as u64);
            self.metrics.size.set(self.entries.len() as f64);
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use alloy_primitives::map::B256Map;

    // Helper function to create empty TrieUpdatesSorted for testing
    fn create_test_changesets() -> Arc<TrieUpdatesSorted> {
        Arc::new(TrieUpdatesSorted::new(vec![], B256Map::default()))
    }

    #[test]
    fn test_insert_and_retrieve_single_entry() {
        let mut cache = ChangesetCacheInner::new();
        let hash = B256::random();
        let changesets = create_test_changesets();

        cache.insert(hash, 100, Arc::clone(&changesets));

        // Should be able to retrieve it
        let retrieved = cache.get(&hash);
        assert!(retrieved.is_some());
        assert_eq!(cache.entries.len(), 1);
    }

    #[test]
    fn test_insert_multiple_entries() {
        let mut cache = ChangesetCacheInner::new();

        // Insert 10 blocks
        let mut hashes = Vec::new();
        for i in 0..10 {
            let hash = B256::random();
            cache.insert(hash, 100 + i, create_test_changesets());
            hashes.push(hash);
        }

        // Should be able to retrieve all
        assert_eq!(cache.entries.len(), 10);
        for hash in &hashes {
            assert!(cache.get(hash).is_some());
        }
    }

    #[test]
    fn test_eviction_when_explicitly_called() {
        let mut cache = ChangesetCacheInner::new();

        // Insert 15 blocks (0-14)
        let mut hashes = Vec::new();
        for i in 0..15 {
            let hash = B256::random();
            cache.insert(hash, i, create_test_changesets());
            hashes.push((i, hash));
        }

        // All blocks should be present (no automatic eviction)
        assert_eq!(cache.entries.len(), 15);

        // Explicitly evict blocks < 4
        cache.evict(4);

        // Blocks 0-3 should be evicted
        assert_eq!(cache.entries.len(), 11); // blocks 4-14 = 11 blocks

        // Verify blocks 0-3 are evicted
        for i in 0..4 {
            assert!(cache.get(&hashes[i as usize].1).is_none(), "Block {} should be evicted", i);
        }

        // Verify blocks 4-14 are still present
        for i in 4..15 {
            assert!(cache.get(&hashes[i as usize].1).is_some(), "Block {} should be present", i);
        }
    }

    #[test]
    fn test_eviction_with_persistence_watermark() {
        let mut cache = ChangesetCacheInner::new();

        // Insert blocks 100-165
        let mut hashes = std::collections::HashMap::new();
        for i in 100..=165 {
            let hash = B256::random();
            cache.insert(hash, i, create_test_changesets());
            hashes.insert(i, hash);
        }

        // All blocks should be present (no automatic eviction)
        assert_eq!(cache.entries.len(), 66);

        // Simulate persistence up to block 164, with 64-block retention window
        // Eviction threshold = 164 - 64 = 100
        cache.evict(100);

        // Blocks 100-165 should remain (66 blocks)
        assert_eq!(cache.entries.len(), 66);

        // Simulate persistence up to block 165
        // Eviction threshold = 165 - 64 = 101
        cache.evict(101);

        // Blocks 101-165 should remain (65 blocks)
        assert_eq!(cache.entries.len(), 65);
        assert!(cache.get(&hashes[&100]).is_none());
        assert!(cache.get(&hashes[&101]).is_some());
    }

    #[test]
    fn test_out_of_order_inserts_with_explicit_eviction() {
        let mut cache = ChangesetCacheInner::new();

        // Insert blocks in random order
        let hash_10 = B256::random();
        cache.insert(hash_10, 10, create_test_changesets());

        let hash_5 = B256::random();
        cache.insert(hash_5, 5, create_test_changesets());

        let hash_15 = B256::random();
        cache.insert(hash_15, 15, create_test_changesets());

        let hash_3 = B256::random();
        cache.insert(hash_3, 3, create_test_changesets());

        // All blocks should be present (no automatic eviction)
        assert_eq!(cache.entries.len(), 4);

        // Explicitly evict blocks < 5
        cache.evict(5);

        assert!(cache.get(&hash_3).is_none(), "Block 3 should be evicted");
        assert!(cache.get(&hash_5).is_some(), "Block 5 should be present");
        assert!(cache.get(&hash_10).is_some(), "Block 10 should be present");
        assert!(cache.get(&hash_15).is_some(), "Block 15 should be present");
    }

    #[test]
    fn test_multiple_blocks_same_number() {
        let mut cache = ChangesetCacheInner::new();

        // Insert multiple blocks with same number (side chains)
        let hash_1a = B256::random();
        let hash_1b = B256::random();
        cache.insert(hash_1a, 100, create_test_changesets());
        cache.insert(hash_1b, 100, create_test_changesets());

        // Both should be retrievable
        assert!(cache.get(&hash_1a).is_some());
        assert!(cache.get(&hash_1b).is_some());
        assert_eq!(cache.entries.len(), 2);
    }

    #[test]
    fn test_eviction_removes_all_side_chains() {
        let mut cache = ChangesetCacheInner::new();

        // Insert multiple blocks at same height (side chains)
        let hash_10a = B256::random();
        let hash_10b = B256::random();
        let hash_10c = B256::random();
        cache.insert(hash_10a, 10, create_test_changesets());
        cache.insert(hash_10b, 10, create_test_changesets());
        cache.insert(hash_10c, 10, create_test_changesets());

        let hash_20 = B256::random();
        cache.insert(hash_20, 20, create_test_changesets());

        assert_eq!(cache.entries.len(), 4);

        // Evict blocks < 15 - should remove all three side chains at height 10
        cache.evict(15);

        assert_eq!(cache.entries.len(), 1);
        assert!(cache.get(&hash_10a).is_none());
        assert!(cache.get(&hash_10b).is_none());
        assert!(cache.get(&hash_10c).is_none());
        assert!(cache.get(&hash_20).is_some());
    }
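
    #[test]
    fn test_evicting_all_blocks_clears_internal_maps() {
        // Additional coverage sketch: evicting past the newest block should leave both the
        // entry map and the block-number index empty.
        let mut cache = ChangesetCacheInner::new();

        for i in 0..5 {
            cache.insert(B256::random(), i, create_test_changesets());
        }
        assert_eq!(cache.entries.len(), 5);

        // Evict everything below block 5, i.e. all inserted blocks
        cache.evict(5);

        assert!(cache.entries.is_empty());
        assert!(cache.block_numbers.is_empty());
    }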
}