1use crate::{
11 DatabaseHashedCursorFactory, DatabaseStateRoot, DatabaseTrieCursorFactory, TrieTableAdapter,
12};
13use alloy_primitives::{map::B256Map, BlockNumber, B256};
14use parking_lot::RwLock;
15use reth_primitives_traits::FastInstant as Instant;
16use reth_storage_api::{
17 BlockNumReader, ChangeSetReader, DBProvider, StageCheckpointReader, StorageChangeSetReader,
18 StorageSettingsCache,
19};
20use reth_storage_errors::provider::{ProviderError, ProviderResult};
21use reth_trie::{
22 changesets::compute_trie_changesets,
23 trie_cursor::{InMemoryTrieCursorFactory, TrieCursor, TrieCursorFactory},
24 TrieInputSorted,
25};
26use reth_trie_common::updates::{StorageTrieUpdatesSorted, TrieUpdatesSorted};
27use std::{
28 collections::BTreeMap,
29 fmt,
30 ops::RangeInclusive,
31 sync::{Arc, OnceLock},
32};
33use tracing::{debug, debug_span, warn};
34
35#[cfg(feature = "metrics")]
36use reth_metrics::{
37 metrics::{Counter, Gauge},
38 Metrics,
39};
40
/// Computes the trie changesets produced by executing `block_number`, reading
/// all required data from the database through `provider`.
///
/// Dispatches to [`compute_block_trie_changesets_inner`] with the trie table
/// adapter selected for the provider's storage settings.
pub fn compute_block_trie_changesets<Provider>(
    provider: &Provider,
    block_number: BlockNumber,
) -> Result<TrieUpdatesSorted, ProviderError>
where
    Provider: DBProvider
        + StageCheckpointReader
        + ChangeSetReader
        + StorageChangeSetReader
        + BlockNumReader
        + StorageSettingsCache,
{
    crate::with_adapter!(provider, |A| {
        compute_block_trie_changesets_inner::<_, A>(provider, block_number)
    })
}
85
/// Computes the trie changesets for `block_number` using trie table adapter
/// `A`.
///
/// Works by deriving two overlay state roots over the current database: one
/// rewound to just *before* the block and one rewound to just *after* it, then
/// diffing the post-block trie updates against the pre-block trie.
fn compute_block_trie_changesets_inner<Provider, A>(
    provider: &Provider,
    block_number: BlockNumber,
) -> Result<TrieUpdatesSorted, ProviderError>
where
    Provider: DBProvider
        + StageCheckpointReader
        + ChangeSetReader
        + StorageChangeSetReader
        + BlockNumReader
        + StorageSettingsCache,
    A: TrieTableAdapter,
{
    debug!(
        target: "trie::changeset_cache",
        block_number,
        "Computing block trie changesets from database state"
    );

    // State revert covering exactly this block.
    let individual_state_revert =
        crate::state::from_reverts_auto(provider, block_number..=block_number)?;

    // State revert for every block after this one, i.e. the overlay that views
    // the database as of `block_number`.
    let cumulative_state_revert = crate::state::from_reverts_auto(provider, (block_number + 1)..)?;

    // Adding this block's own revert rewinds one block further, to the state
    // as of `block_number - 1`.
    let mut cumulative_state_revert_prev = cumulative_state_revert.clone();
    cumulative_state_revert_prev.extend_ref_and_sort(&individual_state_revert);

    let prefix_sets_prev = cumulative_state_revert_prev.construct_prefix_sets();
    let input_prev = TrieInputSorted::new(
        Arc::default(),
        Arc::new(cumulative_state_revert_prev),
        prefix_sets_prev,
    );

    // State-root computation backed directly by database cursors for the
    // provider's transaction and trie table adapter `A`.
    type DbStateRoot<'a, TX, A> = reth_trie::StateRoot<
        DatabaseTrieCursorFactory<&'a TX, A>,
        DatabaseHashedCursorFactory<&'a TX>,
    >;

    // Trie updates that describe the trie as it was *before* this block.
    let cumulative_trie_updates_prev =
        DbStateRoot::<_, A>::overlay_root_from_nodes_with_updates(provider.tx_ref(), input_prev)
            .map_err(ProviderError::other)?
            .1
            .into_sorted();

    // Only paths touched by this block need to be recomputed.
    let prefix_sets = individual_state_revert.construct_prefix_sets();

    // Roll this block's state forward on top of the pre-block trie updates to
    // obtain the trie updates *after* this block.
    let input = TrieInputSorted::new(
        Arc::new(cumulative_trie_updates_prev.clone()),
        Arc::new(cumulative_state_revert),
        prefix_sets,
    );

    let trie_updates =
        DbStateRoot::<_, A>::overlay_root_from_nodes_with_updates(provider.tx_ref(), input)
            .map_err(ProviderError::other)?
            .1
            .into_sorted();

    // Diff the post-block updates against the pre-block trie (database
    // overlaid with the pre-block updates) to get this block's changesets.
    let db_cursor_factory = DatabaseTrieCursorFactory::<_, A>::new(provider.tx_ref());
    let overlay_factory =
        InMemoryTrieCursorFactory::new(db_cursor_factory, &cumulative_trie_updates_prev);

    let changesets =
        compute_trie_changesets(&overlay_factory, &trie_updates).map_err(ProviderError::other)?;

    debug!(
        target: "trie::changeset_cache",
        block_number,
        num_account_nodes = changesets.account_nodes_ref().len(),
        num_storage_tries = changesets.storage_tries_ref().len(),
        "Computed block trie changesets successfully"
    );

    Ok(changesets)
}
174
/// Reconstructs the trie updates produced by `block_number`, using `cache` to
/// avoid recomputing per-block changesets.
///
/// Dispatches to [`compute_block_trie_updates_inner`] with the trie table
/// adapter selected for the provider's storage settings.
pub fn compute_block_trie_updates<Provider>(
    cache: &ChangesetCache,
    provider: &Provider,
    block_number: BlockNumber,
) -> ProviderResult<TrieUpdatesSorted>
where
    Provider: DBProvider
        + StageCheckpointReader
        + ChangeSetReader
        + StorageChangeSetReader
        + BlockNumReader
        + StorageSettingsCache,
{
    crate::with_adapter!(provider, |A| {
        compute_block_trie_updates_inner::<_, A>(cache, provider, block_number)
    })
}
221
/// Reconstructs the trie updates produced by `block_number` with trie table
/// adapter `A`.
///
/// For every node path recorded in the block's changesets, the node value as
/// of `block_number` is re-read from the database overlaid with the
/// accumulated reverts of all later blocks.
fn compute_block_trie_updates_inner<Provider, A>(
    cache: &ChangesetCache,
    provider: &Provider,
    block_number: BlockNumber,
) -> ProviderResult<TrieUpdatesSorted>
where
    Provider: DBProvider
        + StageCheckpointReader
        + ChangeSetReader
        + StorageChangeSetReader
        + BlockNumReader
        + StorageSettingsCache,
    A: TrieTableAdapter,
{
    let tx = provider.tx_ref();

    // Highest fully-persisted block; changesets are only available up to here.
    let db_tip_block = provider
        .get_stage_checkpoint(reth_stages_types::StageId::Finish)?
        .as_ref()
        .map(|chk| chk.block_number)
        .ok_or_else(|| ProviderError::InsufficientChangesets {
            requested: block_number,
            available: 0..=0,
        })?;

    let block_hash = provider.block_hash(block_number)?.ok_or_else(|| {
        ProviderError::other(std::io::Error::new(
            std::io::ErrorKind::NotFound,
            format!("block hash not found for block number {}", block_number),
        ))
    })?;

    // Changed node paths for this block (served from the cache when possible).
    let changesets = cache.get_or_compute(block_hash, block_number, provider)?;

    // Accumulated reverts of all blocks after `block_number`, used to view the
    // trie as it was at `block_number`.
    let reverts = cache.get_or_compute_range(provider, (block_number + 1)..=db_tip_block)?;

    let db_cursor_factory = DatabaseTrieCursorFactory::<_, A>::new(tx);
    let cursor_factory = InMemoryTrieCursorFactory::new(db_cursor_factory, &reverts);

    // Re-read each changed account node through the overlay; `None` means the
    // node does not exist at that view (it was removed).
    let account_nodes_ref = changesets.account_nodes_ref();
    let mut account_nodes = Vec::with_capacity(account_nodes_ref.len());
    let mut account_cursor = cursor_factory.account_trie_cursor()?;

    for (nibbles, _old_node) in account_nodes_ref {
        let node_value = account_cursor.seek_exact(*nibbles)?.map(|(_, node)| node);
        account_nodes.push((*nibbles, node_value));
    }

    // Same per-node lookup for every storage trie touched by the block.
    let mut storage_tries = B256Map::default();

    for (hashed_address, storage_changeset) in changesets.storage_tries_ref() {
        let mut storage_cursor = cursor_factory.storage_trie_cursor(*hashed_address)?;
        let storage_nodes_ref = storage_changeset.storage_nodes_ref();
        let mut storage_nodes = Vec::with_capacity(storage_nodes_ref.len());

        for (nibbles, _old_node) in storage_nodes_ref {
            let node_value = storage_cursor.seek_exact(*nibbles)?.map(|(_, node)| node);
            storage_nodes.push((*nibbles, node_value));
        }

        storage_tries.insert(
            *hashed_address,
            StorageTrieUpdatesSorted { storage_nodes, is_deleted: storage_changeset.is_deleted },
        );
    }

    Ok(TrieUpdatesSorted::new(account_nodes, storage_tries))
}
303
/// One-shot rendezvous for a changeset computation that is in flight on
/// another thread.
struct PendingChangeset {
    /// Unset while the computation runs; `Some(updates)` once resolved,
    /// `None` once cancelled.
    result: OnceLock<Option<Arc<TrieUpdatesSorted>>>,
}
314
315impl PendingChangeset {
316 const fn new() -> Self {
317 Self { result: OnceLock::new() }
318 }
319
320 fn wait(&self) -> Option<Arc<TrieUpdatesSorted>> {
323 let _span =
324 debug_span!(target: "trie::changeset_cache", "waiting_for_pending_changeset").entered();
325 self.result.wait().clone()
326 }
327
328 fn resolve(&self, changesets: Arc<TrieUpdatesSorted>) {
330 let _ = self.result.set(Some(changesets));
331 }
332
333 fn cancel(&self) {
336 let _ = self.result.set(None);
337 }
338}
339
340impl fmt::Debug for PendingChangeset {
341 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
342 let is_resolved = self.result.get().is_some();
343 f.debug_struct("PendingChangeset").field("resolved", &is_resolved).finish()
344 }
345}
346
/// Thread-safe cache of per-block trie changesets.
///
/// Cloning is cheap: all clones share the same inner state via `Arc`.
#[derive(Debug, Clone)]
pub struct ChangesetCache {
    /// Shared state: cached entries, the block-number eviction index, and the
    /// set of in-flight computations.
    inner: Arc<RwLock<ChangesetCacheInner>>,
}
355
impl Default for ChangesetCache {
    /// Equivalent to [`ChangesetCache::new`].
    fn default() -> Self {
        Self::new()
    }
}
361
impl ChangesetCache {
    /// Creates an empty cache.
    pub fn new() -> Self {
        Self { inner: Arc::new(RwLock::new(ChangesetCacheInner::new())) }
    }

    /// Returns the cached changesets for `block_hash`, if present.
    pub fn get(&self, block_hash: &B256) -> Option<Arc<TrieUpdatesSorted>> {
        self.inner.read().get(block_hash)
    }

    /// Inserts changesets for a block and resolves any pending computation
    /// registered under the same hash, waking its waiters.
    fn insert(&self, block_hash: B256, block_number: u64, changesets: Arc<TrieUpdatesSorted>) {
        // Remove the pending slot while holding the write lock, but resolve it
        // after the lock is released so woken waiters don't contend on it.
        let pending = {
            let mut cache = self.inner.write();
            cache.insert(block_hash, block_number, Arc::clone(&changesets));
            cache.pending.remove(&block_hash)
        };

        if let Some(pending) = pending {
            pending.resolve(changesets);
        }
    }

    /// Registers an in-flight computation for `block_hash` so concurrent
    /// readers can wait for its result instead of recomputing it.
    ///
    /// The returned guard should be completed with
    /// [`PendingChangesetGuard::resolve`]; dropping it unresolved cancels the
    /// registration and wakes waiters with `None`.
    pub fn register_pending(&self, block_hash: B256) -> PendingChangesetGuard {
        let pending = Arc::new(PendingChangeset::new());
        let prev = self.inner.write().pending.insert(block_hash, Arc::clone(&pending));
        debug_assert!(prev.is_none(), "duplicate pending changeset for {block_hash:?}");
        PendingChangesetGuard { cache: self.clone(), block_hash, pending: Some(pending) }
    }

    /// Evicts all cached entries whose block number is strictly below
    /// `up_to_block`.
    pub fn evict(&self, up_to_block: BlockNumber) {
        self.inner.write().evict(up_to_block)
    }

    /// Returns the changesets for the given block: served from the cache if
    /// present, awaited from an in-flight computation if one is registered, or
    /// computed from the database as a fallback (and then cached).
    pub fn get_or_compute<P>(
        &self,
        block_hash: B256,
        block_number: u64,
        provider: &P,
    ) -> ProviderResult<Arc<TrieUpdatesSorted>>
    where
        P: DBProvider
            + StageCheckpointReader
            + ChangeSetReader
            + StorageChangeSetReader
            + BlockNumReader
            + StorageSettingsCache,
    {
        // Fast path: check the cache and capture any pending computation under
        // a single read lock.
        let pending = {
            let cache = self.inner.read();
            if let Some(changesets) = cache.get(&block_hash) {
                debug!(
                    target: "trie::changeset_cache",
                    ?block_hash,
                    block_number,
                    "Changeset cache HIT"
                );
                return Ok(changesets);
            }
            cache.pending.get(&block_hash).cloned()
        };

        // Another thread is already computing this block: block on its result
        // instead of duplicating the work.
        if let Some(pending) = pending {
            debug!(
                target: "trie::changeset_cache",
                ?block_hash,
                block_number,
                "Changeset cache MISS but pending computation found, waiting"
            );

            let start = Instant::now();

            if let Some(changesets) = pending.wait() {
                debug!(
                    target: "trie::changeset_cache",
                    ?block_hash,
                    block_number,
                    elapsed = ?start.elapsed(),
                    "Pending changeset resolved"
                );
                return Ok(changesets);
            }

            // The producer dropped its guard without resolving; fall through
            // and compute from the database ourselves.
            debug!(
                target: "trie::changeset_cache",
                ?block_hash,
                block_number,
                elapsed = ?start.elapsed(),
                "Pending changeset was cancelled, falling through to DB computation"
            );
        }

        warn!(
            target: "trie::changeset_cache",
            ?block_hash,
            block_number,
            "Changeset cache MISS, falling back to DB-based computation"
        );

        let start = Instant::now();

        let changesets = compute_block_trie_changesets(provider, block_number)?;

        let changesets = Arc::new(changesets);
        let elapsed = start.elapsed();

        debug!(
            target: "trie::changeset_cache",
            ?elapsed,
            block_number,
            ?block_hash,
            "Changeset computed from database and inserting into cache"
        );

        self.insert(block_hash, block_number, Arc::clone(&changesets));

        debug!(
            target: "trie::changeset_cache",
            ?block_hash,
            block_number,
            "Changeset successfully cached"
        );

        Ok(changesets)
    }

    /// Accumulates the changesets of every block in `range` into a single set
    /// of trie reverts.
    ///
    /// Returns [`ProviderError::InsufficientChangesets`] if the range extends
    /// past the database tip.
    pub fn get_or_compute_range<P>(
        &self,
        provider: &P,
        range: RangeInclusive<BlockNumber>,
    ) -> ProviderResult<TrieUpdatesSorted>
    where
        P: DBProvider
            + StageCheckpointReader
            + ChangeSetReader
            + StorageChangeSetReader
            + BlockNumReader
            + StorageSettingsCache,
    {
        // Highest fully-persisted block; bounds the serviceable range.
        let db_tip_block = provider
            .get_stage_checkpoint(reth_stages_types::StageId::Finish)?
            .as_ref()
            .map(|chk| chk.block_number)
            .ok_or_else(|| ProviderError::InsufficientChangesets {
                requested: *range.start(),
                available: 0..=0,
            })?;

        let start_block = *range.start();
        let end_block = *range.end();

        if end_block > db_tip_block {
            return Err(ProviderError::InsufficientChangesets {
                requested: end_block,
                available: 0..=db_tip_block,
            });
        }

        let timer = Instant::now();

        debug!(
            target: "trie::changeset_cache",
            start_block,
            end_block,
            db_tip_block,
            "Starting get_or_compute_range"
        );

        let mut accumulated_reverts = TrieUpdatesSorted::default();

        // Walk the range newest-to-oldest. NOTE(review): this assumes
        // `extend_ref_and_sort` gives precedence to later extensions (i.e.
        // older blocks), matching revert semantics — confirm against its impl.
        for block_number in range.rev() {
            let block_hash = provider.block_hash(block_number)?.ok_or_else(|| {
                ProviderError::other(std::io::Error::new(
                    std::io::ErrorKind::NotFound,
                    format!("block hash not found for block number {}", block_number),
                ))
            })?;

            debug!(
                target: "trie::changeset_cache",
                block_number,
                ?block_hash,
                "Looked up block hash for block number in range"
            );

            let changesets = self.get_or_compute(block_hash, block_number, provider)?;

            accumulated_reverts.extend_ref_and_sort(&changesets);
        }

        let elapsed = timer.elapsed();

        let num_account_nodes = accumulated_reverts.account_nodes_ref().len();
        let num_storage_tries = accumulated_reverts.storage_tries_ref().len();

        debug!(
            target: "trie::changeset_cache",
            ?elapsed,
            start_block,
            end_block,
            num_blocks = end_block.saturating_sub(start_block).saturating_add(1),
            num_account_nodes,
            num_storage_tries,
            "Finished accumulating trie reverts for block range"
        );

        Ok(accumulated_reverts)
    }
}
665
/// Guard for a pending changeset registration created by
/// [`ChangesetCache::register_pending`].
///
/// Dropping the guard without calling [`Self::resolve`] cancels the pending
/// registration and wakes any waiters with `None`.
#[must_use = "call .resolve() to insert changesets into the cache"]
pub struct PendingChangesetGuard {
    /// Handle to the cache the registration lives in.
    cache: ChangesetCache,
    /// Hash of the block being computed.
    block_hash: B256,
    /// The registered slot; `None` once resolved, which disarms the `Drop`
    /// cancellation.
    pending: Option<Arc<PendingChangeset>>,
}
680
impl PendingChangesetGuard {
    /// Inserts the computed changesets into the cache, which also resolves the
    /// registered pending slot and wakes its waiters.
    pub fn resolve(mut self, block_number: u64, changesets: Arc<TrieUpdatesSorted>) {
        self.cache.insert(self.block_hash, block_number, changesets);
        // Disarm the `Drop` impl so it does not cancel an already-resolved
        // registration.
        self.pending = None;
    }
}
689
690impl fmt::Debug for PendingChangesetGuard {
691 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
692 f.debug_struct("PendingChangesetGuard").field("block_hash", &self.block_hash).finish()
693 }
694}
695
696impl Drop for PendingChangesetGuard {
697 fn drop(&mut self) {
698 let Some(pending) = self.pending.take() else {
699 return
701 };
702
703 let removed = self.cache.inner.write().pending.remove(&self.block_hash);
704 if let Some(removed) = removed {
705 if Arc::ptr_eq(&removed, &pending) {
706 debug!(
707 target: "trie::changeset_cache",
708 block_hash = ?self.block_hash,
709 "Pending changeset dropped without resolution, cancelling"
710 );
711 removed.cancel();
712 } else {
713 self.cache.inner.write().pending.insert(self.block_hash, removed);
715 }
716 }
717 }
718}
719
/// Internal state of [`ChangesetCache`], guarded by the outer `RwLock`.
#[derive(Debug)]
struct ChangesetCacheInner {
    /// Cached changesets keyed by block hash, stored with the block number
    /// used for eviction.
    entries: B256Map<(u64, Arc<TrieUpdatesSorted>)>,

    /// Index from block number to the hashes cached at that height (multiple
    /// hashes per number are possible with side-chains).
    block_numbers: BTreeMap<u64, Vec<B256>>,

    /// In-flight computations keyed by block hash.
    pending: B256Map<Arc<PendingChangeset>>,

    /// Hit/miss/eviction/size metrics.
    #[cfg(feature = "metrics")]
    metrics: ChangesetCacheMetrics,
}
756
/// Metrics for [`ChangesetCache`], emitted under the `trie.changeset_cache`
/// scope.
#[cfg(feature = "metrics")]
#[derive(Metrics, Clone)]
#[metrics(scope = "trie.changeset_cache")]
struct ChangesetCacheMetrics {
    /// Number of cache hits.
    hits: Counter,

    /// Number of cache misses.
    misses: Counter,

    /// Number of entries evicted from the cache.
    evictions: Counter,

    /// Current number of entries in the cache.
    size: Gauge,
}
777
impl Default for ChangesetCacheInner {
    /// Equivalent to [`ChangesetCacheInner::new`].
    fn default() -> Self {
        Self::new()
    }
}
783
784impl ChangesetCacheInner {
785 fn new() -> Self {
790 Self {
791 entries: B256Map::default(),
792 block_numbers: BTreeMap::new(),
793 pending: B256Map::default(),
794 #[cfg(feature = "metrics")]
795 metrics: Default::default(),
796 }
797 }
798
799 fn get(&self, block_hash: &B256) -> Option<Arc<TrieUpdatesSorted>> {
800 match self.entries.get(block_hash) {
801 Some((_, changesets)) => {
802 #[cfg(feature = "metrics")]
803 self.metrics.hits.increment(1);
804 Some(Arc::clone(changesets))
805 }
806 None => {
807 #[cfg(feature = "metrics")]
808 self.metrics.misses.increment(1);
809 None
810 }
811 }
812 }
813
814 fn insert(&mut self, block_hash: B256, block_number: u64, changesets: Arc<TrieUpdatesSorted>) {
815 debug!(
816 target: "trie::changeset_cache",
817 ?block_hash,
818 block_number,
819 cache_size_before = self.entries.len(),
820 "Inserting changeset into cache"
821 );
822
823 self.entries.insert(block_hash, (block_number, changesets));
825
826 self.block_numbers.entry(block_number).or_default().push(block_hash);
828
829 #[cfg(feature = "metrics")]
831 self.metrics.size.set(self.entries.len() as f64);
832
833 debug!(
834 target: "trie::changeset_cache",
835 ?block_hash,
836 block_number,
837 cache_size_after = self.entries.len(),
838 "Changeset inserted into cache"
839 );
840 }
841
842 fn evict(&mut self, up_to_block: BlockNumber) {
843 debug!(
844 target: "trie::changeset_cache",
845 up_to_block,
846 cache_size_before = self.entries.len(),
847 "Starting cache eviction"
848 );
849
850 let blocks_to_evict: Vec<u64> =
852 self.block_numbers.range(..up_to_block).map(|(num, _)| *num).collect();
853
854 #[cfg(feature = "metrics")]
856 let mut evicted_count = 0;
857 #[cfg(not(feature = "metrics"))]
858 let mut evicted_count = 0;
859
860 for block_number in &blocks_to_evict {
861 if let Some(hashes) = self.block_numbers.remove(block_number) {
862 debug!(
863 target: "trie::changeset_cache",
864 block_number,
865 num_hashes = hashes.len(),
866 "Evicting block from cache"
867 );
868 for hash in hashes {
869 if self.entries.remove(&hash).is_some() {
870 evicted_count += 1;
871 }
872 }
873 }
874 }
875
876 debug!(
877 target: "trie::changeset_cache",
878 up_to_block,
879 evicted_count,
880 cache_size_after = self.entries.len(),
881 "Finished cache eviction"
882 );
883
884 #[cfg(feature = "metrics")]
886 if evicted_count > 0 {
887 self.metrics.evictions.increment(evicted_count as u64);
888 self.metrics.size.set(self.entries.len() as f64);
889 }
890 }
891}
892
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_primitives::map::{B256Map, HashMap};

    /// Builds an empty changesets value; cache plumbing does not inspect the
    /// contents.
    fn create_test_changesets() -> Arc<TrieUpdatesSorted> {
        Arc::new(TrieUpdatesSorted::new(vec![], B256Map::default()))
    }

    #[test]
    fn test_insert_and_retrieve_single_entry() {
        let mut cache = ChangesetCacheInner::new();
        let hash = B256::random();
        let changesets = create_test_changesets();

        cache.insert(hash, 100, Arc::clone(&changesets));

        let retrieved = cache.get(&hash);
        assert!(retrieved.is_some());
        assert_eq!(cache.entries.len(), 1);
    }

    #[test]
    fn test_insert_multiple_entries() {
        let mut cache = ChangesetCacheInner::new();

        let mut hashes = Vec::new();
        for i in 0..10 {
            let hash = B256::random();
            cache.insert(hash, 100 + i, create_test_changesets());
            hashes.push(hash);
        }

        assert_eq!(cache.entries.len(), 10);
        for hash in &hashes {
            assert!(cache.get(hash).is_some());
        }
    }

    #[test]
    fn test_eviction_when_explicitly_called() {
        let mut cache = ChangesetCacheInner::new();

        let mut hashes = Vec::new();
        for i in 0..15 {
            let hash = B256::random();
            cache.insert(hash, i, create_test_changesets());
            hashes.push((i, hash));
        }

        assert_eq!(cache.entries.len(), 15);

        cache.evict(4);

        // Blocks 0..4 are strictly below the watermark and must be gone;
        // blocks 4..15 remain.
        assert_eq!(cache.entries.len(), 11);

        for i in 0..4 {
            assert!(cache.get(&hashes[i as usize].1).is_none(), "Block {} should be evicted", i);
        }

        for i in 4..15 {
            assert!(cache.get(&hashes[i as usize].1).is_some(), "Block {} should be present", i);
        }
    }

    #[test]
    fn test_eviction_with_persistence_watermark() {
        let mut cache = ChangesetCacheInner::new();

        let mut hashes = HashMap::new();
        for i in 100..=165 {
            let hash = B256::random();
            cache.insert(hash, i, create_test_changesets());
            hashes.insert(i, hash);
        }

        assert_eq!(cache.entries.len(), 66);

        // The watermark is exclusive: evict(100) removes nothing at 100.
        cache.evict(100);

        assert_eq!(cache.entries.len(), 66);

        cache.evict(101);

        assert_eq!(cache.entries.len(), 65);
        assert!(cache.get(&hashes[&100]).is_none());
        assert!(cache.get(&hashes[&101]).is_some());
    }

    #[test]
    fn test_out_of_order_inserts_with_explicit_eviction() {
        let mut cache = ChangesetCacheInner::new();

        // Insertion order intentionally does not match block-number order.
        let hash_10 = B256::random();
        cache.insert(hash_10, 10, create_test_changesets());

        let hash_5 = B256::random();
        cache.insert(hash_5, 5, create_test_changesets());

        let hash_15 = B256::random();
        cache.insert(hash_15, 15, create_test_changesets());

        let hash_3 = B256::random();
        cache.insert(hash_3, 3, create_test_changesets());

        assert_eq!(cache.entries.len(), 4);

        cache.evict(5);

        assert!(cache.get(&hash_3).is_none(), "Block 3 should be evicted");
        assert!(cache.get(&hash_5).is_some(), "Block 5 should be present");
        assert!(cache.get(&hash_10).is_some(), "Block 10 should be present");
        assert!(cache.get(&hash_15).is_some(), "Block 15 should be present");
    }

    #[test]
    fn test_multiple_blocks_same_number() {
        let mut cache = ChangesetCacheInner::new();

        // Side-chain blocks can share a height; both must be retrievable.
        let hash_1a = B256::random();
        let hash_1b = B256::random();
        cache.insert(hash_1a, 100, create_test_changesets());
        cache.insert(hash_1b, 100, create_test_changesets());

        assert!(cache.get(&hash_1a).is_some());
        assert!(cache.get(&hash_1b).is_some());
        assert_eq!(cache.entries.len(), 2);
    }

    #[test]
    fn test_eviction_removes_all_side_chains() {
        let mut cache = ChangesetCacheInner::new();

        // Three competing blocks at height 10, plus one at height 20.
        let hash_10a = B256::random();
        let hash_10b = B256::random();
        let hash_10c = B256::random();
        cache.insert(hash_10a, 10, create_test_changesets());
        cache.insert(hash_10b, 10, create_test_changesets());
        cache.insert(hash_10c, 10, create_test_changesets());

        let hash_20 = B256::random();
        cache.insert(hash_20, 20, create_test_changesets());

        assert_eq!(cache.entries.len(), 4);

        cache.evict(15);

        // All height-10 entries are below the watermark and must be removed.
        assert_eq!(cache.entries.len(), 1);
        assert!(cache.get(&hash_10a).is_none());
        assert!(cache.get(&hash_10b).is_none());
        assert!(cache.get(&hash_10c).is_none());
        assert!(cache.get(&hash_20).is_some());
    }
}