reth_engine_tree/tree/cached_state.rs

//! Execution cache implementation for block processing.
use alloy_primitives::{Address, StorageKey, StorageValue, B256};
use metrics::Gauge;
use mini_moka::sync::CacheBuilder;
use reth_errors::ProviderResult;
use reth_metrics::Metrics;
use reth_primitives_traits::{Account, Bytecode};
use reth_provider::{
    AccountReader, BlockHashReader, BytecodeReader, HashedPostStateProvider, StateProofProvider,
    StateProvider, StateRootProvider, StorageRootProvider,
};
use reth_revm::db::BundleState;
use reth_trie::{
    updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof,
    MultiProofTargets, StorageMultiProof, StorageProof, TrieInput,
};
use revm_primitives::map::DefaultHashBuilder;
use std::{sync::Arc, time::Duration};
use tracing::{debug_span, instrument, trace};

pub(crate) type Cache<K, V> =
    mini_moka::sync::Cache<K, V, alloy_primitives::map::DefaultHashBuilder>;

/// A wrapper around a state provider and a shared cache.
pub(crate) struct CachedStateProvider<S> {
    /// The state provider
    state_provider: S,

    /// The caches used for the provider
    caches: ExecutionCache,

    /// Metrics for the cached state provider
    metrics: CachedStateMetrics,
}

impl<S> CachedStateProvider<S>
where
    S: StateProvider,
{
    /// Creates a new [`CachedStateProvider`] from an [`ExecutionCache`], state provider, and
    /// [`CachedStateMetrics`].
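    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled); `provider` stands for any type implementing
    /// [`StateProvider`], such as the mock provider used in the tests below:
    ///
    /// ```ignore
    /// let caches = ExecutionCacheBuilder::default().build_caches(1000);
    /// let cached = CachedStateProvider::new_with_caches(
    ///     provider,
    ///     caches,
    ///     CachedStateMetrics::zeroed(),
    /// );
    /// // Reads now consult the shared cache before falling back to `provider`.
    /// let _ = cached.storage(Address::random(), StorageKey::random())?;
    /// ```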
    pub(crate) const fn new_with_caches(
        state_provider: S,
        caches: ExecutionCache,
        metrics: CachedStateMetrics,
    ) -> Self {
        Self { state_provider, caches, metrics }
    }
}

/// Metrics for the cached state provider, showing hits / misses for each cache
#[derive(Metrics, Clone)]
#[metrics(scope = "sync.caching")]
pub(crate) struct CachedStateMetrics {
    /// Code cache hits
    code_cache_hits: Gauge,

    /// Code cache misses
    code_cache_misses: Gauge,

    /// Code cache size
    ///
    /// NOTE: this uses the moka cache's `entry_count`, NOT its `weighted_size` method, to
    /// calculate the size.
    code_cache_size: Gauge,

    /// Storage cache hits
    storage_cache_hits: Gauge,

    /// Storage cache misses
    storage_cache_misses: Gauge,

    /// Storage cache size
    ///
    /// NOTE: this uses the moka cache's `entry_count`, NOT its `weighted_size` method, to
    /// calculate the size.
    storage_cache_size: Gauge,

    /// Account cache hits
    account_cache_hits: Gauge,

    /// Account cache misses
    account_cache_misses: Gauge,

    /// Account cache size
    ///
    /// NOTE: this uses the moka cache's `entry_count`, NOT its `weighted_size` method, to
    /// calculate the size.
    account_cache_size: Gauge,
}

impl CachedStateMetrics {
    /// Sets all hit and miss counters to zero, indicating that a new block is being executed.
    pub(crate) fn reset(&self) {
        // code cache
        self.code_cache_hits.set(0);
        self.code_cache_misses.set(0);

        // storage cache
        self.storage_cache_hits.set(0);
        self.storage_cache_misses.set(0);

        // account cache
        self.account_cache_hits.set(0);
        self.account_cache_misses.set(0);
    }

    /// Returns a new zeroed-out instance of [`CachedStateMetrics`].
    pub(crate) fn zeroed() -> Self {
        let zeroed = Self::default();
        zeroed.reset();
        zeroed
    }
}

impl<S: AccountReader> AccountReader for CachedStateProvider<S> {
    fn basic_account(&self, address: &Address) -> ProviderResult<Option<Account>> {
        if let Some(res) = self.caches.account_cache.get(address) {
            self.metrics.account_cache_hits.increment(1);
            return Ok(res)
        }

        self.metrics.account_cache_misses.increment(1);

        let res = self.state_provider.basic_account(address)?;
        self.caches.account_cache.insert(*address, res);
        Ok(res)
    }
}

/// Represents the status of a storage slot in the cache.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) enum SlotStatus {
    /// The account's storage cache doesn't exist.
    NotCached,
    /// The storage slot exists in cache and is empty (value is zero).
    Empty,
    /// The storage slot exists in cache and has a specific non-zero value.
    Value(StorageValue),
}

impl<S: StateProvider> StateProvider for CachedStateProvider<S> {
    fn storage(
        &self,
        account: Address,
        storage_key: StorageKey,
    ) -> ProviderResult<Option<StorageValue>> {
        match self.caches.get_storage(&account, &storage_key) {
            SlotStatus::NotCached => {
                self.metrics.storage_cache_misses.increment(1);
                let final_res = self.state_provider.storage(account, storage_key)?;
                self.caches.insert_storage(account, storage_key, final_res);
                Ok(final_res)
            }
            SlotStatus::Empty => {
                self.metrics.storage_cache_hits.increment(1);
                Ok(None)
            }
            SlotStatus::Value(value) => {
                self.metrics.storage_cache_hits.increment(1);
                Ok(Some(value))
            }
        }
    }
}

impl<S: BytecodeReader> BytecodeReader for CachedStateProvider<S> {
    fn bytecode_by_hash(&self, code_hash: &B256) -> ProviderResult<Option<Bytecode>> {
        if let Some(res) = self.caches.code_cache.get(code_hash) {
            self.metrics.code_cache_hits.increment(1);
            return Ok(res)
        }

        self.metrics.code_cache_misses.increment(1);

        let final_res = self.state_provider.bytecode_by_hash(code_hash)?;
        self.caches.code_cache.insert(*code_hash, final_res.clone());
        Ok(final_res)
    }
}

impl<S: StateRootProvider> StateRootProvider for CachedStateProvider<S> {
    fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult<B256> {
        self.state_provider.state_root(hashed_state)
    }

    fn state_root_from_nodes(&self, input: TrieInput) -> ProviderResult<B256> {
        self.state_provider.state_root_from_nodes(input)
    }

    fn state_root_with_updates(
        &self,
        hashed_state: HashedPostState,
    ) -> ProviderResult<(B256, TrieUpdates)> {
        self.state_provider.state_root_with_updates(hashed_state)
    }

    fn state_root_from_nodes_with_updates(
        &self,
        input: TrieInput,
    ) -> ProviderResult<(B256, TrieUpdates)> {
        self.state_provider.state_root_from_nodes_with_updates(input)
    }
}

impl<S: StateProofProvider> StateProofProvider for CachedStateProvider<S> {
    fn proof(
        &self,
        input: TrieInput,
        address: Address,
        slots: &[B256],
    ) -> ProviderResult<AccountProof> {
        self.state_provider.proof(input, address, slots)
    }

    fn multiproof(
        &self,
        input: TrieInput,
        targets: MultiProofTargets,
    ) -> ProviderResult<MultiProof> {
        self.state_provider.multiproof(input, targets)
    }

    fn witness(
        &self,
        input: TrieInput,
        target: HashedPostState,
    ) -> ProviderResult<Vec<alloy_primitives::Bytes>> {
        self.state_provider.witness(input, target)
    }
}

impl<S: StorageRootProvider> StorageRootProvider for CachedStateProvider<S> {
    fn storage_root(
        &self,
        address: Address,
        hashed_storage: HashedStorage,
    ) -> ProviderResult<B256> {
        self.state_provider.storage_root(address, hashed_storage)
    }

    fn storage_proof(
        &self,
        address: Address,
        slot: B256,
        hashed_storage: HashedStorage,
    ) -> ProviderResult<StorageProof> {
        self.state_provider.storage_proof(address, slot, hashed_storage)
    }

    /// Generate a storage multiproof for multiple storage slots.
    ///
    /// A **storage multiproof** is a cryptographic proof that can verify the values
    /// of multiple storage slots for a single account in a single verification step.
    /// Instead of generating separate proofs for each slot (which would be inefficient),
    /// a multiproof bundles the necessary trie nodes to prove all requested slots.
    ///
    /// ## How it works:
    /// 1. Takes an account address and a list of storage slot keys
    /// 2. Traverses the account's storage trie to collect proof nodes
    /// 3. Returns a [`StorageMultiProof`] containing the minimal set of trie nodes needed to verify
    ///    all the requested storage slots
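    ///
    /// ## Example
    ///
    /// An illustrative sketch (not compiled); `provider`, `address`, `slot_a`, and `slot_b` are
    /// placeholders:
    ///
    /// ```ignore
    /// // Prove two slots of one account with a single multiproof.
    /// let proof: StorageMultiProof =
    ///     provider.storage_multiproof(address, &[slot_a, slot_b], HashedStorage::default())?;
    /// ```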
    fn storage_multiproof(
        &self,
        address: Address,
        slots: &[B256],
        hashed_storage: HashedStorage,
    ) -> ProviderResult<StorageMultiProof> {
        self.state_provider.storage_multiproof(address, slots, hashed_storage)
    }
}

impl<S: BlockHashReader> BlockHashReader for CachedStateProvider<S> {
    fn block_hash(&self, number: alloy_primitives::BlockNumber) -> ProviderResult<Option<B256>> {
        self.state_provider.block_hash(number)
    }

    fn canonical_hashes_range(
        &self,
        start: alloy_primitives::BlockNumber,
        end: alloy_primitives::BlockNumber,
    ) -> ProviderResult<Vec<B256>> {
        self.state_provider.canonical_hashes_range(start, end)
    }
}

impl<S: HashedPostStateProvider> HashedPostStateProvider for CachedStateProvider<S> {
    fn hashed_post_state(&self, bundle_state: &reth_revm::db::BundleState) -> HashedPostState {
        self.state_provider.hashed_post_state(bundle_state)
    }
}

/// Execution cache used during block processing.
///
/// Optimizes state access by maintaining in-memory copies of frequently accessed
/// accounts, storage slots, and bytecode. Works in conjunction with prewarming
/// to reduce database I/O during block execution.
#[derive(Debug, Clone)]
pub(crate) struct ExecutionCache {
    /// Cache for contract bytecode, keyed by code hash.
    code_cache: Cache<B256, Option<Bytecode>>,

    /// Per-account storage cache: outer cache keyed by Address, inner cache tracks that account’s
    /// storage slots.
    storage_cache: Cache<Address, Arc<AccountStorageCache>>,

    /// Cache for basic account information (nonce, balance, code hash).
    account_cache: Cache<Address, Option<Account>>,
}

impl ExecutionCache {
    /// Get storage value from hierarchical cache.
    ///
    /// Returns a `SlotStatus` indicating whether:
    /// - `NotCached`: The account's storage cache doesn't exist
    /// - `Empty`: The slot exists in the account's cache but is empty
    /// - `Value`: The slot exists and has a specific value
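    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled), mirroring the `get_storage` tests below; `address`
    /// and `key` are placeholders:
    ///
    /// ```ignore
    /// let caches = ExecutionCacheBuilder::default().build_caches(1000);
    /// assert_eq!(caches.get_storage(&address, &key), SlotStatus::NotCached);
    ///
    /// caches.insert_storage(address, key, None);
    /// assert_eq!(caches.get_storage(&address, &key), SlotStatus::Empty);
    ///
    /// caches.insert_storage(address, key, Some(StorageValue::from(1)));
    /// assert_eq!(caches.get_storage(&address, &key), SlotStatus::Value(StorageValue::from(1)));
    /// ```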
    pub(crate) fn get_storage(&self, address: &Address, key: &StorageKey) -> SlotStatus {
        match self.storage_cache.get(address) {
            None => SlotStatus::NotCached,
            Some(account_cache) => account_cache.get_storage(key),
        }
    }

    /// Insert storage value into hierarchical cache
    pub(crate) fn insert_storage(
        &self,
        address: Address,
        key: StorageKey,
        value: Option<StorageValue>,
    ) {
        self.insert_storage_bulk(address, [(key, value)]);
    }

    /// Insert multiple storage values into hierarchical cache for a single account
    ///
    /// This method is optimized for inserting multiple storage values for the same address
    /// by doing the account cache lookup only once instead of for each key-value pair.
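    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled); `address`, `slot_a`, and `slot_b` are placeholders:
    ///
    /// ```ignore
    /// // `slot_b` is cached as empty (no value).
    /// caches.insert_storage_bulk(address, [(slot_a, Some(StorageValue::from(1))), (slot_b, None)]);
    /// ```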
    pub(crate) fn insert_storage_bulk<I>(&self, address: Address, storage_entries: I)
    where
        I: IntoIterator<Item = (StorageKey, Option<StorageValue>)>,
    {
        let account_cache = self.storage_cache.get(&address).unwrap_or_default();

        for (key, value) in storage_entries {
            account_cache.insert_storage(key, value);
        }

        // Re-insert into the cache so that moka picks up the changed size, even though the actual
        // value (the Arc<AccountStorageCache>) is the same
        self.storage_cache.insert(address, account_cache);
    }

    /// Invalidate storage for specific account
    pub(crate) fn invalidate_account_storage(&self, address: &Address) {
        self.storage_cache.invalidate(address);
    }

    /// Returns the total number of storage slots cached across all accounts
    pub(crate) fn total_storage_slots(&self) -> usize {
        self.storage_cache.iter().map(|addr| addr.len()).sum()
    }

    /// Inserts the post-execution state changes into the cache.
    ///
    /// This method is called after transaction execution to update the cache with
    /// the touched and modified state. The insertion order is critical:
    ///
    /// 1. Bytecodes: Insert contract code first
    /// 2. Storage slots: Update storage values for each account
    /// 3. Accounts: Update account info (nonce, balance, code hash)
    ///
    /// ## Why This Order Matters
    ///
    /// Account information references bytecode via code hash. If we update accounts
    /// before bytecode, we might create cache entries pointing to non-existent code.
    /// The current order ensures cache consistency.
    ///
    /// ## Error Handling
    ///
    /// Returns an error if the state updates are inconsistent and should be discarded.
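    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled); `bundle_state` stands for the [`BundleState`]
    /// produced by executing a block:
    ///
    /// ```ignore
    /// if caches.insert_state(&bundle_state).is_err() {
    ///     // The updates were inconsistent; discard the caches rather than reuse them.
    /// }
    /// ```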
    #[instrument(level = "debug", target = "engine::caching", skip_all)]
    pub(crate) fn insert_state(&self, state_updates: &BundleState) -> Result<(), ()> {
        let _enter =
            debug_span!(target: "engine::tree", "contracts", len = state_updates.contracts.len())
                .entered();
        // Insert bytecodes
        for (code_hash, bytecode) in &state_updates.contracts {
            self.code_cache.insert(*code_hash, Some(Bytecode(bytecode.clone())));
        }
        drop(_enter);

        let _enter = debug_span!(
            target: "engine::tree",
            "accounts",
            accounts = state_updates.state.len(),
            storages =
                state_updates.state.values().map(|account| account.storage.len()).sum::<usize>()
        )
        .entered();
        for (addr, account) in &state_updates.state {
            // If the account was not modified (neither changed nor destroyed), there is nothing to
            // do for this account and we can move on
            if account.status.is_not_modified() {
                continue
            }

            // If the account was destroyed, invalidate from the account / storage caches
            if account.was_destroyed() {
                // Invalidate the account cache entry if destroyed
                self.account_cache.invalidate(addr);

                self.invalidate_account_storage(addr);
                continue
            }

            // A modified account with `None` account info should be unrepresentable: an account
            // whose current info is `None` should have been destroyed. Treat this as an error so
            // the inconsistent updates are discarded.
            let Some(ref account_info) = account.info else {
                trace!(target: "engine::caching", ?account, "Account with None account info found in state updates");
                return Err(())
            };

            // Now we iterate over all storage and make updates to the cached storage values
            // Use bulk insertion to optimize cache lookups - only lookup the account cache once
            // instead of for each storage key
            let storage_entries = account.storage.iter().map(|(storage_key, slot)| {
                // We convert the storage key from U256 to B256 because that is how it's represented
                // in the cache
                ((*storage_key).into(), Some(slot.present_value))
            });
            self.insert_storage_bulk(*addr, storage_entries);

            // Insert will update if present, so we just use the new account info as the new value
            // for the account cache
            self.account_cache.insert(*addr, Some(Account::from(account_info)));
        }

        Ok(())
    }
}

/// A builder for [`ExecutionCache`].
#[derive(Debug)]
pub(crate) struct ExecutionCacheBuilder {
    /// Code cache entries
    code_cache_entries: u64,

    /// Storage cache entries
    storage_cache_entries: u64,

    /// Account cache entries
    account_cache_entries: u64,
}

impl ExecutionCacheBuilder {
    /// Builds the [`ExecutionCache`], distributing `total_cache_size` (a weighted capacity,
    /// measured in bytes by the weighers below) across the storage, account, and code caches.
    /// The resulting [`ExecutionCache`] is cheap to clone and share.
    pub(crate) fn build_caches(self, total_cache_size: u64) -> ExecutionCache {
        let storage_cache_size = (total_cache_size * 8888) / 10000; // 88.88% of total
        let account_cache_size = (total_cache_size * 556) / 10000; // 5.56% of total
        let code_cache_size = (total_cache_size * 556) / 10000; // 5.56% of total
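        // Note: the split above appears to mirror the reference configuration documented on
        // `ExecutionCacheBuilder::default()` below: roughly 8 GB storage + 0.5 GB accounts +
        // 0.5 GB code out of a 9 GB total (8/9 ≈ 88.88%, 0.5/9 ≈ 5.56%).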

        const EXPIRY_TIME: Duration = Duration::from_secs(7200); // 2 hours
        const TIME_TO_IDLE: Duration = Duration::from_secs(3600); // 1 hour

        let storage_cache = CacheBuilder::new(self.storage_cache_entries)
            .weigher(|_key: &Address, value: &Arc<AccountStorageCache>| -> u32 {
                // values based on results from measure_storage_cache_overhead test
                let base_weight = 39_000;
                let slots_weight = value.len() * 218;
                (base_weight + slots_weight) as u32
            })
            .max_capacity(storage_cache_size)
            .time_to_live(EXPIRY_TIME)
            .time_to_idle(TIME_TO_IDLE)
            .build_with_hasher(DefaultHashBuilder::default());

        let account_cache = CacheBuilder::new(self.account_cache_entries)
            .weigher(|_key: &Address, value: &Option<Account>| -> u32 {
                // Account has a fixed size (nonce, balance, code_hash)
                20 + size_of_val(value) as u32
            })
            .max_capacity(account_cache_size)
            .time_to_live(EXPIRY_TIME)
            .time_to_idle(TIME_TO_IDLE)
            .build_with_hasher(DefaultHashBuilder::default());

        let code_cache = CacheBuilder::new(self.code_cache_entries)
            .weigher(|_key: &B256, value: &Option<Bytecode>| -> u32 {
                let code_size = match value {
                    Some(bytecode) => {
                        // base weight + actual (padded) bytecode size + size of the jump table
                        (size_of_val(value) +
                            bytecode.bytecode().len() +
                            bytecode
                                .legacy_jump_table()
                                .map(|table| table.as_slice().len())
                                .unwrap_or_default()) as u32
                    }
                    None => size_of_val(value) as u32,
                };
                32 + code_size
            })
            .max_capacity(code_cache_size)
            .time_to_live(EXPIRY_TIME)
            .time_to_idle(TIME_TO_IDLE)
            .build_with_hasher(DefaultHashBuilder::default());

        ExecutionCache { code_cache, storage_cache, account_cache }
    }
}

impl Default for ExecutionCacheBuilder {
    fn default() -> Self {
        // With weigher and max_capacity in place, these numbers represent
        // the maximum number of entries that can be stored, not the actual
        // memory usage which is controlled by max_capacity.
        //
        // Code cache: up to 10M entries but limited to 0.5GB
        // Storage cache: up to 10M accounts but limited to 8GB
        // Account cache: up to 10M accounts but limited to 0.5GB
        Self {
            code_cache_entries: 10_000_000,
            storage_cache_entries: 10_000_000,
            account_cache_entries: 10_000_000,
        }
    }
}

/// A saved cache that was used to execute a specific block and has been updated with the state
/// changes from that execution.
#[derive(Debug, Clone)]
pub(crate) struct SavedCache {
    /// The hash of the block these caches were used to execute.
    hash: B256,

    /// The caches used for the provider.
    caches: ExecutionCache,

    /// Metrics for the cached state provider
    metrics: CachedStateMetrics,

    /// A guard to track in-flight usage of this cache.
    /// The cache is considered available if the strong count is 1.
    usage_guard: Arc<()>,
}

impl SavedCache {
    /// Creates a new instance from the given parts.
    pub(super) fn new(hash: B256, caches: ExecutionCache, metrics: CachedStateMetrics) -> Self {
        Self { hash, caches, metrics, usage_guard: Arc::new(()) }
    }

    /// Returns the hash for this cache
    pub(crate) const fn executed_block_hash(&self) -> B256 {
        self.hash
    }

    /// Splits the cache into its caches and metrics, consuming it.
    pub(crate) fn split(self) -> (ExecutionCache, CachedStateMetrics) {
        (self.caches, self.metrics)
    }

    /// Returns true if the cache is available for use (no other tasks are currently using it).
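    ///
    /// # Example
    ///
    /// An illustrative sketch (not compiled), mirroring `test_saved_cache_is_available` below;
    /// availability is driven by the strong count of the internal `usage_guard`:
    ///
    /// ```ignore
    /// let cache = SavedCache::new(B256::ZERO, execution_cache, CachedStateMetrics::zeroed());
    /// assert!(cache.is_available());            // only one reference to the guard
    /// let guard = cache.clone_guard_for_test(); // test-only helper that clones the guard
    /// assert!(!cache.is_available());           // a second reference marks the cache as in use
    /// ```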
    pub(crate) fn is_available(&self) -> bool {
        Arc::strong_count(&self.usage_guard) == 1
    }

    /// Returns the [`ExecutionCache`] belonging to the tracked hash.
    pub(crate) const fn cache(&self) -> &ExecutionCache {
        &self.caches
    }

    /// Returns the metrics associated with this cache.
    pub(crate) const fn metrics(&self) -> &CachedStateMetrics {
        &self.metrics
    }

    /// Updates the metrics for the [`ExecutionCache`].
    pub(crate) fn update_metrics(&self) {
        self.metrics.storage_cache_size.set(self.caches.total_storage_slots() as f64);
        self.metrics.account_cache_size.set(self.caches.account_cache.entry_count() as f64);
        self.metrics.code_cache_size.set(self.caches.code_cache.entry_count() as f64);
    }
}

#[cfg(test)]
impl SavedCache {
    fn clone_guard_for_test(&self) -> Arc<()> {
        self.usage_guard.clone()
    }
}

/// Cache for an individual account's storage slots.
///
/// This represents the second level of the hierarchical storage cache.
/// Each account gets its own `AccountStorageCache` to store accessed storage slots.
#[derive(Debug, Clone)]
pub(crate) struct AccountStorageCache {
    /// Map of storage keys to their cached values.
    slots: Cache<StorageKey, Option<StorageValue>>,
}

impl AccountStorageCache {
    /// Create a new [`AccountStorageCache`]
    pub(crate) fn new(max_slots: u64) -> Self {
        Self {
            slots: CacheBuilder::new(max_slots).build_with_hasher(DefaultHashBuilder::default()),
        }
    }

    /// Get a storage value from this account's cache.
    /// - `NotCached`: The slot is not in the cache
    /// - `Empty`: The slot is empty
    /// - `Value`: The slot has a specific value
    pub(crate) fn get_storage(&self, key: &StorageKey) -> SlotStatus {
        match self.slots.get(key) {
            None => SlotStatus::NotCached,
            Some(None) => SlotStatus::Empty,
            Some(Some(value)) => SlotStatus::Value(value),
        }
    }

    /// Insert a storage value
    pub(crate) fn insert_storage(&self, key: StorageKey, value: Option<StorageValue>) {
        self.slots.insert(key, value);
    }

    /// Returns the number of slots in the cache
    pub(crate) fn len(&self) -> usize {
        self.slots.entry_count() as usize
    }
}

impl Default for AccountStorageCache {
    fn default() -> Self {
        // With a weigher and max_capacity in place on the outer storage cache, this number
        // represents the maximum number of entries that can be stored, not the actual memory
        // usage, which is controlled by the storage cache's max_capacity.
        Self::new(1_000_000)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use alloy_primitives::{B256, U256};
    use rand::Rng;
    use reth_provider::test_utils::{ExtendedAccount, MockEthProvider};
    use std::mem::size_of;

    mod tracking_allocator {
        use std::{
            alloc::{GlobalAlloc, Layout, System},
            sync::atomic::{AtomicUsize, Ordering},
        };

        #[derive(Debug)]
        pub(crate) struct TrackingAllocator {
            allocated: AtomicUsize,
            total_allocated: AtomicUsize,
            inner: System,
        }

        impl TrackingAllocator {
            pub(crate) const fn new() -> Self {
                Self {
                    allocated: AtomicUsize::new(0),
                    total_allocated: AtomicUsize::new(0),
                    inner: System,
                }
            }

            pub(crate) fn reset(&self) {
                self.allocated.store(0, Ordering::SeqCst);
                self.total_allocated.store(0, Ordering::SeqCst);
            }

            pub(crate) fn total_allocated(&self) -> usize {
                self.total_allocated.load(Ordering::SeqCst)
            }
        }

        unsafe impl GlobalAlloc for TrackingAllocator {
            unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
                let ret = unsafe { self.inner.alloc(layout) };
                if !ret.is_null() {
                    self.allocated.fetch_add(layout.size(), Ordering::SeqCst);
                    self.total_allocated.fetch_add(layout.size(), Ordering::SeqCst);
                }
                ret
            }

            unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
                self.allocated.fetch_sub(layout.size(), Ordering::SeqCst);
                unsafe { self.inner.dealloc(ptr, layout) }
            }
        }
    }

    use tracking_allocator::TrackingAllocator;

    #[global_allocator]
    static ALLOCATOR: TrackingAllocator = TrackingAllocator::new();

    fn measure_allocation<T, F>(f: F) -> (usize, T)
    where
        F: FnOnce() -> T,
    {
        ALLOCATOR.reset();
        let result = f();
        let total = ALLOCATOR.total_allocated();
        (total, result)
    }

    #[test]
    fn measure_storage_cache_overhead() {
        let (base_overhead, cache) = measure_allocation(|| AccountStorageCache::new(1000));
        println!("Base AccountStorageCache overhead: {base_overhead} bytes");
        let mut rng = rand::rng();

        let key = StorageKey::random();
        let value = StorageValue::from(rng.random::<u128>());
        let (first_slot, _) = measure_allocation(|| {
            cache.insert_storage(key, Some(value));
        });
        println!("First slot insertion overhead: {first_slot} bytes");

        const TOTAL_SLOTS: usize = 10_000;
        let (test_slots, _) = measure_allocation(|| {
            for _ in 0..TOTAL_SLOTS {
                let key = StorageKey::random();
                let value = StorageValue::from(rng.random::<u128>());
                cache.insert_storage(key, Some(value));
            }
        });
        println!("Average overhead over {} slots: {} bytes", TOTAL_SLOTS, test_slots / TOTAL_SLOTS);

        println!("\nTheoretical sizes:");
        println!("StorageKey size: {} bytes", size_of::<StorageKey>());
        println!("StorageValue size: {} bytes", size_of::<StorageValue>());
        println!("Option<StorageValue> size: {} bytes", size_of::<Option<StorageValue>>());
        println!("Option<B256> size: {} bytes", size_of::<Option<B256>>());
    }

    #[test]
    fn test_empty_storage_cached_state_provider() {
        // make sure when we have an empty value in storage, we return `Empty` and not `NotCached`
        let address = Address::random();
        let storage_key = StorageKey::random();
        let account = ExtendedAccount::new(0, U256::ZERO);

        // note there is no storage here
        let provider = MockEthProvider::default();
        provider.extend_accounts(vec![(address, account)]);

        let caches = ExecutionCacheBuilder::default().build_caches(1000);
        let state_provider =
            CachedStateProvider::new_with_caches(provider, caches, CachedStateMetrics::zeroed());

        // check that the storage is empty
        let res = state_provider.storage(address, storage_key);
        assert!(res.is_ok());
        assert_eq!(res.unwrap(), None);
    }

    #[test]
    fn test_uncached_storage_cached_state_provider() {
        // make sure that when a slot is not cached, we fall back to the underlying provider and
        // return its value
        let address = Address::random();
        let storage_key = StorageKey::random();
        let storage_value = U256::from(1);
        let account =
            ExtendedAccount::new(0, U256::ZERO).extend_storage(vec![(storage_key, storage_value)]);

        // note that we extend storage here with one value
        let provider = MockEthProvider::default();
        provider.extend_accounts(vec![(address, account)]);

        let caches = ExecutionCacheBuilder::default().build_caches(1000);
        let state_provider =
            CachedStateProvider::new_with_caches(provider, caches, CachedStateMetrics::zeroed());

        // check that the storage returns the expected value
        let res = state_provider.storage(address, storage_key);
        assert!(res.is_ok());
        assert_eq!(res.unwrap(), Some(storage_value));
    }

    #[test]
    fn test_get_storage_populated() {
        // make sure when we have something cached, we get the cached value in the `SlotStatus`
        let address = Address::random();
        let storage_key = StorageKey::random();
        let storage_value = U256::from(1);

        // insert into caches directly
        let caches = ExecutionCacheBuilder::default().build_caches(1000);
        caches.insert_storage(address, storage_key, Some(storage_value));

        // check that the storage returns the cached value
        let slot_status = caches.get_storage(&address, &storage_key);
        assert_eq!(slot_status, SlotStatus::Value(storage_value));
    }

    #[test]
    fn test_get_storage_not_cached() {
        // make sure when we have nothing cached, we get the `NotCached` value in the `SlotStatus`
        let storage_key = StorageKey::random();
        let address = Address::random();

        // just create empty caches
        let caches = ExecutionCacheBuilder::default().build_caches(1000);

        // check that the storage is not cached
        let slot_status = caches.get_storage(&address, &storage_key);
        assert_eq!(slot_status, SlotStatus::NotCached);
    }

    #[test]
    fn test_get_storage_empty() {
        // make sure when we insert an empty value to the cache, we get the `Empty` value in the
        // `SlotStatus`
        let address = Address::random();
        let storage_key = StorageKey::random();

        // insert into caches directly
        let caches = ExecutionCacheBuilder::default().build_caches(1000);
        caches.insert_storage(address, storage_key, None);

        // check that the storage is empty
        let slot_status = caches.get_storage(&address, &storage_key);
        assert_eq!(slot_status, SlotStatus::Empty);
    }

    // Tests for SavedCache locking mechanism
    #[test]
    fn test_saved_cache_is_available() {
        let execution_cache = ExecutionCacheBuilder::default().build_caches(1000);
        let cache = SavedCache::new(B256::ZERO, execution_cache, CachedStateMetrics::zeroed());

        // Initially, the cache should be available (only one reference)
        assert!(cache.is_available(), "Cache should be available initially");

        // Clone the usage guard (simulating it being handed out)
        let _guard = cache.clone_guard_for_test();

        // Now the cache should not be available (two references)
        assert!(!cache.is_available(), "Cache should not be available with active guard");
    }

    #[test]
    fn test_saved_cache_multiple_references() {
        let execution_cache = ExecutionCacheBuilder::default().build_caches(1000);
        let cache =
            SavedCache::new(B256::from([2u8; 32]), execution_cache, CachedStateMetrics::zeroed());

        // Create multiple references to the usage guard
        let guard1 = cache.clone_guard_for_test();
        let guard2 = cache.clone_guard_for_test();
        let guard3 = guard1.clone();

        // Cache should not be available with multiple guards
        assert!(!cache.is_available());

        // Drop guards one by one
        drop(guard1);
        assert!(!cache.is_available()); // Still not available

        drop(guard2);
        assert!(!cache.is_available()); // Still not available

        drop(guard3);
        assert!(cache.is_available()); // Now available
    }
}