// reth_trie/proof_v2/mod.rs

//! Proof calculation version 2: Leaf-only implementation.
//!
//! This module provides a rewritten proof calculator that:
//! - Uses only leaf data (HashedAccounts/Storages) to generate proofs
//! - Returns proof nodes sorted lexicographically by path
//! - Automatically resets after each calculation
//! - Re-uses cursors across calculations
//! - Supports generic value types with lazy evaluation
10use crate::{
11    hashed_cursor::{HashedCursor, HashedStorageCursor},
12    trie_cursor::{depth_first, TrieCursor, TrieStorageCursor},
13};
14use alloy_primitives::{keccak256, B256, U256};
15use alloy_rlp::Encodable;
16use alloy_trie::{BranchNodeCompact, TrieMask};
17use reth_execution_errors::trie::StateProofError;
18use reth_trie_common::{
19    prefix_set::PrefixSet, BranchNodeMasks, BranchNodeRef, BranchNodeV2, Nibbles, ProofTrieNodeV2,
20    ProofV2Target, RlpNode, TrieNodeV2,
21};
22use std::cmp::Ordering;
23use tracing::{error, instrument, trace};
24
25mod value;
26pub use value::*;
27
28mod node;
29use node::*;
30
31mod target;
32pub(crate) use target::*;
33
/// Target to use with the `tracing` crate.
static TRACE_TARGET: &str = "trie::proof_v2";

/// Number of bytes to pre-allocate for [`ProofCalculator`]'s `rlp_encode_buf` field.
const RLP_ENCODE_BUF_SIZE: usize = 1024;
39
/// A proof calculator that generates merkle proofs using only leaf data.
///
/// The calculator:
/// - Accepts one or more B256 proof targets sorted lexicographically
/// - Returns proof nodes sorted lexicographically by path
/// - Automatically resets after each calculation
/// - Re-uses cursors from one calculation to the next
#[derive(Debug)]
pub struct ProofCalculator<TC, HC, VE: LeafValueEncoder> {
    /// Trie cursor for traversing stored branch nodes.
    trie_cursor: TC,
    /// Hashed cursor for iterating over leaf data.
    hashed_cursor: HC,
    /// Branches which are currently in the process of being constructed, each being a child of
    /// the previous one.
    branch_stack: Vec<ProofTrieBranch>,
    /// The path of the last branch in `branch_stack`.
    branch_path: Nibbles,
    /// Children of branches in the `branch_stack`.
    ///
    /// Each branch in `branch_stack` tracks which children are in this stack using its
    /// `state_mask`; the number of children the branch has in this stack is equal to the number of
    /// bits set in its `state_mask`.
    ///
    /// The children for the bottom branch in `branch_stack` are found at the bottom of this stack,
    /// and so on. When a branch is removed from `branch_stack` its children are removed from this
    /// one, and the branch is pushed onto this stack in their place (see [`Self::pop_branch`]).
    ///
    /// Children on the `child_stack` are converted to [`ProofTrieBranchChild::RlpNode`]s via the
    /// [`Self::commit_child`] method. Committing a child indicates that no further changes are
    /// expected to happen to it (e.g. splitting its short key when inserting a new branch). Given
    /// that keys are consumed in lexicographical order, only the last child on the stack can
    /// ever be modified, and therefore all children besides the last are expected to be
    /// [`ProofTrieBranchChild::RlpNode`]s.
    child_stack: Vec<ProofTrieBranchChild<VE::DeferredEncoder>>,
    /// Cached branch data pulled from the `trie_cursor`. The calculator will use the cached
    /// [`BranchNodeCompact::hashes`] to skip over the calculation of sub-tries in the overall
    /// trie. The cached hashes cannot be used for any paths which are prefixes of a proof target.
    cached_branch_stack: Vec<(Nibbles, BranchNodeCompact)>,
    /// The proofs which will be returned from the calculation. This gets taken at the end of every
    /// proof call.
    retained_proofs: Vec<ProofTrieNodeV2>,
    /// Free-list of re-usable buffers of [`RlpNode`]s, used for encoding branch nodes to RLP.
    ///
    /// We are generally able to re-use these buffers across different branch nodes for the
    /// duration of a proof calculation, but occasionally we will lose one when a branch
    /// node is returned as a `ProofTrieNode`.
    rlp_nodes_bufs: Vec<Vec<RlpNode>>,
    /// Re-usable byte buffer, used for RLP encoding.
    rlp_encode_buf: Vec<u8>,
    /// Prefix set for tracking changed keys.
    prefix_set: PrefixSet,
}
93
94impl<TC, HC, VE: LeafValueEncoder> ProofCalculator<TC, HC, VE> {
95    /// Create a new [`ProofCalculator`] instance for calculating account proofs.
96    pub fn new(trie_cursor: TC, hashed_cursor: HC) -> Self {
97        Self {
98            trie_cursor,
99            hashed_cursor,
100            branch_stack: Vec::<_>::with_capacity(64),
101            branch_path: Nibbles::new(),
102            child_stack: Vec::<_>::with_capacity(64),
103            cached_branch_stack: Vec::<_>::with_capacity(64),
104            retained_proofs: Vec::<_>::with_capacity(32),
105            rlp_nodes_bufs: Vec::<_>::with_capacity(8),
106            rlp_encode_buf: Vec::<_>::with_capacity(RLP_ENCODE_BUF_SIZE),
107            prefix_set: PrefixSet::default(),
108        }
109    }
110
111    /// Sets the prefix set and returns `self`.
112    ///
113    /// When given, all cached hashes matching the [`PrefixSet`] will be invalidated. When all but
114    /// one of a branch's children match the prefix set then that remaining child's cached hash, if
115    /// any, will also be invalidated. This allows for properly handling branch collapse situations,
116    /// where all but one child of a branch is deleted and the remaining child is required to be
117    /// unrevealed in order to collapse the branch.
118    pub fn with_prefix_set(mut self, prefix_set: PrefixSet) -> Self {
119        self.prefix_set = prefix_set;
120        self
121    }
122}
123
124impl<TC, HC, VE> ProofCalculator<TC, HC, VE>
125where
126    TC: TrieCursor,
127    HC: HashedCursor,
128    VE: LeafValueEncoder<Value = HC::Value>,
129{
130    /// Takes a re-usable `RlpNode` buffer from the internal free-list, or allocates a new one if
131    /// the free-list is empty.
132    ///
133    /// The returned Vec will have a length of zero.
134    fn take_rlp_nodes_buf(&mut self) -> Vec<RlpNode> {
135        self.rlp_nodes_bufs
136            .pop()
137            .map(|mut buf| {
138                buf.clear();
139                buf
140            })
141            .unwrap_or_else(|| Vec::with_capacity(16))
142    }
143
144    // Returns zero if `branch_stack` is empty, one otherwise.
145    //
146    // This is used when working with the `ext_len` field of [`ProofTrieBranch`]. The `ext_len` is
147    // calculated by taking the difference of the current `branch_path` and the new branch's path;
148    // if the new branch has a parent branch (ie `branch_stack` is not empty) then 1 is subtracted
149    // from the `ext_len` to account for the child's nibble on the parent.
150    #[inline]
151    const fn maybe_parent_nibble(&self) -> usize {
152        !self.branch_stack.is_empty() as usize
153    }
154
    /// Returns true if the proof of a node at the given path should be retained. A node is retained
    /// if its path is a prefix of any target.
    ///
    /// This may move the `targets` iterator forward if the given path comes after the current
    /// target.
    ///
    /// This method takes advantage of the [`std::slice::Iter`] component of [`TargetsCursor`] to
    /// check the minimum number of targets. In general it looks at a current target and the next
    /// target simultaneously, forming an end-exclusive range.
    ///
    /// ```text
    /// * Given targets: [ 0x012, 0x045, 0x678 ]
    /// * targets.current() returns:
    ///     - (0x012, Some(0x045)): covers (0x012..0x045)
    ///     - (0x045, Some(0x678)): covers (0x045..0x678)
    ///     - (0x678, None): covers (0x678..)
    /// ```
    ///
    /// As long as the path which is passed in lies within that range we can continue to use the
    /// current target. Once the path goes beyond that range (ie path >= next target) then we can be
    /// sure that no further paths will be in the range, and we can iterate forward.
    ///
    /// ```text
    /// * Given:
    ///     - path: 0x04
    ///     - targets.current() returns (0x012, Some(0x045))
    ///
    /// * 0x04 comes _after_ 0x045 in depth-first order, so (0x012..0x045) does not contain 0x04.
    ///
    /// * targets.next() is called.
    ///
    /// * targets.current() now returns (0x045, Some(0x678)). This does contain 0x04.
    ///
    /// * 0x04 is a prefix of 0x045, and so is retained.
    /// ```
    #[instrument(
        target = TRACE_TARGET,
        level = "trace",
        skip_all,
        fields(?path, ?check_min_len),
        ret,
    )]
    fn should_retain<'a>(
        &self,
        targets: &mut Option<TargetsCursor<'a>>,
        path: &Nibbles,
        check_min_len: bool,
    ) -> bool {
        // If no targets are given then we never retain anything
        let Some(targets) = targets.as_mut() else { return false };

        let (mut lower, mut upper) = targets.current();

        // Sanity check: retention queries must arrive strictly after the last retained node in
        // depth-first order, matching the order in which the calculator produces nodes.
        debug_assert!(self.retained_proofs.last().is_none_or(
                |ProofTrieNodeV2 { path: last_retained_path, .. }| {
                    depth_first::cmp(path, last_retained_path) == Ordering::Greater
                }
            ),
            "should_retain called with path {path:?} which is not after previously retained node {:?} in depth-first order",
            self.retained_proofs.last().map(|n| n.path),
        );

        loop {
            // If the node in question is a prefix of the target then we do not iterate targets
            // further.
            //
            // Even if the node is a prefix of the target's key, if the target has a non-zero
            // `min_len` it indicates that the node should only be retained if it is
            // longer than that value.
            //
            // _However_ even if the node doesn't match the target due to the target's `min_len`, it
            // may match other targets whose keys match this node. So we search forwards and
            // backwards for all targets which might match this node, and check against the
            // `min_len` of each.
            //
            // For example, given a branch 0xabc, with children at 0, 1, and 2, and targets:
            // - key: 0xabc0, min_len: 2
            // - key: 0xabc1, min_len: 1
            // - key: 0xabc2, min_len: 4 <-- current
            // - key: 0xabc3, min_len: 3
            //
            // When the branch node at 0xabc is visited it will be after the targets has iterated
            // forward to 0xabc2 (because all children will have been visited already). At this
            // point the target for 0xabc2 will not match the branch due to its prefix, but any of
            // the other targets would, so we need to check those as well.
            if lower.key_nibbles.starts_with(path) {
                return !check_min_len ||
                    (path.len() >= lower.min_len as usize ||
                        targets
                            .skip_iter()
                            .take_while(|target| target.key_nibbles.starts_with(path))
                            .any(|target| path.len() >= target.min_len as usize) ||
                        targets
                            .rev_iter()
                            .take_while(|target| target.key_nibbles.starts_with(path))
                            .any(|target| path.len() >= target.min_len as usize))
            }

            // If the path isn't in the current range then iterate forward until it is (or until
            // there is no upper bound, indicating unbounded).
            if upper
                .is_some_and(|upper| depth_first::cmp(path, &upper.key_nibbles) != Ordering::Less)
            {
                (lower, upper) = targets.next();
                trace!(target: TRACE_TARGET, target = ?lower, "upper target <= path, next target");
            } else {
                return false
            }
        }
    }
265
266    /// Takes a child which has been removed from the `child_stack` and converts it to an
267    /// [`RlpNode`].
268    ///
269    /// Calling this method indicates that the child will not undergo any further modifications, and
270    /// therefore can be retained as a proof node if applicable.
271    fn commit_child<'a>(
272        &mut self,
273        targets: &mut Option<TargetsCursor<'a>>,
274        child_path: Nibbles,
275        child: ProofTrieBranchChild<VE::DeferredEncoder>,
276    ) -> Result<RlpNode, StateProofError> {
277        // If the child is already an `RlpNode` then there is nothing to do.
278        if let ProofTrieBranchChild::RlpNode(rlp_node) = child {
279            return Ok(rlp_node)
280        }
281
282        // If we should retain the child then do so.
283        if self.should_retain(targets, &child_path, true) {
284            trace!(target: TRACE_TARGET, ?child_path, "Retaining child");
285
286            // Convert to `ProofTrieNodeV2`, which will be what is retained.
287            //
288            // If this node is a branch then its `rlp_nodes_buf` will be taken and not returned to
289            // the `rlp_nodes_bufs` free-list.
290            self.rlp_encode_buf.clear();
291            let proof_node = child.into_proof_trie_node(child_path, &mut self.rlp_encode_buf)?;
292
293            // Use the `ProofTrieNodeV2` to encode the `RlpNode`, and then push it onto retained
294            // nodes before returning.
295            self.rlp_encode_buf.clear();
296            proof_node.node.encode(&mut self.rlp_encode_buf);
297
298            self.retained_proofs.push(proof_node);
299            return Ok(RlpNode::from_rlp(&self.rlp_encode_buf));
300        }
301
302        // If the child path is not being retained then we convert directly to an `RlpNode`
303        // using `into_rlp`. Since we are not retaining the node we can recover any `RlpNode`
304        // buffers for the free-list here, hence why we do this as a separate logical branch.
305        self.rlp_encode_buf.clear();
306        let (child_rlp_node, freed_rlp_nodes_buf) = child.into_rlp(&mut self.rlp_encode_buf)?;
307
308        // If there is an `RlpNode` buffer which can be re-used then push it onto the free-list.
309        if let Some(buf) = freed_rlp_nodes_buf {
310            self.rlp_nodes_bufs.push(buf);
311        }
312
313        Ok(child_rlp_node)
314    }
315
316    /// Returns the path of the child of the currently under-construction branch at the given
317    /// nibble.
318    #[inline]
319    fn child_path_at(&self, nibble: u8) -> Nibbles {
320        let mut child_path = self.branch_path;
321        debug_assert!(child_path.len() < 64);
322        child_path.push_unchecked(nibble);
323        child_path
324    }
325
326    /// Returns index of the highest nibble which is set in the mask.
327    ///
328    /// # Panics
329    ///
330    /// Will panic in debug mode if the mask is empty.
331    #[inline]
332    fn highest_set_nibble(mask: TrieMask) -> u8 {
333        debug_assert!(!mask.is_empty());
334        (u16::BITS - mask.leading_zeros() - 1) as u8
335    }
336
337    /// Returns the path of the child on top of the `child_stack`, or the root path if the stack is
338    /// empty. Returns None if the current branch has not yet pushed a child (empty `state_mask`).
339    fn last_child_path(&self) -> Option<Nibbles> {
340        // If there is no branch under construction then the top child must be the root child.
341        let Some(branch) = self.branch_stack.last() else {
342            return Some(Nibbles::new());
343        };
344
345        (!branch.state_mask.is_empty())
346            .then(|| self.child_path_at(Self::highest_set_nibble(branch.state_mask)))
347    }
348
349    /// Calls [`Self::commit_child`] on the last child of `child_stack`, replacing it with a
350    /// [`ProofTrieBranchChild::RlpNode`].
351    ///
352    /// If `child_stack` is empty then this is a no-op.
353    ///
354    /// NOTE that this method call relies on the `state_mask` of the top branch of the
355    /// `branch_stack` to determine the last child's path. When committing the last child prior to
356    /// pushing a new child, it's important to set the new child's `state_mask` bit _after_ the call
357    /// to this method.
358    #[instrument(
359        target = TRACE_TARGET,
360        level = "trace",
361        skip_all,
362        fields(child_path = ?self.last_child_path()),
363    )]
364    fn commit_last_child<'a>(
365        &mut self,
366        targets: &mut Option<TargetsCursor<'a>>,
367    ) -> Result<(), StateProofError> {
368        let Some(child_path) = self.last_child_path() else { return Ok(()) };
369        let child =
370            self.child_stack.pop().expect("child_stack can't be empty if there's a child path");
371
372        // If the child is already an `RlpNode` then there is nothing to do, push it back on with no
373        // changes.
374        if let ProofTrieBranchChild::RlpNode(_rlp_node) = &child {
375            trace!(target: TRACE_TARGET, ?_rlp_node, "Already RlpNode, pushing onto stack");
376            self.child_stack.push(child);
377            return Ok(())
378        }
379
380        // Only commit immediately if retained for the proof. Otherwise, defer conversion
381        // to pop_branch() to give DeferredEncoder time for async work.
382        if self.should_retain(targets, &child_path, true) {
383            let child_rlp_node = self.commit_child(targets, child_path, child)?;
384            trace!(target: TRACE_TARGET, ?child_rlp_node, "Pushing committed child RlpNode onto stack");
385            self.child_stack.push(ProofTrieBranchChild::RlpNode(child_rlp_node));
386        } else {
387            trace!(target: TRACE_TARGET, "Pushing uncommitted child onto stack");
388            self.child_stack.push(child);
389        }
390
391        Ok(())
392    }
393
394    /// Creates a new leaf node on a branch, setting its `state_mask` bit and pushing the leaf onto
395    /// the `child_stack`.
396    ///
397    /// # Panics
398    ///
399    /// - If `branch_stack` is empty
400    /// - If the leaf's nibble is already set in the branch's `state_mask`.
401    fn push_new_leaf<'a>(
402        &mut self,
403        targets: &mut Option<TargetsCursor<'a>>,
404        leaf_nibble: u8,
405        leaf_short_key: Nibbles,
406        leaf_val: VE::DeferredEncoder,
407    ) -> Result<(), StateProofError> {
408        // Before pushing the new leaf onto the `child_stack` we need to commit the previous last
409        // child, so that only `child_stack`'s final child is a non-RlpNode.
410        self.commit_last_child(targets)?;
411
412        // Once the last child is committed we set the new child's bit on the top branch's
413        // `state_mask` and push that new child.
414        let branch = self.branch_stack.last_mut().expect("branch_stack cannot be empty");
415
416        debug_assert!(!branch.state_mask.is_bit_set(leaf_nibble));
417        branch.state_mask.set_bit(leaf_nibble);
418
419        self.child_stack
420            .push(ProofTrieBranchChild::Leaf { short_key: leaf_short_key, value: leaf_val });
421
422        Ok(())
423    }
424
    /// Pushes a new branch onto the `branch_stack` based on the path and short key of the last
    /// child on the `child_stack` and the path of the next child which will be pushed on to the
    /// stack after this call.
    ///
    /// Returns the nibble of the branch's `state_mask` which should be set for the new child, and
    /// short key that the next child should use.
    fn push_new_branch(&mut self, new_child_path: Nibbles) -> (u8, Nibbles) {
        // First determine the new child's shortkey relative to the current branch. If there is no
        // current branch then the short key is the full path.
        let new_child_short_key = if self.branch_stack.is_empty() {
            new_child_path
        } else {
            // When there is a current branch then trim off its path as well as the nibble that it
            // has set for this leaf.
            trim_nibbles_prefix(&new_child_path, self.branch_path.len() + 1)
        };

        // Get the new branch's first child, which is the child on the top of the stack with which
        // the new child shares the same nibble on the current branch.
        let first_child = self
            .child_stack
            .last_mut()
            .expect("push_new_branch can't be called with empty child_stack");

        let first_child_short_key = first_child.short_key();
        debug_assert!(
            !first_child_short_key.is_empty(),
            "push_new_branch called when top child on stack is not a leaf or extension with a short key",
        );

        // Determine how many nibbles are shared between the new branch's first child and the new
        // child. This common prefix will be the extension of the new branch
        let common_prefix_len = first_child_short_key.common_prefix_length(&new_child_short_key);

        // Trim off the common prefix from the first child's short key, plus one nibble which will
        // be stored by the new branch itself in its state mask.
        let first_child_nibble = first_child_short_key.get_unchecked(common_prefix_len);
        first_child.trim_short_key_prefix(common_prefix_len + 1);

        // Similarly, trim off the common prefix, plus one nibble for the new branch, from the new
        // child's short key.
        let new_child_nibble = new_child_short_key.get_unchecked(common_prefix_len);
        let new_child_short_key = trim_nibbles_prefix(&new_child_short_key, common_prefix_len + 1);

        // Update the branch path to reflect the new branch about to be pushed. Its path will be
        // the path of the previous branch, plus the nibble shared by each child, plus the parent
        // extension (denoted by a non-zero `ext_len`). Since the new branch's path is a prefix of
        // the original new_child_path we can just slice that.
        //
        // If the new branch is the first branch then we do not add the extra 1, as there is no
        // nibble in a parent branch to account for.
        let branch_path_len =
            self.branch_path.len() + common_prefix_len + self.maybe_parent_nibble();
        self.branch_path = new_child_path.slice_unchecked(0, branch_path_len);

        // Push the new branch onto the `branch_stack`. We do not yet set the `state_mask` bit of
        // the new child; whatever actually pushes the child onto the `child_stack` is expected to
        // do that.
        self.branch_stack.push(ProofTrieBranch {
            ext_len: common_prefix_len as u8,
            state_mask: TrieMask::new(1 << first_child_nibble),
            masks: None,
        });

        trace!(
            target: TRACE_TARGET,
            ?new_child_path,
            ?common_prefix_len,
            ?first_child_nibble,
            branch_path = ?self.branch_path,
            "Pushed new branch",
        );

        (new_child_nibble, new_child_short_key)
    }
500
    /// Pops the top branch off of the `branch_stack`, hashes its children on the `child_stack`, and
    /// replaces those children on the `child_stack`. The `branch_path` field will be updated
    /// accordingly.
    ///
    /// # Panics
    ///
    /// This method panics if `branch_stack` is empty.
    #[instrument(target = TRACE_TARGET, level = "trace", skip_all)]
    fn pop_branch<'a>(
        &mut self,
        targets: &mut Option<TargetsCursor<'a>>,
    ) -> Result<(), StateProofError> {
        trace!(
            target: TRACE_TARGET,
            branch = ?self.branch_stack.last(),
            branch_path = ?self.branch_path,
            child_stack_len = ?self.child_stack.len(),
            "called",
        );

        // Ensure the final child on the child stack has been committed, as this method expects all
        // children of the branch to have been committed.
        self.commit_last_child(targets)?;

        // Re-use an `RlpNode` buffer from the free-list where possible.
        let mut rlp_nodes_buf = self.take_rlp_nodes_buf();
        let branch = self.branch_stack.pop().expect("branch_stack cannot be empty");

        // Take the branch's children off the stack, using the state mask to determine how many
        // there are.
        let num_children = branch.state_mask.count_ones() as usize;
        debug_assert!(
            self.child_stack.len() >= num_children,
            "Stack is missing necessary children ({num_children:?})"
        );
        debug_assert!(
            num_children >= 2,
            "A branch must have at least two children, got {num_children}"
        );

        // Collect children into RlpNode Vec. Children are in lexicographic order.
        for child in self.child_stack.drain(self.child_stack.len() - num_children..) {
            let child_rlp_node = match child {
                ProofTrieBranchChild::RlpNode(rlp_node) => rlp_node,
                uncommitted_child => {
                    // Convert uncommitted child (not retained for proof) to RlpNode now.
                    self.rlp_encode_buf.clear();
                    let (rlp_node, freed_buf) =
                        uncommitted_child.into_rlp(&mut self.rlp_encode_buf)?;
                    if let Some(buf) = freed_buf {
                        self.rlp_nodes_bufs.push(buf);
                    }
                    rlp_node
                }
            };
            rlp_nodes_buf.push(child_rlp_node);
        }

        debug_assert_eq!(
            rlp_nodes_buf.len(),
            branch.state_mask.count_ones() as usize,
            "children length must match number of bits set in state_mask"
        );

        // Calculate the short key of the parent extension (if the branch has a parent extension).
        // It's important to calculate this short key prior to modifying the `branch_path`.
        let short_key = trim_nibbles_prefix(
            &self.branch_path,
            self.branch_path.len() - branch.ext_len as usize,
        );

        // Compute hash for the branch node if it has a parent extension.
        let rlp_node = if short_key.is_empty() {
            None
        } else {
            self.rlp_encode_buf.clear();
            BranchNodeRef::new(&rlp_nodes_buf, branch.state_mask).encode(&mut self.rlp_encode_buf);
            Some(RlpNode::from_rlp(&self.rlp_encode_buf))
        };

        // Wrap the `BranchNodeV2` so it can be pushed onto the child stack.
        let branch_as_child = ProofTrieBranchChild::Branch {
            node: BranchNodeV2::new(short_key, rlp_nodes_buf, branch.state_mask, rlp_node),
            masks: branch.masks,
        };

        self.child_stack.push(branch_as_child);

        // Update the branch_path. If this branch is the only branch then only its extension needs
        // to be trimmed, otherwise we also need to remove its nibble from its parent.
        let new_path_len =
            self.branch_path.len() - branch.ext_len as usize - self.maybe_parent_nibble();

        debug_assert!(self.branch_path.len() >= new_path_len);
        self.branch_path = self.branch_path.slice_unchecked(0, new_path_len);

        Ok(())
    }
598
    /// Adds a single leaf for a key to the stack, possibly collapsing an existing branch and/or
    /// creating a new one depending on the path of the key.
    ///
    /// Keys must be pushed in lexicographic order; each iteration of the loop either pops a
    /// finished branch or places the leaf and returns.
    fn push_leaf<'a>(
        &mut self,
        targets: &mut Option<TargetsCursor<'a>>,
        key: Nibbles,
        val: VE::DeferredEncoder,
    ) -> Result<(), StateProofError> {
        loop {
            trace!(
                target: TRACE_TARGET,
                ?key,
                branch_stack_len = ?self.branch_stack.len(),
                branch_path = ?self.branch_path,
                child_stack_len = ?self.child_stack.len(),
                "push_leaf: loop",
            );

            // Get the `state_mask` of the branch currently being built. If there are no branches
            // on the stack then it means either the trie is empty or only a single leaf has been
            // added previously.
            let curr_branch_state_mask = match self.branch_stack.last() {
                Some(curr_branch) => curr_branch.state_mask,
                None if self.child_stack.is_empty() => {
                    // If the child stack is empty then this is the first leaf, push it and be done
                    self.child_stack
                        .push(ProofTrieBranchChild::Leaf { short_key: key, value: val });
                    return Ok(())
                }
                None => {
                    // If the child stack is not empty then it must only have a single other child
                    // which is either a leaf or extension with a non-zero short key.
                    debug_assert_eq!(self.child_stack.len(), 1);
                    debug_assert!(!self
                        .child_stack
                        .last()
                        .expect("already checked for emptiness")
                        .short_key()
                        .is_empty());
                    let (nibble, short_key) = self.push_new_branch(key);
                    self.push_new_leaf(targets, nibble, short_key, val)?;
                    return Ok(())
                }
            };

            // Find the common prefix length, which is the number of nibbles shared between the
            // current branch and the key.
            let common_prefix_len = self.branch_path.common_prefix_length(&key);

            // If the current branch does not share all of its nibbles with the new key then it is
            // not the parent of the new key. In this case the current branch will have no more
            // children. We can pop it and loop back to the top to try again with its parent branch.
            if common_prefix_len < self.branch_path.len() {
                self.pop_branch(targets)?;
                continue
            }

            // If the current branch is a prefix of the new key then the leaf is a child of the
            // branch. If the branch doesn't have the leaf's nibble set then the leaf can be added
            // directly, otherwise a new branch must be created in-between this branch and that
            // existing child.
            let nibble = key.get_unchecked(common_prefix_len);
            if curr_branch_state_mask.is_bit_set(nibble) {
                // Push a new branch which splits the short key of the existing child at this
                // nibble.
                let (nibble, short_key) = self.push_new_branch(key);
                // Push the new leaf onto the new branch.
                self.push_new_leaf(targets, nibble, short_key, val)?;
            } else {
                // The slot is free: the leaf hangs directly off the current branch, keyed by
                // everything after the branch path plus its nibble.
                let short_key = key.slice_unchecked(common_prefix_len + 1, key.len());
                self.push_new_leaf(targets, nibble, short_key, val)?;
            }

            return Ok(())
        }
    }
675
    /// Given the lower and upper bounds (exclusive) of a range of keys, iterates over the
    /// `hashed_cursor` and calculates all trie nodes possible based on those keys. If the upper
    /// bound is None then it is considered unbounded.
    ///
    /// It is expected that this method is "driven" by `next_uncached_key_range`, which decides
    /// which ranges of keys need to be calculated based on what cached trie data is available.
    ///
    /// `hashed_cursor_current` holds the most recently yielded but not-yet-consumed cursor entry;
    /// it is read and refilled here so the cursor's position carries over between calls.
    #[instrument(
        target = TRACE_TARGET,
        level = "trace",
        skip_all,
        fields(?lower_bound, ?upper_bound),
    )]
    fn calculate_key_range<'a>(
        &mut self,
        value_encoder: &mut VE,
        targets: &mut Option<TargetsCursor<'a>>,
        hashed_cursor_current: &mut Option<(Nibbles, VE::DeferredEncoder)>,
        lower_bound: Nibbles,
        upper_bound: Option<Nibbles>,
    ) -> Result<(), StateProofError> {
        // A helper closure for mapping entries returned from the `hashed_cursor`, converting the
        // key to Nibbles and immediately creating the DeferredValueEncoder so that encoding of the
        // leaf value can begin ASAP.
        let mut map_hashed_cursor_entry = |(key_b256, val): (B256, _)| {
            debug_assert_eq!(key_b256.len(), 32);
            let key = Nibbles::unpack_array(key_b256.as_ref());
            let val = value_encoder.deferred_encoder(key_b256, val);
            (key, val)
        };

        // If the cursor hasn't been used, or the last iterated key is prior to this range's
        // key range, then seek forward to at least the first key.
        if hashed_cursor_current.as_ref().is_none_or(|(key, _)| key < &lower_bound) {
            trace!(
                target: TRACE_TARGET,
                current=?hashed_cursor_current.as_ref().map(|(k, _)| k),
                "Seeking hashed cursor to meet lower bound",
            );

            // Pack the nibble path back into a right-padded B256 to use as the cursor seek key.
            let lower_key = B256::right_padding_from(&lower_bound.pack());
            *hashed_cursor_current =
                self.hashed_cursor.seek(lower_key)?.map(&mut map_hashed_cursor_entry);
        }

        // Loop over all keys in the range, calling `push_leaf` on each.
        while let Some((key, _)) = hashed_cursor_current.as_ref() &&
            upper_bound.is_none_or(|upper_bound| key < &upper_bound)
        {
            // Move the entry out (leaving None); it is refilled from the cursor below.
            let (key, val) =
                core::mem::take(hashed_cursor_current).expect("while-let checks for Some");
            self.push_leaf(targets, key, val)?;
            *hashed_cursor_current = self.hashed_cursor.next()?.map(&mut map_hashed_cursor_entry);
        }

        trace!(target: TRACE_TARGET, "No further keys within range");
        Ok(())
    }
733
734    /// Constructs and returns a new [`ProofTrieBranch`] based on an existing [`BranchNodeCompact`].
735    #[inline]
736    const fn new_from_cached_branch(
737        cached_branch: &BranchNodeCompact,
738        ext_len: u8,
739    ) -> ProofTrieBranch {
740        ProofTrieBranch {
741            ext_len,
742            state_mask: TrieMask::new(0),
743            masks: Some(BranchNodeMasks {
744                tree_mask: cached_branch.tree_mask,
745                hash_mask: cached_branch.hash_mask,
746            }),
747        }
748    }
749
    /// Pushes a new branch onto the `branch_stack` which is based on a cached branch obtained via
    /// the trie cursor.
    ///
    /// If there is already a child at the top branch of `branch_stack` occupying this new branch's
    /// nibble then that child will have its short-key split with another new branch, and this
    /// cached branch will be a child of that splitting branch.
    ///
    /// Expects `cached_path` to be a descendant of the current `branch_path` (debug-asserted).
    fn push_cached_branch<'a>(
        &mut self,
        targets: &mut Option<TargetsCursor<'a>>,
        cached_path: Nibbles,
        cached_branch: &BranchNodeCompact,
    ) -> Result<(), StateProofError> {
        debug_assert!(
            cached_path.starts_with(&self.branch_path),
            "push_cached_branch called with path {cached_path:?} which is not a child of current branch {:?}",
            self.branch_path,
        );

        let parent_branch = self.branch_stack.last();

        // If both stacks are empty then there were no leaves before this cached branch, push it and
        // be done; the extension of the branch will be its full path.
        if self.child_stack.is_empty() && parent_branch.is_none() {
            self.branch_path = cached_path;
            self.branch_stack
                .push(Self::new_from_cached_branch(cached_branch, cached_path.len() as u8));
            return Ok(())
        }

        // Get the nibble which should be set in the parent branch's `state_mask` for this new
        // branch.
        let cached_branch_nibble = cached_path.get_unchecked(self.branch_path.len());

        // We calculate the `ext_len` of the new branch, and potentially update its nibble if a new
        // parent branch is inserted here, based on the state of the parent branch.
        let (cached_branch_nibble, ext_len) = if parent_branch
            .is_none_or(|parent_branch| parent_branch.state_mask.is_bit_set(cached_branch_nibble))
        {
            // If the `child_stack` is not empty but the `branch_stack` is then it implies that
            // there must be a leaf or extension at the root of the trie whose short-key will get
            // split by a new branch, which will become the parent of both that leaf/extension and
            // this new branch.
            //
            // Similarly, if there is a branch on the `branch_stack` but its `state_mask` bit for
            // this new branch is already set, then there must be a leaf/extension with a short-key
            // to be split.
            debug_assert!(!self
                .child_stack
                .last()
                .expect("already checked for emptiness")
                .short_key()
                .is_empty());

            // Split that leaf/extension's short key with a new branch. The remaining short key
            // becomes this cached branch's extension.
            let (nibble, short_key) = self.push_new_branch(cached_path);
            (nibble, short_key.len())
        } else {
            // If there is a parent branch but its `state_mask` bit for this branch is not set
            // then we can simply calculate the `ext_len` based on the difference of each, minus
            // 1 to account for the nibble in the `state_mask`.
            (cached_branch_nibble, cached_path.len() - self.branch_path.len() - 1)
        };

        // `commit_last_child` relies on the last set bit of the parent branch's `state_mask` to
        // determine the path of the last child on the `child_stack`. Since we are about to
        // change that mask we need to commit that last child first.
        self.commit_last_child(targets)?;

        // When pushing a new branch we need to set its child nibble in the `state_mask` of
        // its parent, if there is one.
        if let Some(parent_branch) = self.branch_stack.last_mut() {
            parent_branch.state_mask.set_bit(cached_branch_nibble);
        }

        // Finally update the `branch_path` and push the new branch.
        self.branch_path = cached_path;
        self.branch_stack.push(Self::new_from_cached_branch(cached_branch, ext_len as u8));

        trace!(
            target: TRACE_TARGET,
            branch=?self.branch_stack.last(),
            branch_path=?self.branch_path,
            "Pushed cached branch",
        );

        Ok(())
    }
837
838    /// Wraps [`TrieCursor::seek`], skipping cached branches whose sub-tries must be recalculated
839    /// from leaves.
840    ///
841    /// A cached branch is skipped when all but at most one of its children match the prefix set.
842    /// In that case those children might all be deleted, leaving a branch with a single child.
843    /// A single-child branch must be collapsed, but collapsing requires the child to be a full
844    /// node (not a cached hash). Skipping the branch avoids this by forcing recalculation.
845    fn trie_cursor_seek(
846        &mut self,
847        key: Nibbles,
848    ) -> Result<Option<(Nibbles, BranchNodeCompact)>, StateProofError> {
849        let mut entry = self.trie_cursor.seek(key)?;
850        while let Some((ref path, ref branch)) = entry {
851            if !self.should_skip_cached_branch(path, branch) {
852                break
853            }
854            entry = self.trie_cursor.next()?;
855        }
856        Ok(entry)
857    }
858
859    /// Returns true if the cached branch should be skipped entirely and its sub-trie recalculated
860    /// from leaves.
861    fn should_skip_cached_branch(
862        &mut self,
863        cached_path: &Nibbles,
864        cached_branch: &BranchNodeCompact,
865    ) -> bool {
866        if !self.prefix_set.contains(cached_path) {
867            return false
868        }
869
870        let mut num_unmatched = 0u32;
871        let mut child_path = *cached_path;
872        for nibble in 0u8..16 {
873            if cached_branch.state_mask.is_bit_set(nibble) {
874                child_path.truncate(cached_path.len());
875                child_path.push_unchecked(nibble);
876                if !self.prefix_set.contains(&child_path) {
877                    num_unmatched += 1;
878                }
879            }
880        }
881
882        if num_unmatched <= 1 {
883            trace!(
884                target: TRACE_TARGET,
885                ?cached_path,
886                ?num_unmatched,
887                "Skipping cached branch: all but <=1 children match prefix set, branch may collapse",
888            );
889            true
890        } else {
891            false
892        }
893    }
894
    /// Attempts to pop off the top branch of the `cached_branch_stack`, returning
    /// [`PopCachedBranchOutcome::Popped`] on success. Returns other variants to indicate that the
    /// stack is empty and what to do about it.
    ///
    /// This method only returns [`PopCachedBranchOutcome::CalculateLeaves`] if there is a cached
    /// branch on top of the stack.
    ///
    /// - `sub_trie_prefix` bounds the search: a cursor entry outside this prefix is treated as
    ///   exhaustion of the sub-trie's cached data.
    /// - `uncalculated_lower_bound` is the lowest key whose leaves have not yet been processed;
    ///   `None` means no further leaf (and therefore no further cached) data exists.
    #[inline]
    fn try_pop_cached_branch(
        &mut self,
        trie_cursor_state: &mut TrieCursorState,
        sub_trie_prefix: &Nibbles,
        uncalculated_lower_bound: &Option<Nibbles>,
    ) -> Result<PopCachedBranchOutcome, StateProofError> {
        // If the `uncalculated_lower_bound` is None it indicates that there can be no more
        // leaf data, so similarly there can be no more cached branch data.
        let Some(uncalculated_lower_bound) = uncalculated_lower_bound else {
            return Ok(PopCachedBranchOutcome::Exhausted)
        };

        // If there is a branch on top of the stack we use that.
        if let Some(cached) = self.cached_branch_stack.pop() {
            return Ok(PopCachedBranchOutcome::Popped(cached));
        }

        // There is no cached branch on the stack. It's possible that another one exists
        // farther on in the trie, but we perform some checks first to prevent unnecessary
        // attempts to find it.

        // If [`TrieCursorState::path`] returns None it means that the cursor has been
        // exhausted, so there can be no more cached data.
        let Some(mut trie_cursor_path) = trie_cursor_state.path() else {
            return Ok(PopCachedBranchOutcome::Exhausted)
        };

        // If the trie cursor is seeked to a branch whose leaves have already been processed
        // then we can't use it, instead we seek forward and try again.
        if trie_cursor_path < uncalculated_lower_bound {
            *trie_cursor_state =
                TrieCursorState::seeked(self.trie_cursor_seek(*uncalculated_lower_bound)?);

            // Having just seeked forward we need to check if the cursor is now exhausted,
            // extracting the new path at the same time.
            if let Some(new_trie_cursor_path) = trie_cursor_state.path() {
                trie_cursor_path = new_trie_cursor_path
            } else {
                return Ok(PopCachedBranchOutcome::Exhausted)
            };
        }

        // If the trie cursor has exceeded the sub-trie then we consider it to be exhausted.
        if !trie_cursor_path.starts_with(sub_trie_prefix) {
            return Ok(PopCachedBranchOutcome::Exhausted)
        }

        // At this point we can be sure that the cursor is in an `Available` state. We know for
        // sure it's not `Exhausted` because of the calls to `path` above, and we know it's not
        // `Taken` because we push all taken branches onto the `cached_branch_stack`, and the
        // stack is empty.
        //
        // We will use this `Available` cached branch as our next branch.
        let cached = trie_cursor_state.take();
        trace!(target: TRACE_TARGET, cached=?cached, "Pushed next trie node onto cached_branch_stack");

        // If the calculated range is not caught up to the next cached branch it means there
        // are portions of the trie prior to that branch which may need to be calculated;
        // return the uncalculated range up to that branch to make that happen.
        //
        // If the next cached branch's path is all zeros then we can skip this catch-up step,
        // because there cannot be any keys prior to that range.
        let cached_path = &cached.0;
        if uncalculated_lower_bound < cached_path && !cached_path.is_zeroes() {
            let range = (*uncalculated_lower_bound, Some(*cached_path));
            trace!(target: TRACE_TARGET, ?range, "Returning key range to calculate in order to catch up to cached branch");

            // Push the cached branch onto the stack so it's available once the leaf range is done
            // being calculated.
            self.cached_branch_stack.push(cached);

            return Ok(PopCachedBranchOutcome::CalculateLeaves(range));
        }

        Ok(PopCachedBranchOutcome::Popped(cached))
    }
978
979    // Pop any under-construction branches that are now complete. Assumes that all trie data prior
980    // to `next_path`, if any, has been computed. Any branches which were under-construction
981    // previously, and which do not share a prefix with `next_path`, can be assumed to be completed;
982    // they will not have any further keys added to them.
983    fn commit_branches<'a>(
984        &mut self,
985        targets: &mut Option<TargetsCursor<'a>>,
986        next_path: &Nibbles,
987    ) -> Result<(), StateProofError> {
988        while !next_path.starts_with(&self.branch_path) {
989            self.pop_branch(targets)?;
990        }
991        Ok(())
992    }
993
    /// Accepts the current state of both hashed and trie cursors, and determines the next range of
    /// hashed keys which need to be processed using [`Self::push_leaf`].
    ///
    /// This method will use cached branch node data from the trie cursor to skip over all possible
    /// ranges of keys, to reduce computation as much as possible.
    ///
    /// # Returns
    ///
    /// - `None`: No more data to process, finish computation
    ///
    /// - `Some(lower, None)`: Indicates to call `push_leaf` on all keys starting at `lower`, with
    ///   no upper bound. This method won't be called again after this.
    ///
    /// - `Some(lower, Some(upper))`: Indicates to call `push_leaf` on all keys starting at `lower`,
    ///   up to but excluding `upper`, and then call this method once done.
    ///
    /// Once returned the `branch_stack` will be in the correct state to start calculating leaves
    /// for the given range, if any.
    #[instrument(target = TRACE_TARGET, level = "trace", skip_all)]
    fn next_uncached_key_range<'a>(
        &mut self,
        targets: &mut Option<TargetsCursor<'a>>,
        trie_cursor_state: &mut TrieCursorState,
        sub_trie_prefix: &Nibbles,
        sub_trie_upper_bound: Option<&Nibbles>,
        mut uncalculated_lower_bound: Option<Nibbles>,
    ) -> Result<Option<(Nibbles, Option<Nibbles>)>, StateProofError> {
        loop {
            // Pop the currently cached branch node.
            //
            // NOTE we pop off the `cached_branch_stack` because cloning the `BranchNodeCompact`
            // means cloning an Arc, which incurs synchronization overhead. We have to be sure to
            // push the cached branch back onto the stack once done.
            let (cached_path, cached_branch) = match self.try_pop_cached_branch(
                trie_cursor_state,
                sub_trie_prefix,
                &uncalculated_lower_bound,
            )? {
                PopCachedBranchOutcome::Popped(cached) => cached,
                PopCachedBranchOutcome::Exhausted => {
                    // If cached branches are exhausted it's possible that there is still an
                    // unbounded range of leaves to be processed. `uncalculated_lower_bound` is
                    // used to return that range.
                    trace!(target: TRACE_TARGET, ?uncalculated_lower_bound, "Exhausted cached trie nodes");
                    if let Some(lower) = uncalculated_lower_bound {
                        self.commit_branches(targets, &lower)?;
                        return Ok(Some((lower, sub_trie_upper_bound.copied())));
                    }
                    return Ok(None)
                }
                PopCachedBranchOutcome::CalculateLeaves(range) => {
                    self.commit_branches(targets, &range.0)?;
                    return Ok(Some(range));
                }
            };

            let uncalculated_lower_bound_ref = uncalculated_lower_bound
                .as_ref()
                .expect("try_pop_cached_branch would return Exhausted if this were None");

            trace!(
                target: TRACE_TARGET,
                branch_path = ?self.branch_path,
                branch_state_mask = ?self.branch_stack.last().map(|b| b.state_mask),
                ?cached_path,
                cached_branch_state_mask = ?cached_branch.state_mask,
                cached_branch_hash_mask = ?cached_branch.hash_mask,
                "loop",
            );

            self.commit_branches(targets, &cached_path)?;

            // Since we've popped all branches which don't start with cached_path, branch_path at
            // this point must be equal to or shorter than cached_path.
            debug_assert!(
                self.branch_path.len() < cached_path.len() || self.branch_path == cached_path,
                "branch_path {:?} is different-or-longer-than cached_path {cached_path:?}",
                self.branch_path
            );

            // If the branch_path != cached_path it means the branch_stack is either empty, or the
            // top branch is the parent of this cached branch. Either way we push a branch
            // corresponding to the cached one onto the stack, so we can begin constructing it.
            if self.branch_path != cached_path {
                self.push_cached_branch(targets, cached_path, &cached_branch)?;
            }

            // At this point the top of the branch stack is the same branch which was found in the
            // cache.
            let curr_branch =
                self.branch_stack.last().expect("top of branch_stack corresponds to cached branch");

            let cached_state_mask = cached_branch.state_mask;
            let curr_state_mask = curr_branch.state_mask;

            // Determine all child nibbles which are set in the cached branch but not the
            // under-construction branch.
            let mut next_child_nibbles = curr_state_mask ^ cached_state_mask;

            // Also include child nibbles indicated by the prefix set. The prefix set can
            // indicate children that need recalculation from leaves (e.g. new keys inserted
            // under this branch). Skip nibbles already set in `curr_state_mask` since those
            // children have already been constructed.
            if self.prefix_set.contains(&self.branch_path) {
                let branch_path_len = self.branch_path.len();
                let mut child_path = self.branch_path;
                for nibble in 0u8..16 {
                    if !curr_state_mask.is_bit_set(nibble) {
                        child_path.truncate(branch_path_len);
                        child_path.push_unchecked(nibble);
                        if self.prefix_set.contains(&child_path) {
                            next_child_nibbles.set_bit(nibble);
                        }
                    }
                }
            }

            // Kept only for the trace logs below.
            let _orig_next_child_nibbles = next_child_nibbles;

            // Mask out any child nibbles whose ranges have already been fully processed.
            // This can happen when `calculate_key_range` finds no keys for a child's range,
            // leaving the child's bit unset in `state_mask`. Without this, re-entering this
            // function would select the same child again.
            if uncalculated_lower_bound_ref.starts_with(&self.branch_path) &&
                uncalculated_lower_bound_ref.len() > self.branch_path.len()
            {
                let lower_nibble =
                    uncalculated_lower_bound_ref.get_unchecked(self.branch_path.len());
                // Clear all nibbles strictly below `lower_nibble` since they've been processed.
                let already_processed_mask = TrieMask::new((1u16 << lower_nibble) - 1);
                next_child_nibbles &= !already_processed_mask;
                trace!(
                    target: TRACE_TARGET,
                    branch_path = ?self.branch_path,
                    ?_orig_next_child_nibbles,
                    ?already_processed_mask,
                    ?next_child_nibbles,
                    "Unset already processed key nibbles from next_child_nibbles",
                );
            } else if !uncalculated_lower_bound_ref.starts_with(&self.branch_path) &&
                uncalculated_lower_bound_ref > &self.branch_path
            {
                // The lower bound has moved entirely past this branch (e.g. branch is 0x6 but
                // lower is 0x7). All remaining children have been processed.
                next_child_nibbles = TrieMask::default();
                trace!(
                    target: TRACE_TARGET,
                    branch_path = ?self.branch_path,
                    ?_orig_next_child_nibbles,
                    ?next_child_nibbles,
                    "Unset all nibbles from next_child_nibbles due to branch_path being outside this subtrie",
                );
            }

            // If there are no further children to construct for this branch then pop it off both
            // stacks and loop using the parent branch.
            if next_child_nibbles.is_empty() {
                trace!(
                    target: TRACE_TARGET,
                    path=?cached_path,
                    ?curr_branch,
                    ?cached_branch,
                    "No further children, popping branch",
                );
                self.pop_branch(targets)?;

                // no need to pop from `cached_branch_stack`, the current cached branch is already
                // popped (see note at the top of the loop).

                // The just-popped branch is completely processed; we know there can be no more keys
                // with that prefix. Set the lower bound which can be returned from this method to
                // be the next possible prefix, if any.
                uncalculated_lower_bound = cached_path.next_without_prefix();

                continue
            }

            // Determine the next nibble of the branch which has not yet been constructed, and
            // determine the child's full path.
            let child_nibble = next_child_nibbles.trailing_zeros() as u8;
            let child_path = self.child_path_at(child_nibble);

            // If the previous child was a cached branch with a short key (extension), then the new
            // uncalculated_lower_bound will be the increment of that branch's path. If there are
            // any dirty leaves between that path and this child, it indicates there may be leaves
            // which would split that extension node. In that case we return the range to process
            // the leaves.
            if uncalculated_lower_bound_ref < &child_path &&
                self.prefix_set.contains_range(uncalculated_lower_bound_ref..&child_path)
            {
                self.cached_branch_stack.push((cached_path, cached_branch));
                return Ok(Some((*uncalculated_lower_bound_ref, Some(child_path))));
            }

            // If the `hash_mask` bit is set for the next child it means the child's hash is cached
            // in the `cached_branch`. We can use that instead of re-calculating the hash of the
            // entire sub-trie.
            //
            // If the child needs to be retained for a proof then we should not use the cached
            // hash, and instead continue on to calculate its node manually.
            //
            // If the child's path is in the prefix set then the cached hash is stale and must
            // not be used.
            if cached_branch.hash_mask.is_bit_set(child_nibble) &&
                !self.prefix_set.contains(&child_path)
            {
                // Commit the last child. We do this here for two reasons:
                // - `commit_last_child` will check if the last child needs to be retained. We need
                //   to check that before the subsequent `should_retain` call here to prevent
                //   `targets` from being moved beyond the last child before it is checked.
                // - If we do end up using the cached hash value, then we will need to commit the
                //   last child before pushing a new one onto the stack anyway.
                self.commit_last_child(targets)?;

                if !self.should_retain(targets, &child_path, false) {
                    // Pull this child's hash out of the cached branch node. The hash index
                    // is the number of hash_mask bits set below this child's nibble.
                    let lower_bits = TrieMask::new((1u16 << child_nibble) - 1);
                    let hash_idx = (cached_branch.hash_mask & lower_bits).count_ones() as usize;
                    let hash = cached_branch.hashes[hash_idx];

                    trace!(
                        target: TRACE_TARGET,
                        ?child_path,
                        ?hash_idx,
                        ?hash,
                        "Using cached hash for child",
                    );

                    self.child_stack.push(ProofTrieBranchChild::RlpNode(RlpNode::word_rlp(&hash)));
                    self.branch_stack
                        .last_mut()
                        .expect("already asserted there is a last branch")
                        .state_mask
                        .set_bit(child_nibble);

                    // Update the `uncalculated_lower_bound` to indicate that the child whose bit
                    // was just set is completely processed.
                    uncalculated_lower_bound = child_path.next_without_prefix();

                    // Push the current cached branch back onto the stack before looping.
                    self.cached_branch_stack.push((cached_path, cached_branch));

                    continue
                }
            }

            // We now want to check if there is a cached branch node at this child. The cached
            // branch node may be the node at this child directly, or this child may be an
            // extension and the cached branch is the child of that extension.

            // All trie nodes prior to `child_path` will not be modified further, so we can seek the
            // trie cursor to the next cached node at-or-after `child_path`.
            if trie_cursor_state.path().is_some_and(|path| path < &child_path) {
                trace!(target: TRACE_TARGET, ?child_path, "Seeking trie cursor to child path");
                *trie_cursor_state = TrieCursorState::seeked(self.trie_cursor_seek(child_path)?);
            }

            // If the next cached branch node is a child of `child_path` then we can assume it is
            // the cached branch for this child. We push it onto the `cached_branch_stack` and loop
            // back to the top.
            if let TrieCursorState::Available(next_cached_path, next_cached_branch) =
                &trie_cursor_state &&
                next_cached_path.starts_with(&child_path)
            {
                // Push the current cached branch back on before pushing its child and then looping
                self.cached_branch_stack.push((cached_path, cached_branch));

                // If the next cached branch's path is in the prefix set, it could indicate that
                // there are dirty leaves which would split the cached branch's extension node (if
                // there is one). In that case we return the range those leaves would potentially be
                // in to calculate them.
                if self.prefix_set.contains(&child_path) {
                    let gap_upper = Some(*next_cached_path);
                    self.cached_branch_stack.push(trie_cursor_state.take());
                    return Ok(Some((*uncalculated_lower_bound_ref, gap_upper)));
                }

                trace!(
                    target: TRACE_TARGET,
                    ?child_path,
                    ?next_cached_path,
                    ?next_cached_branch,
                    "Pushing cached branch for child",
                );
                self.cached_branch_stack.push(trie_cursor_state.take());
                continue;
            }

            // There is no cached data for the sub-trie at this child, we must recalculate the
            // sub-trie root (this child) using the leaves. Return the range of keys based on the
            // child path.
            let child_path_upper = child_path.next_without_prefix();
            trace!(
                target: TRACE_TARGET,
                lower=?child_path,
                upper=?child_path_upper,
                "Returning sub-trie's key range to calculate",
            );

            // Push the current cached branch back onto the stack before returning.
            self.cached_branch_stack.push((cached_path, cached_branch));

            return Ok(Some((child_path, child_path_upper)));
        }
    }
1300
    /// Calculates trie nodes and retains proofs for targeted nodes within a single sub-trie.
    ///
    /// The sub-trie is described by `sub_trie_targets`: its `prefix` is the path of the
    /// sub-trie's root (the lower bound of all keys within it), and `upper_bound()` gives the
    /// exclusive upper bound of its key range, where None indicates unbounded.
    ///
    /// `trie_cursor_state` and `hashed_cursor_current` carry the positions of the two cursors
    /// across successive sub-trie calculations, so they are shared between calls.
    #[instrument(
        target = TRACE_TARGET,
        level = "trace",
        skip_all,
        fields(prefix=?sub_trie_targets.prefix),
    )]
    fn proof_subtrie<'a>(
        &mut self,
        value_encoder: &mut VE,
        trie_cursor_state: &mut TrieCursorState,
        hashed_cursor_current: &mut Option<(Nibbles, VE::DeferredEncoder)>,
        sub_trie_targets: SubTrieTargets<'a>,
    ) -> Result<(), StateProofError> {
        let sub_trie_upper_bound = sub_trie_targets.upper_bound();

        // Wrap targets into a `TargetsCursor`. Targets can be empty if we only want to calculate
        // the root, in which case we don't need a cursor.
        let mut targets = if sub_trie_targets.targets.is_empty() {
            None
        } else {
            Some(TargetsCursor::new(sub_trie_targets.targets))
        };

        // Ensure initial state is cleared. By the end of the method call these should be empty once
        // again.
        debug_assert!(self.cached_branch_stack.is_empty());
        debug_assert!(self.branch_stack.is_empty());
        debug_assert!(self.branch_path.is_empty());
        debug_assert!(self.child_stack.is_empty());

        // `next_uncached_key_range`, which will be called in the loop below, expects the trie
        // cursor to have already been seeked. If it's not yet seeked, or seeked to a prior node,
        // then we seek it to the prefix (the first possible node) to initialize it.
        if trie_cursor_state.before(&sub_trie_targets.prefix) {
            trace!(target: TRACE_TARGET, "Doing initial seek of trie cursor");
            *trie_cursor_state =
                TrieCursorState::seeked(self.trie_cursor_seek(sub_trie_targets.prefix)?);
        }

        // `uncalculated_lower_bound` tracks the lower bound of node paths which have yet to be
        // visited, either via the hashed key cursor (`calculate_key_range`) or trie cursor
        // (`next_uncached_key_range`). If/when this becomes None then there are no further nodes
        // which could exist.
        let mut uncalculated_lower_bound = Some(sub_trie_targets.prefix);

        trace!(target: TRACE_TARGET, "Starting loop");
        loop {
            // Save the previous lower bound to detect forward progress.
            let prev_uncalculated_lower_bound = uncalculated_lower_bound;

            // Determine the range of keys of the overall trie which need to be re-computed.
            let Some((calc_lower_bound, calc_upper_bound)) = self.next_uncached_key_range(
                &mut targets,
                trie_cursor_state,
                &sub_trie_targets.prefix,
                sub_trie_upper_bound.as_ref(),
                prev_uncalculated_lower_bound,
            )?
            else {
                // If `next_uncached_key_range` determines that there can be no more keys then
                // complete the computation.
                break;
            };

            // Forward-progress guard: detect trie inconsistencies that would cause infinite loops.
            // If `next_uncached_key_range` returns a range that starts before the previous
            // lower bound, we've gone backwards and would loop forever.
            //
            // This can specifically happen when there is a cached branch which shouldn't exist, or
            // if state mask bit is set on a cached branch which shouldn't be.
            if let Some(prev_lower) = prev_uncalculated_lower_bound.as_ref() &&
                calc_lower_bound < *prev_lower
            {
                let msg = format!(
                    "next_uncached_key_range went backwards: calc_lower={calc_lower_bound:?} < \
                     prev_lower={prev_lower:?}, calc_upper={calc_upper_bound:?}, prefix={:?}",
                    sub_trie_targets.prefix,
                );
                error!(target: TRACE_TARGET, "{msg}");
                return Err(StateProofError::TrieInconsistency(msg));
            }

            // Calculate the trie for that range of keys
            self.calculate_key_range(
                value_encoder,
                &mut targets,
                hashed_cursor_current,
                calc_lower_bound,
                calc_upper_bound,
            )?;

            // Once outside `calculate_key_range`, `hashed_cursor_current` will be at the first key
            // after the range.
            //
            // If the `hashed_cursor_current` is None (exhausted), or not within the range of the
            // sub-trie, then there are no more keys at all, meaning the trie couldn't possibly have
            // more data and we should complete computation.
            if hashed_cursor_current
                .as_ref()
                .is_none_or(|(key, _)| !key.starts_with(&sub_trie_targets.prefix))
            {
                break;
            }

            // The upper bound of previous calculation becomes the lower bound of the uncalculated
            // range, for which we'll once again check for cached data.
            uncalculated_lower_bound = calc_upper_bound;
        }

        // Once there's no more leaves we can pop the remaining branches, if any.
        trace!(target: TRACE_TARGET, "Exited loop, popping remaining branches");
        while !self.branch_stack.is_empty() {
            self.pop_branch(&mut targets)?;
        }

        // At this point the branch stack should be empty. If the child stack is empty it means no
        // keys were ever iterated from the hashed cursor in the first place. Otherwise there should
        // only be a single node left: the root node.
        debug_assert!(self.branch_stack.is_empty());
        debug_assert!(self.branch_path.is_empty());
        debug_assert!(self.child_stack.len() < 2);

        // The `cached_branch_stack` may still have cached branches on it, as it's not affected by
        // `pop_branch`, but it is no longer needed and should be cleared.
        self.cached_branch_stack.clear();

        // We always pop the root node off of the `child_stack` in order to empty it, however we
        // might not want to retain the node unless the `SubTrieTargets` indicates it.
        trace!(
            target: TRACE_TARGET,
            retain_root = ?sub_trie_targets.retain_root,
            child_stack_empty = self.child_stack.is_empty(),
            "Maybe retaining root",
        );
        match (sub_trie_targets.retain_root, self.child_stack.pop()) {
            (false, _) => {
                // Whether the root node exists or not, we don't want it.
            }
            (true, None) => {
                // If `child_stack` is empty it means there were no keys at all, retain an empty
                // root node.
                self.retained_proofs.push(ProofTrieNodeV2 {
                    path: Nibbles::new(), // root path
                    node: TrieNodeV2::EmptyRoot,
                    masks: None,
                });
            }
            (true, Some(root_node)) => {
                // Encode and retain the root node, re-using the shared RLP scratch buffer.
                self.rlp_encode_buf.clear();
                let root_node =
                    root_node.into_proof_trie_node(Nibbles::new(), &mut self.rlp_encode_buf)?;
                self.retained_proofs.push(root_node);
            }
        }

        Ok(())
    }
1462
1463    /// Clears internal computation state. Called after errors to ensure the calculator is not
1464    /// left in a partially-computed state when reused.
1465    fn clear_computation_state(&mut self) {
1466        self.branch_stack.clear();
1467        self.branch_path = Nibbles::new();
1468        self.child_stack.clear();
1469        self.cached_branch_stack.clear();
1470        self.retained_proofs.clear();
1471    }
1472
1473    /// Internal implementation of proof calculation. Assumes both cursors have already been reset.
1474    /// See docs on [`Self::proof`] for expected behavior.
1475    fn proof_inner(
1476        &mut self,
1477        value_encoder: &mut VE,
1478        targets: &mut [ProofV2Target],
1479    ) -> Result<Vec<ProofTrieNodeV2>, StateProofError> {
1480        // If there are no targets then nothing could be returned, return early.
1481        if targets.is_empty() {
1482            trace!(target: TRACE_TARGET, "Empty targets, returning");
1483            return Ok(Vec::new())
1484        }
1485
1486        // Initialize the variables which track the state of the two cursors. Both indicate the
1487        // cursors are unseeked.
1488        let mut trie_cursor_state = TrieCursorState::unseeked();
1489        let mut hashed_cursor_current: Option<(Nibbles, VE::DeferredEncoder)> = None;
1490
1491        // Divide targets into chunks, each chunk corresponding to a different sub-trie within the
1492        // overall trie, and handle all proofs within that sub-trie.
1493        for sub_trie_targets in iter_sub_trie_targets(targets) {
1494            if let Err(err) = self.proof_subtrie(
1495                value_encoder,
1496                &mut trie_cursor_state,
1497                &mut hashed_cursor_current,
1498                sub_trie_targets,
1499            ) {
1500                self.clear_computation_state();
1501                return Err(err);
1502            }
1503        }
1504
1505        trace!(
1506            target: TRACE_TARGET,
1507            retained_proofs_len = ?self.retained_proofs.len(),
1508            "proof_inner: returning",
1509        );
1510        Ok(core::mem::take(&mut self.retained_proofs))
1511    }
1512
1513    /// Generate a proof for the given targets.
1514    ///
1515    /// Given a set of [`ProofV2Target`]s, returns nodes whose paths are a prefix of any target. The
1516    /// returned nodes will be sorted depth-first by path.
1517    ///
1518    /// # Panics
1519    ///
1520    /// In debug builds, panics if the targets are not sorted lexicographically.
1521    #[instrument(target = TRACE_TARGET, level = "trace", skip_all)]
1522    pub fn proof(
1523        &mut self,
1524        value_encoder: &mut VE,
1525        targets: &mut [ProofV2Target],
1526    ) -> Result<Vec<ProofTrieNodeV2>, StateProofError> {
1527        self.trie_cursor.reset();
1528        self.hashed_cursor.reset();
1529        self.proof_inner(value_encoder, targets)
1530    }
1531
1532    /// Computes the root hash from a set of proof nodes.
1533    ///
1534    /// Returns `None` if there is no root node (partial proof), otherwise returns the hash of the
1535    /// root node.
1536    ///
1537    /// This method reuses the internal RLP encode buffer for efficiency.
1538    pub fn compute_root_hash(
1539        &mut self,
1540        proof_nodes: &[ProofTrieNodeV2],
1541    ) -> Result<Option<B256>, StateProofError> {
1542        // Find the root node (node at empty path)
1543        let root_node = proof_nodes.iter().find(|node| node.path.is_empty());
1544
1545        let Some(root) = root_node else {
1546            return Ok(None);
1547        };
1548
1549        // Compute the hash of the root node
1550        self.rlp_encode_buf.clear();
1551        root.node.encode(&mut self.rlp_encode_buf);
1552        let root_hash = keccak256(&self.rlp_encode_buf);
1553
1554        Ok(Some(root_hash))
1555    }
1556
1557    /// Calculates the root node of the trie.
1558    ///
1559    /// This method does not accept targets nor retain proofs. Returns the root node which can
1560    /// be used to compute the root hash via [`Self::compute_root_hash`].
1561    #[instrument(target = TRACE_TARGET, level = "trace", skip(self, value_encoder))]
1562    pub fn root_node(
1563        &mut self,
1564        value_encoder: &mut VE,
1565    ) -> Result<ProofTrieNodeV2, StateProofError> {
1566        // Initialize the variables which track the state of the two cursors. Both indicate the
1567        // cursors are unseeked.
1568        let mut trie_cursor_state = TrieCursorState::unseeked();
1569        let mut hashed_cursor_current: Option<(Nibbles, VE::DeferredEncoder)> = None;
1570
1571        static EMPTY_TARGETS: [ProofV2Target; 0] = [];
1572        let sub_trie_targets =
1573            SubTrieTargets { prefix: Nibbles::new(), targets: &EMPTY_TARGETS, retain_root: true };
1574
1575        if let Err(err) = self.proof_subtrie(
1576            value_encoder,
1577            &mut trie_cursor_state,
1578            &mut hashed_cursor_current,
1579            sub_trie_targets,
1580        ) {
1581            self.clear_computation_state();
1582            return Err(err);
1583        }
1584
1585        // proof_subtrie will retain the root node if retain_proof is true, regardless of if there
1586        // are any targets.
1587        let mut proofs = core::mem::take(&mut self.retained_proofs);
1588        trace!(
1589            target: TRACE_TARGET,
1590            proofs_len = ?proofs.len(),
1591            "root_node: extracting root",
1592        );
1593
1594        // The root node is at the empty path - it must exist since retain_root is true. Otherwise
1595        // targets was empty, so there should be no other retained proofs.
1596        debug_assert_eq!(
1597            proofs.len(), 1,
1598            "prefix is empty, retain_root is true, and targets is empty, so there must be only the root node"
1599        );
1600
1601        // Find and remove the root node (node at empty path)
1602        let root_node = proofs.pop().expect("prefix is empty, retain_root is true, and targets is empty, so there must be only the root node");
1603
1604        Ok(root_node)
1605    }
1606}
1607
/// A proof calculator for storage tries: a [`ProofCalculator`] specialized with
/// [`StorageValueEncoder`] for encoding storage slot values.
pub type StorageProofCalculator<TC, HC> = ProofCalculator<TC, HC, StorageValueEncoder>;
1610
1611impl<TC, HC> StorageProofCalculator<TC, HC>
1612where
1613    TC: TrieStorageCursor,
1614    HC: HashedStorageCursor<Value = U256>,
1615{
1616    /// Create a new [`StorageProofCalculator`] instance.
1617    pub fn new_storage(trie_cursor: TC, hashed_cursor: HC) -> Self {
1618        Self::new(trie_cursor, hashed_cursor)
1619    }
1620
1621    /// Generate a proof for a storage trie at the given hashed address.
1622    ///
1623    /// Given a set of [`ProofV2Target`]s, returns nodes whose paths are a prefix of any target. The
1624    /// returned nodes will be sorted depth-first by path.
1625    ///
1626    /// # Panics
1627    ///
1628    /// In debug builds, panics if the targets are not sorted lexicographically.
1629    #[instrument(target = TRACE_TARGET, level = "trace", skip(self, targets))]
1630    pub fn storage_proof(
1631        &mut self,
1632        hashed_address: B256,
1633        targets: &mut [ProofV2Target],
1634    ) -> Result<Vec<ProofTrieNodeV2>, StateProofError> {
1635        self.hashed_cursor.set_hashed_address(hashed_address);
1636
1637        // Shortcut: check if storage is empty
1638        if self.hashed_cursor.is_storage_empty()? {
1639            // Return a single EmptyRoot node at the root path
1640            return Ok(vec![ProofTrieNodeV2 {
1641                path: Nibbles::default(),
1642                node: TrieNodeV2::EmptyRoot,
1643                masks: None,
1644            }])
1645        }
1646
1647        // Don't call `set_hashed_address` on the trie cursor until after the previous shortcut has
1648        // been checked.
1649        self.trie_cursor.set_hashed_address(hashed_address);
1650
1651        // Create a mutable storage value encoder
1652        let mut storage_value_encoder = StorageValueEncoder;
1653        self.proof_inner(&mut storage_value_encoder, targets)
1654    }
1655
1656    /// Calculates the root node of a storage trie.
1657    ///
1658    /// This method does not accept targets nor retain proofs. Returns the root node which can
1659    /// be used to compute the root hash via [`Self::compute_root_hash`].
1660    #[instrument(target = TRACE_TARGET, level = "trace", skip(self))]
1661    pub fn storage_root_node(
1662        &mut self,
1663        hashed_address: B256,
1664    ) -> Result<ProofTrieNodeV2, StateProofError> {
1665        self.hashed_cursor.set_hashed_address(hashed_address);
1666
1667        if self.hashed_cursor.is_storage_empty()? {
1668            return Ok(ProofTrieNodeV2 {
1669                path: Nibbles::default(),
1670                node: TrieNodeV2::EmptyRoot,
1671                masks: None,
1672            })
1673        }
1674
1675        // Don't call `set_hashed_address` on the trie cursor until after the previous shortcut has
1676        // been checked.
1677        self.trie_cursor.set_hashed_address(hashed_address);
1678
1679        // Create a mutable storage value encoder
1680        let mut storage_value_encoder = StorageValueEncoder;
1681        self.root_node(&mut storage_value_encoder)
1682    }
1683}
1684
/// Helper type wrapping a slice of [`ProofV2Target`]s, primarily used to iterate through targets in
/// [`ProofCalculator::should_retain`].
///
/// It is assumed that the underlying slice is never empty, and that the iterator is never
/// exhausted.
struct TargetsCursor<'a> {
    /// The (assumed non-empty) slice of targets being traversed.
    targets: &'a [ProofV2Target],
    /// Index of the target the cursor currently points at.
    i: usize,
}
1694
1695impl<'a> TargetsCursor<'a> {
1696    /// Wraps a slice of [`ProofV2Target`]s with the `TargetsCursor`.
1697    ///
1698    /// # Panics
1699    ///
1700    /// Will panic in debug mode if called with an empty slice.
1701    fn new(targets: &'a [ProofV2Target]) -> Self {
1702        debug_assert!(!targets.is_empty());
1703        Self { targets, i: 0 }
1704    }
1705
1706    /// Returns the current and next [`ProofV2Target`] that the cursor is pointed at.
1707    fn current(&self) -> (&'a ProofV2Target, Option<&'a ProofV2Target>) {
1708        (&self.targets[self.i], self.targets.get(self.i + 1))
1709    }
1710
1711    /// Iterates the cursor forward.
1712    ///
1713    /// # Panics
1714    ///
1715    /// Will panic if the cursor is exhausted.
1716    fn next(&mut self) -> (&'a ProofV2Target, Option<&'a ProofV2Target>) {
1717        self.i += 1;
1718        debug_assert!(self.i < self.targets.len());
1719        self.current()
1720    }
1721
1722    // Iterate forwards over the slice, starting from the [`ProofV2Target`] after the current.
1723    fn skip_iter(&self) -> impl Iterator<Item = &'a ProofV2Target> {
1724        self.targets[self.i + 1..].iter()
1725    }
1726
1727    /// Iterated backwards over the slice, starting from the [`ProofV2Target`] previous to the
1728    /// current.
1729    fn rev_iter(&self) -> impl Iterator<Item = &'a ProofV2Target> {
1730        self.targets[..self.i].iter().rev()
1731    }
1732}
1733
/// Used to track the state of the trie cursor, allowing us to differentiate between a branch having
/// been taken (used as a cached branch) and the cursor having been exhausted.
#[derive(Debug)]
enum TrieCursorState {
    /// The initial state of the cursor, indicating it's never been seeked.
    Unseeked,
    /// Cursor is seeked to this path and the node has not been used yet.
    Available(Nibbles, BranchNodeCompact),
    /// Cursor is seeked to this path, but the node has already been consumed via [`Self::take`].
    Taken(Nibbles),
    /// Cursor has been exhausted; no further nodes exist.
    Exhausted,
}
1747
1748impl TrieCursorState {
1749    /// Creates a [`Self::Unseeked`] based on an entry returned from the cursor itself.
1750    const fn unseeked() -> Self {
1751        Self::Unseeked
1752    }
1753
1754    /// Creates a [`Self`] based on an entry returned from the cursor itself.
1755    fn seeked(entry: Option<(Nibbles, BranchNodeCompact)>) -> Self {
1756        entry.map_or(Self::Exhausted, |(path, node)| Self::Available(path, node))
1757    }
1758
1759    /// Returns the path the cursor is seeked to, or None if it's exhausted.
1760    ///
1761    /// # Panics
1762    ///
1763    /// Panics if the cursor is unseeked.
1764    const fn path(&self) -> Option<&Nibbles> {
1765        match self {
1766            Self::Unseeked => panic!("cursor is unseeked"),
1767            Self::Available(path, _) | Self::Taken(path) => Some(path),
1768            Self::Exhausted => None,
1769        }
1770    }
1771
1772    /// Returns true if the cursor is unseeked, or is seeked to a node prior to the given one.
1773    fn before(&self, path: &Nibbles) -> bool {
1774        match self {
1775            Self::Unseeked => true,
1776            Self::Available(seeked_to, _) | Self::Taken(seeked_to) => path < seeked_to,
1777            Self::Exhausted => false,
1778        }
1779    }
1780
1781    /// Takes the path and node from a [`Self::Available`]. Panics if not [`Self::Available`].
1782    fn take(&mut self) -> (Nibbles, BranchNodeCompact) {
1783        let Self::Available(path, _) = self else {
1784            panic!("take called on non-Available: {self:?}")
1785        };
1786
1787        let path = *path;
1788        let Self::Available(path, node) = core::mem::replace(self, Self::Taken(path)) else {
1789            unreachable!("already checked that self is Self::Available");
1790        };
1791
1792        (path, node)
1793    }
1794}
1795
/// Describes the state of the currently cached branch node (if any).
enum PopCachedBranchOutcome {
    /// Cached branch has been popped from the `cached_branch_stack` and is ready to be used.
    Popped((Nibbles, BranchNodeCompact)),
    /// All cached branches have been exhausted.
    Exhausted,
    /// Need to calculate leaves from this range (exclusive upper) before the cached branch
    /// (catch-up range). If the upper bound is None then the range is unbounded.
    CalculateLeaves((Nibbles, Option<Nibbles>)),
}
1806
1807#[cfg(test)]
1808mod tests {
1809    use super::*;
1810    use crate::{
1811        hashed_cursor::{mock::MockHashedCursorFactory, HashedCursorFactory},
1812        proof::StorageProof as LegacyStorageProof,
1813        test_utils::TrieTestHarness,
1814        trie_cursor::{depth_first, TrieCursorFactory},
1815    };
1816    use alloy_primitives::map::B256Set;
1817    use alloy_rlp::Decodable;
1818    use alloy_trie::proof::AddedRemovedKeys;
1819    use itertools::Itertools;
1820    use reth_trie_common::{prefix_set::PrefixSetMut, ProofTrieNode, TrieNode};
1821    use std::collections::BTreeMap;
1822
1823    /// Converts legacy proofs to V2 proofs by combining extension nodes with their child branch
1824    /// nodes.
1825    ///
1826    /// In the legacy proof format, extension nodes and branch nodes are separate. In the V2 format,
1827    /// they are combined into a single `BranchNodeV2` where the extension's key becomes the
1828    /// branch's `key` field.
1829    ///
1830    /// Converts legacy proofs (sorted in depth-first order) to V2 format.
1831    ///
1832    /// In depth-first order, children come BEFORE parents. So when we encounter an extension node,
1833    /// its child branch has already been processed and is in the result. We need to pop it and
1834    /// combine it with the extension.
1835    fn convert_legacy_proofs_to_v2(legacy_proofs: &[ProofTrieNode]) -> Vec<ProofTrieNodeV2> {
1836        ProofTrieNodeV2::from_sorted_trie_nodes(
1837            legacy_proofs.iter().map(|p| (p.path, p.node.clone(), p.masks)),
1838        )
1839    }
1840
    /// A test harness for comparing `StorageProofCalculator` and legacy `StorageProof`
    /// implementations.
    ///
    /// Wraps [`TrieTestHarness`] and adds a method to test that both proof implementations
    /// produce equivalent results for storage proofs.
    struct ProofTestHarness {
        /// The wrapped harness; its helper methods are exposed via the `Deref` impl below.
        inner: TrieTestHarness,
    }
1849
    // Forward to the wrapped `TrieTestHarness` so its helpers (cursor factories, roots, etc.)
    // can be called directly on `ProofTestHarness`.
    impl std::ops::Deref for ProofTestHarness {
        type Target = TrieTestHarness;
        fn deref(&self) -> &Self::Target {
            &self.inner
        }
    }
1856
    impl ProofTestHarness {
        /// Creates a new test harness from a map of hashed storage slots to values.
        fn new(storage: BTreeMap<B256, U256>) -> Self {
            Self { inner: TrieTestHarness::new(storage) }
        }

        /// Asserts that `StorageProofCalculator` and legacy `StorageProof` produce equivalent
        /// results for storage proofs.
        ///
        /// Both implementations are run over the same targets; their outputs are normalized to
        /// the V2 node format, filtered to target-matching nodes, and compared for equality.
        fn assert_proof(
            &self,
            targets: impl IntoIterator<Item = ProofV2Target>,
        ) -> Result<(), StateProofError> {
            let mut targets_vec = targets.into_iter().collect::<Vec<_>>();

            // Get v2 proof and root hash via harness
            let (proof_v2_result, root_hash) = self.proof_v2(&mut targets_vec);

            // Verify the root hash matches the expected root (if the proof contains a root
            // node)
            if let Some(root_hash) = root_hash {
                pretty_assertions::assert_eq!(self.original_root(), root_hash);
            }

            // Convert ProofV2Target keys to B256Set for legacy implementation
            let legacy_targets = targets_vec
                .iter()
                .map(|target| B256::from_slice(&target.key_nibbles.pack()))
                .collect::<B256Set>();

            // Call legacy StorageProof::storage_multiproof
            let proof_legacy_result = LegacyStorageProof::new_hashed(
                self.trie_cursor_factory(),
                self.hashed_cursor_factory(),
                self.hashed_address(),
            )
            .with_branch_node_masks(true)
            .with_added_removed_keys(Some(AddedRemovedKeys::default().with_assume_added(true)))
            .storage_multiproof(legacy_targets)?;

            // Helper function to check if a node path matches at least one target: the path must
            // be a prefix of the target's key and be at least `min_len` nibbles long.
            let node_matches_target = |node_path: &Nibbles| -> bool {
                targets_vec.iter().any(|target| {
                    target.key_nibbles.starts_with(node_path) &&
                        node_path.len() >= target.min_len as usize
                })
            };

            // Decode and sort legacy proof nodes
            let proof_legacy_nodes = proof_legacy_result
                .subtree
                .iter()
                .map(|(path, node_enc)| {
                    let mut buf = node_enc.as_ref();
                    let node = TrieNode::decode(&mut buf)
                        .expect("legacy implementation should not produce malformed proof nodes");

                    // Masks are omitted for the root (empty-path) node.
                    let masks = if path.is_empty() {
                        None
                    } else {
                        proof_legacy_result.branch_node_masks.get(path).copied()
                    };

                    ProofTrieNode { path: *path, node, masks }
                })
                .sorted_by(|a, b| depth_first::cmp(&a.path, &b.path))
                .collect::<Vec<_>>();

            // Convert legacy proofs to V2 proofs by combining extensions with their child branches
            let proof_legacy_nodes_v2 = convert_legacy_proofs_to_v2(&proof_legacy_nodes);

            // Filter both results to only keep nodes which match a target. The v2
            // storage_proof returns an EmptyRoot node even when there are no targets, so
            // both sides need the same filtering.
            let proof_legacy_nodes_v2 = proof_legacy_nodes_v2
                .into_iter()
                .filter(|ProofTrieNodeV2 { path, .. }| node_matches_target(path))
                .collect::<Vec<_>>();

            let proof_v2_result = proof_v2_result
                .into_iter()
                .filter(|ProofTrieNodeV2 { path, .. }| node_matches_target(path))
                .collect::<Vec<_>>();

            pretty_assertions::assert_eq!(proof_legacy_nodes_v2, proof_v2_result);

            Ok(())
        }
    }
1945
1946    /// Tests that `clear_computation_state` properly resets internal stacks, allowing a
1947    /// `StorageProofCalculator` to be reused after a mid-computation error left stale state.
1948    /// Before the fix, stale data in `branch_stack`, `child_stack`, and `branch_path`
1949    /// could cause a `usize` underflow panic in `pop_branch`.
1950    #[test]
1951    fn test_proof_calculator_reuse_after_error() {
1952        reth_tracing::init_test_tracing();
1953
1954        let slots = [
1955            B256::right_padding_from(&[0x10]),
1956            B256::right_padding_from(&[0x20]),
1957            B256::right_padding_from(&[0x30]),
1958            B256::right_padding_from(&[0x40]),
1959        ];
1960        let storage: BTreeMap<B256, U256> =
1961            slots.iter().map(|&s| (s, U256::from(100u64))).collect();
1962
1963        let harness = ProofTestHarness::new(storage);
1964
1965        let trie_cursor_factory = harness.trie_cursor_factory();
1966        let hashed_cursor_factory = harness.hashed_cursor_factory();
1967
1968        let hashed_address = harness.hashed_address();
1969        let trie_cursor = trie_cursor_factory.storage_trie_cursor(hashed_address).unwrap();
1970        let hashed_cursor = hashed_cursor_factory.hashed_storage_cursor(hashed_address).unwrap();
1971        let mut proof_calculator = StorageProofCalculator::new_storage(trie_cursor, hashed_cursor);
1972
1973        // Simulate stale state left by a mid-computation error: push fake entries onto internal
1974        // stacks and set a non-empty branch_path.
1975        proof_calculator.branch_stack.push(ProofTrieBranch {
1976            ext_len: 2,
1977            state_mask: TrieMask::new(0b1111),
1978            masks: None,
1979        });
1980        proof_calculator.branch_stack.push(ProofTrieBranch {
1981            ext_len: 0,
1982            state_mask: TrieMask::new(0b11),
1983            masks: None,
1984        });
1985        proof_calculator
1986            .child_stack
1987            .push(ProofTrieBranchChild::RlpNode(RlpNode::word_rlp(&B256::ZERO)));
1988        proof_calculator.branch_path = Nibbles::from_nibbles([0x1, 0x2, 0x3]);
1989
1990        // clear_computation_state should reset everything so a subsequent call works.
1991        proof_calculator.clear_computation_state();
1992
1993        let mut sorted_slots = slots.to_vec();
1994        sorted_slots.sort();
1995        let mut targets: Vec<ProofV2Target> =
1996            sorted_slots.iter().copied().map(ProofV2Target::new).collect();
1997
1998        let result = proof_calculator.storage_proof(hashed_address, &mut targets).unwrap();
1999
2000        // Compare against a fresh calculator to verify correctness.
2001        let trie_cursor = trie_cursor_factory.storage_trie_cursor(hashed_address).unwrap();
2002        let hashed_cursor = hashed_cursor_factory.hashed_storage_cursor(hashed_address).unwrap();
2003        let mut fresh_calculator = StorageProofCalculator::new_storage(trie_cursor, hashed_cursor);
2004        let fresh_result = fresh_calculator.storage_proof(hashed_address, &mut targets).unwrap();
2005
2006        pretty_assertions::assert_eq!(fresh_result, result);
2007    }
2008
    mod proptest_tests {
        use super::*;
        use proptest::prelude::*;

        /// Generate a strategy for storage datasets (hashed slot → value).
        ///
        /// Yields up to 100 random `(slot, value)` pairs; zero values are filtered out since a
        /// zero storage value denotes an absent slot.
        fn storage_strategy() -> impl Strategy<Value = BTreeMap<B256, U256>> {
            prop::collection::vec((any::<[u8; 32]>(), any::<u64>()), 0..=100).prop_map(|slots| {
                slots
                    .into_iter()
                    .map(|(slot_bytes, value)| (B256::from(slot_bytes), U256::from(value)))
                    .filter(|(_, v)| *v != U256::ZERO)
                    .collect()
            })
        }

        /// Generate a strategy for proof targets that are 80% from existing storage slots
        /// and 20% random keys. Each target has a random `min_len` of 0..16.
        fn proof_targets_strategy(
            slot_keys: Vec<B256>,
        ) -> impl Strategy<Value = Vec<ProofV2Target>> {
            let num_slots = slot_keys.len();

            // Allow a few more targets than slots so some targets are guaranteed misses even
            // when every slot key is selected.
            let target_count = 0..=(num_slots + 5);

            target_count.prop_flat_map(move |count| {
                let slot_keys = slot_keys.clone();
                prop::collection::vec(
                    (
                        // `weighted(0.8)` selects an existing slot 80% of the time; fall back
                        // to a fully random key when the dataset is empty.
                        prop::bool::weighted(0.8).prop_flat_map(move |from_slots| {
                            if from_slots && !slot_keys.is_empty() {
                                prop::sample::select(slot_keys.clone()).boxed()
                            } else {
                                any::<[u8; 32]>().prop_map(B256::from).boxed()
                            }
                        }),
                        0u8..16u8,
                    )
                        .prop_map(|(key, min_len)| ProofV2Target::new(key).with_min_len(min_len)),
                    count,
                )
            })
        }

        proptest! {
            #![proptest_config(ProptestConfig::with_cases(4000))]
            #[test]
            /// Tests that `StorageProofCalculator` produces valid proofs for randomly generated
            /// storage datasets with proof targets.
            fn proptest_proof_with_targets(
                (storage, targets) in storage_strategy()
                    .prop_flat_map(|storage| {
                        let mut slot_keys: Vec<B256> = storage.keys().copied().collect();
                        slot_keys.sort_unstable();
                        let targets_strategy = proof_targets_strategy(slot_keys);
                        (Just(storage), targets_strategy)
                    })
            ) {
                reth_tracing::init_test_tracing();
                let harness = ProofTestHarness::new(storage);

                harness.assert_proof(targets).expect("Proof generation failed");
            }
        }
    }
2073
2074    #[test]
2075    fn test_big_trie() {
2076        use rand::prelude::*;
2077
2078        reth_tracing::init_test_tracing();
2079        let mut rng = rand::rngs::SmallRng::seed_from_u64(1);
2080
2081        let mut rand_b256 = || {
2082            let mut buf: [u8; 32] = [0; 32];
2083            rng.fill_bytes(&mut buf);
2084            B256::from_slice(&buf)
2085        };
2086
2087        // Generate random storage dataset.
2088        let mut storage = BTreeMap::new();
2089        for _ in 0..10240 {
2090            let hashed_slot = rand_b256();
2091            storage.insert(hashed_slot, U256::from(1u64));
2092        }
2093
2094        // Collect targets; partially from real keys, partially random keys which probably won't
2095        // exist.
2096        let mut targets = storage.keys().copied().collect::<Vec<_>>();
2097        for _ in 0..storage.len() / 5 {
2098            targets.push(rand_b256());
2099        }
2100        targets.sort();
2101
2102        // Create test harness
2103        let harness = ProofTestHarness::new(storage);
2104
2105        harness
2106            .assert_proof(targets.into_iter().map(ProofV2Target::new))
2107            .expect("Proof generation failed");
2108    }
2109
2110    #[test]
2111    fn test_node_with_masked_empty_child() {
2112        reth_tracing::init_test_tracing();
2113
2114        let val = U256::from(42u64);
2115
2116        // All storage keys share a common first nibble (0x6), so the branch is at path 0x6. The
2117        // second nibble differentiates children: 0,1,3,5,7.
2118        let slot_60 = B256::right_padding_from(&[0x60]);
2119        let slot_61 = B256::right_padding_from(&[0x61]);
2120        let slot_65 = B256::right_padding_from(&[0x65]);
2121        let slot_67 = B256::right_padding_from(&[0x67]);
2122
2123        // Construct a branch node at path 0x6 with state_mask bits 0,1,3,5,7.
2124        // hash_mask has bits 0,1,5,7 (NOT 3) — nibble 3's hash is cleared because it's in the
2125        // prefix set. Hashes are dummy values.
2126        let state_mask = TrieMask::new(0b10101011); // bits 0,1,3,5,7
2127        let hash_mask = TrieMask::new(0b10100011); // bits 0,1,5,7 (NOT 3)
2128        let hashes = vec![B256::repeat_byte(0xaa); hash_mask.count_ones() as usize];
2129        let branch = BranchNodeCompact::new(state_mask, TrieMask::new(0), hash_mask, hashes, None);
2130
2131        let storage_nodes: BTreeMap<Nibbles, BranchNodeCompact> =
2132            std::iter::once((Nibbles::from_nibbles([0x6]), branch)).collect();
2133
2134        // Hashed cursor has slots at children 0, 1, 5, 7 — but NOT child 3 (0x63).
2135        // This simulates the post-state overlay having deleted the slot at 0x63.
2136        let mut harness = TrieTestHarness::new(
2137            [slot_60, slot_61, slot_65, slot_67].iter().map(|s| (*s, val)).collect(),
2138        );
2139        harness.set_trie_nodes(storage_nodes);
2140
2141        let storage_trie_cursor =
2142            harness.trie_cursor_factory().storage_trie_cursor(harness.hashed_address()).unwrap();
2143        let hashed_storage_cursor = harness
2144            .hashed_cursor_factory()
2145            .hashed_storage_cursor(harness.hashed_address())
2146            .unwrap();
2147        let mut calculator =
2148            StorageProofCalculator::new_storage(storage_trie_cursor, hashed_storage_cursor);
2149        let root_node = calculator
2150            .storage_root_node(harness.hashed_address())
2151            .expect("storage_root_node should succeed with masked empty child");
2152
2153        let root_hash = calculator.compute_root_hash(core::slice::from_ref(&root_node)).unwrap();
2154        assert!(root_hash.is_some(), "should produce a root hash");
2155    }
2156
    /// Tests that `root_node` handles the case where `uncalculated_lower_bound` has advanced
    /// entirely past a cached branch that still has unprocessed children in its `state_mask`.
    ///
    /// Branch at `0x6` has `state_mask` bits 0,1,5,f where nibble 5 has its `hash_mask`
    /// cleared and no leaf data. The last child (nibble f)
    /// causes `calculate_key_range` to be called with range `(0x6f, Some(0x7))`. After that range,
    /// the hashed cursor still has keys (at `0x70...`), so `proof_subtrie` does not break and
    /// re-enters `next_uncached_key_range` with `uncalculated_lower_bound = Some(0x7)`.
    /// Since `0x7` is past `0x6`, all remaining children are skipped and the branch is popped.
    #[test]
    fn test_node_with_masked_empty_child_lower_bound_past_branch() {
        reth_tracing::init_test_tracing();

        let val = U256::from(42u64);

        // Leaf keys under 0x6 and one beyond (0x70) to keep the cursor alive after 0x6.
        let slot_60 = B256::right_padding_from(&[0x60]);
        let slot_61 = B256::right_padding_from(&[0x61]);
        let slot_6f = B256::right_padding_from(&[0x6f]);
        let slot_70 = B256::right_padding_from(&[0x70]);

        // Branch at 0x6: state_mask bits 0,1,5,f; hash_mask bits 0,1 (NOT 5, NOT f).
        // Nibble 5 has state_mask set but no hash and no leaf data (masked empty child).
        // Nibble f has state_mask set, no hash, but DOES have leaf data.
        let state_mask = TrieMask::new(0b1000_0000_0010_0011); // bits 0,1,5,f
        let hash_mask = TrieMask::new(0b0000_0000_0000_0011); // bits 0,1
        // Dummy hashes — only their count (one per hash_mask bit) matters here.
        let hashes = vec![B256::repeat_byte(0xaa); hash_mask.count_ones() as usize];
        let branch = BranchNodeCompact::new(state_mask, TrieMask::new(0), hash_mask, hashes, None);

        let storage_nodes: BTreeMap<Nibbles, BranchNodeCompact> =
            std::iter::once((Nibbles::from_nibbles([0x6]), branch)).collect();

        // Hashed cursor: slots at 0x60, 0x61, 0x6f, 0x70 — but NOT 0x65.
        let mut harness = TrieTestHarness::new(
            [slot_60, slot_61, slot_6f, slot_70].iter().map(|s| (*s, val)).collect(),
        );
        harness.set_trie_nodes(storage_nodes);

        let storage_trie_cursor =
            harness.trie_cursor_factory().storage_trie_cursor(harness.hashed_address()).unwrap();
        let hashed_storage_cursor = harness
            .hashed_cursor_factory()
            .hashed_storage_cursor(harness.hashed_address())
            .unwrap();
        let mut calculator =
            StorageProofCalculator::new_storage(storage_trie_cursor, hashed_storage_cursor);
        let root_node = calculator
            .storage_root_node(harness.hashed_address())
            .expect("storage_root_node should succeed when lower bound advances past branch");

        // Producing a root hash proves the traversal completed despite the advanced bound.
        let root_hash = calculator.compute_root_hash(core::slice::from_ref(&root_node)).unwrap();
        assert!(root_hash.is_some(), "should produce a root hash");
    }
2210
2211    /// Tests that the prefix set causes `next_uncached_key_range` to add child nibbles that are
2212    /// not present in the cached branch's `state_mask`.
2213    ///
2214    /// Setup: An original state with leaves at `0x60` and `0x61` produces a cached branch at
2215    /// `0x6` with children at nibbles 0 and 1 (both with real cached hashes from `StorageRoot`).
2216    /// A new leaf is then inserted at `0x63...`, which is NOT in the branch's `state_mask`.
2217    /// The prefix set contains the new key. Without prefix set support, the calculator would
2218    /// skip nibble 3 entirely and produce a stale root hash. With prefix set support, nibble 3
2219    /// is discovered and its subtrie is recalculated from leaves.
2220    #[test]
2221    fn test_prefix_set_adds_child_nibbles() {
2222        reth_tracing::init_test_tracing();
2223
2224        let val = U256::from(42u64);
2225        let slot_60 = B256::right_padding_from(&[0x60]);
2226        let slot_61 = B256::right_padding_from(&[0x61]);
2227        let slot_63 = B256::right_padding_from(&[0x63]);
2228
2229        let harness = TrieTestHarness::new([(slot_60, val), (slot_61, val)].into_iter().collect());
2230
2231        let changeset: BTreeMap<B256, U256> = std::iter::once((slot_63, val)).collect();
2232        let (expected_root, _) = harness.get_root_with_updates(&changeset);
2233
2234        let mut updated_storage = harness.storage().clone();
2235        updated_storage.insert(slot_63, val);
2236
2237        let updated_hashed = MockHashedCursorFactory::new(
2238            BTreeMap::new(),
2239            std::iter::once((harness.hashed_address(), updated_storage)).collect(),
2240        );
2241
2242        let mut prefix_set = PrefixSetMut::default();
2243        prefix_set.insert(Nibbles::unpack(slot_63));
2244
2245        let trie_cursor =
2246            harness.trie_cursor_factory().storage_trie_cursor(harness.hashed_address()).unwrap();
2247        let hashed_cursor = updated_hashed.hashed_storage_cursor(harness.hashed_address()).unwrap();
2248        let mut calculator = StorageProofCalculator::new_storage(trie_cursor, hashed_cursor)
2249            .with_prefix_set(prefix_set.freeze());
2250        let root_node = calculator
2251            .storage_root_node(harness.hashed_address())
2252            .expect("storage_root_node should succeed with prefix set adding child nibbles");
2253        let got_root =
2254            calculator.compute_root_hash(core::slice::from_ref(&root_node)).unwrap().unwrap();
2255
2256        pretty_assertions::assert_eq!(
2257            expected_root,
2258            got_root,
2259            "Root hash with prefix set should match fresh computation"
2260        );
2261    }
2262
    /// Tests that `next_uncached_key_range` does not use a cached hash when the child's path
    /// is in the prefix set, forcing recalculation from leaves.
    ///
    /// Setup: A cached branch at `0x6` with children at nibbles 0,1,5 — all with cached hashes.
    /// The leaf at `0x65...` is changed (different value). The prefix set marks `0x65...` as
    /// dirty. Without prefix set support, the calculator would use the stale cached hash for
    /// nibble 5 and produce a wrong root. With prefix set support, the cached hash is skipped
    /// and the subtrie is recalculated from the updated leaf data.
    #[test]
    fn test_prefix_set_invalidates_cached_hash() {
        reth_tracing::init_test_tracing();

        let original_val = U256::from(42u64);
        let updated_val = U256::from(9999u64);
        let slot_60 = B256::right_padding_from(&[0x60]);
        let slot_61 = B256::right_padding_from(&[0x61]);
        let slot_65 = B256::right_padding_from(&[0x65]);

        let harness = TrieTestHarness::new(
            [(slot_60, original_val), (slot_61, original_val), (slot_65, original_val)]
                .into_iter()
                .collect(),
        );

        // Expected root is derived by applying the value change as a changeset.
        let changeset: BTreeMap<B256, U256> = std::iter::once((slot_65, updated_val)).collect();
        let (expected_root, _) = harness.get_root_with_updates(&changeset);

        let mut updated_storage = harness.storage().clone();
        updated_storage.insert(slot_65, updated_val);

        // Hashed cursor factory serving the post-update leaf data; the trie cursor still
        // exposes the stale cached hashes.
        let updated_hashed = MockHashedCursorFactory::new(
            BTreeMap::new(),
            std::iter::once((harness.hashed_address(), updated_storage)).collect(),
        );

        let mut prefix_set = PrefixSetMut::default();
        prefix_set.insert(Nibbles::unpack(slot_65));

        let trie_cursor =
            harness.trie_cursor_factory().storage_trie_cursor(harness.hashed_address()).unwrap();
        let hashed_cursor = updated_hashed.hashed_storage_cursor(harness.hashed_address()).unwrap();
        let mut calculator = StorageProofCalculator::new_storage(trie_cursor, hashed_cursor)
            .with_prefix_set(prefix_set.freeze());
        let root_node = calculator
            .storage_root_node(harness.hashed_address())
            .expect("storage_root_node should succeed with prefix set invalidating cached hash");
        let got_root =
            calculator.compute_root_hash(core::slice::from_ref(&root_node)).unwrap().unwrap();

        pretty_assertions::assert_eq!(
            expected_root,
            got_root,
            "Root hash with prefix set invalidation should match fresh computation"
        );
    }
2318
2319    /// Helper to compute the keccak256 hash of a storage leaf node. The `short_key` is the
2320    /// leaf's key after trimming all branch/extension nibbles consumed by ancestor nodes.
2321    fn storage_leaf_hash(short_key: &Nibbles, value: &U256) -> B256 {
2322        let mut buf = Vec::new();
2323        alloy_trie::nodes::LeafNodeRef::new(short_key, &alloy_rlp::encode_fixed_size(value))
2324            .encode(&mut buf);
2325        keccak256(&buf)
2326    }
2327
    /// Tests branch collapse when the removed child comes BEFORE the remaining child.
    ///
    /// Trie structure (3 hashed storage keys):
    ///   `key_a` = 0x20...  (root nibble 2, sub-nibble 0)
    ///   `key_b` = 0x21...  (root nibble 2, sub-nibble 1)
    ///   `key_c` = 0xb0...  (root nibble b)
    ///
    /// This creates:
    ///   root branch at nibbles {2, b}
    ///   sub-branch at path [2] at nibbles {0, 1}
    ///
    /// `key_a` is removed (prefix set marks it dirty, cursor has no value for it).
    /// The sub-branch at [2] collapses into its remaining child (`key_b`). The removed child
    /// (nibble 0) comes before the remaining child (nibble 1).
    #[test]
    fn test_branch_collapse_removed_child_before_remaining() {
        reth_tracing::init_test_tracing();

        let val = U256::from(1u64);

        let key_a = B256::right_padding_from(&[0x20]); // root nibble 2, sub-nibble 0
        let key_b = B256::right_padding_from(&[0x21]); // root nibble 2, sub-nibble 1
        let key_c = B256::right_padding_from(&[0xb0]); // root nibble b

        // Compute leaf hashes for the sub-branch's children.
        // The sub-branch at path [2] consumes 2 nibbles from each key (root nibble + sub-nibble).
        let leaf_hash_a = storage_leaf_hash(&Nibbles::unpack(key_a).slice(2..), &val);
        let leaf_hash_b = storage_leaf_hash(&Nibbles::unpack(key_b).slice(2..), &val);

        // Only cache the sub-branch at path [2] — the root will be built from leaves.
        // The sub-branch has children at nibbles 0 and 1, both with cached hashes
        // (hash_mask == state_mask).
        let sub_branch_state_mask = TrieMask::new((1 << 0) | (1 << 1));
        let cached_sub_branch = BranchNodeCompact::new(
            sub_branch_state_mask,
            TrieMask::new(0),
            sub_branch_state_mask,
            vec![leaf_hash_a, leaf_hash_b],
            None,
        );

        let storage_nodes: BTreeMap<Nibbles, BranchNodeCompact> =
            std::iter::once((Nibbles::from_nibbles([0x2]), cached_sub_branch)).collect();

        // The hashed cursor contains key_b and key_c (the root's other child). key_a was removed
        // (not in cursor)
        let mut harness = TrieTestHarness::new([(key_b, val), (key_c, val)].into_iter().collect());
        harness.set_trie_nodes(storage_nodes);

        // Prefix set marks key_a as dirty (removed).
        let mut prefix_set_mut = PrefixSetMut::default();
        prefix_set_mut.insert(Nibbles::unpack(key_a));
        let prefix_set = prefix_set_mut.freeze();

        // Compute root with cached branches + prefix set — triggers sub-branch collapse.
        let storage_trie_cursor =
            harness.trie_cursor_factory().storage_trie_cursor(harness.hashed_address()).unwrap();
        let hashed_storage_cursor = harness
            .hashed_cursor_factory()
            .hashed_storage_cursor(harness.hashed_address())
            .unwrap();
        let mut calculator =
            StorageProofCalculator::new_storage(storage_trie_cursor, hashed_storage_cursor)
                .with_prefix_set(prefix_set);
        let root_node = calculator
            .storage_root_node(harness.hashed_address())
            .expect("storage_root_node should succeed after branch collapse");
        let root_with_collapse =
            calculator.compute_root_hash(core::slice::from_ref(&root_node)).unwrap().unwrap();

        // Compute reference root from scratch (no cached branches) using the full final state.
        let mut fresh_harness =
            TrieTestHarness::new([(key_b, val), (key_c, val)].into_iter().collect());
        fresh_harness.set_trie_nodes(BTreeMap::new());
        let storage_trie_cursor = fresh_harness
            .trie_cursor_factory()
            .storage_trie_cursor(fresh_harness.hashed_address())
            .unwrap();
        let hashed_storage_cursor = fresh_harness
            .hashed_cursor_factory()
            .hashed_storage_cursor(fresh_harness.hashed_address())
            .unwrap();
        let mut fresh_calculator =
            StorageProofCalculator::new_storage(storage_trie_cursor, hashed_storage_cursor);
        let fresh_root_node = fresh_calculator
            .storage_root_node(fresh_harness.hashed_address())
            .expect("fresh storage_root_node should succeed");
        let expected_root = fresh_calculator
            .compute_root_hash(core::slice::from_ref(&fresh_root_node))
            .unwrap()
            .unwrap();

        pretty_assertions::assert_eq!(
            expected_root,
            root_with_collapse,
            "Root hash after collapsing branch (removed child before remaining) should match fresh computation"
        );
    }
2425
    /// Tests branch collapse when the removed child comes AFTER the remaining child.
    ///
    /// Same trie structure as "before" test, but with nibbles 4 and 9 instead of 0 and 1 for
    /// the sub-branch, and nibble 9 is removed. The removed child (nibble 9) comes after the
    /// remaining child (nibble 4).
    #[test]
    fn test_branch_collapse_removed_child_after_remaining() {
        reth_tracing::init_test_tracing();

        let val = U256::from(1u64);

        // key_a at sub-nibble 4, key_b at sub-nibble 9 (under root nibble 2).
        let key_a = B256::right_padding_from(&[0x24]); // root nibble 2, sub-nibble 4
        let key_b = B256::right_padding_from(&[0x29]); // root nibble 2, sub-nibble 9
        let key_c = B256::right_padding_from(&[0xb0]); // root nibble b

        // Leaf hashes as seen from the sub-branch at [2], which consumes 2 nibbles per key.
        let leaf_hash_a = storage_leaf_hash(&Nibbles::unpack(key_a).slice(2..), &val);
        let leaf_hash_b = storage_leaf_hash(&Nibbles::unpack(key_b).slice(2..), &val);

        // Only cache the sub-branch at path [2] — the root will be built from leaves.
        // hash_mask == state_mask: both children carry cached hashes.
        let sub_branch_state_mask = TrieMask::new((1 << 4) | (1 << 9));
        let cached_sub_branch = BranchNodeCompact::new(
            sub_branch_state_mask,
            TrieMask::new(0),
            sub_branch_state_mask,
            vec![leaf_hash_a, leaf_hash_b],
            None,
        );

        let storage_nodes: BTreeMap<Nibbles, BranchNodeCompact> =
            std::iter::once((Nibbles::from_nibbles([0x2]), cached_sub_branch)).collect();

        // The hashed cursor contains key_a and key_c. key_b was removed (not in cursor)
        let mut harness = TrieTestHarness::new([(key_a, val), (key_c, val)].into_iter().collect());
        harness.set_trie_nodes(storage_nodes);

        // Prefix set marks key_b as dirty (removed).
        let mut prefix_set_mut = PrefixSetMut::default();
        prefix_set_mut.insert(Nibbles::unpack(key_b));
        let prefix_set = prefix_set_mut.freeze();

        // Compute root with cached branches + prefix set — triggers sub-branch collapse.
        let storage_trie_cursor =
            harness.trie_cursor_factory().storage_trie_cursor(harness.hashed_address()).unwrap();
        let hashed_storage_cursor = harness
            .hashed_cursor_factory()
            .hashed_storage_cursor(harness.hashed_address())
            .unwrap();
        let mut calculator =
            StorageProofCalculator::new_storage(storage_trie_cursor, hashed_storage_cursor)
                .with_prefix_set(prefix_set);
        let root_node = calculator
            .storage_root_node(harness.hashed_address())
            .expect("storage_root_node should succeed after branch collapse");
        let root_with_collapse =
            calculator.compute_root_hash(core::slice::from_ref(&root_node)).unwrap().unwrap();

        // Compute reference root from scratch (no cached branches) using the full final state.
        let mut fresh_harness =
            TrieTestHarness::new([(key_a, val), (key_c, val)].into_iter().collect());
        fresh_harness.set_trie_nodes(BTreeMap::new());
        let storage_trie_cursor = fresh_harness
            .trie_cursor_factory()
            .storage_trie_cursor(fresh_harness.hashed_address())
            .unwrap();
        let hashed_storage_cursor = fresh_harness
            .hashed_cursor_factory()
            .hashed_storage_cursor(fresh_harness.hashed_address())
            .unwrap();
        let mut fresh_calculator =
            StorageProofCalculator::new_storage(storage_trie_cursor, hashed_storage_cursor);
        let fresh_root_node = fresh_calculator
            .storage_root_node(fresh_harness.hashed_address())
            .expect("fresh storage_root_node should succeed");
        let expected_root = fresh_calculator
            .compute_root_hash(core::slice::from_ref(&fresh_root_node))
            .unwrap()
            .unwrap();

        pretty_assertions::assert_eq!(
            expected_root,
            root_with_collapse,
            "Root hash after collapsing branch (removed child after remaining) should match fresh computation"
        );
    }
2511
    /// Tests that a cached branch reached through an extension does not cause the calculator to
    /// skip a dirty leaf (`key_c`) that diverges from the extension path AFTER the cached
    /// branch's parent. The trie cache is intentionally inconsistent: the branch at [6,a] is
    /// omitted while its descendant at [6,a,3] is present.
    #[test]
    fn test_cached_branch_extension_skips_diverging_target() {
        reth_tracing::init_test_tracing();

        let val = U256::from(100u64);

        // Keys whose first bytes directly set the nibble paths we need.
        let key_a0 = B256::right_padding_from(&[0x6a, 0x30]); // nibbles: 6,a,3,0,...
        let key_a1 = B256::right_padding_from(&[0x6a, 0x31]); // nibbles: 6,a,3,1,...
        let key_c = B256::right_padding_from(&[0x6a, 0x80]); // nibbles: 6,a,8,0,...
        let key_d = B256::right_padding_from(&[0x6b, 0x00]); // nibbles: 6,b,0,0,...
        let key_e = B256::right_padding_from(&[0x6c, 0x00]); // nibbles: 6,c,0,0,...

        // Build a correct trie from all five leaves to get the expected root and real hashes.
        let all_storage: BTreeMap<B256, U256> =
            [(key_a0, val), (key_a1, val), (key_c, val), (key_d, val), (key_e, val)]
                .into_iter()
                .collect();
        let correct_harness = TrieTestHarness::new(all_storage.clone());
        let expected_root = correct_harness.original_root();

        // Compute leaf hashes for constructing manual cached branch nodes.
        let leaf_hash_a0 = storage_leaf_hash(&Nibbles::unpack(key_a0).slice(4..), &val);
        let leaf_hash_a1 = storage_leaf_hash(&Nibbles::unpack(key_a1).slice(4..), &val);
        let leaf_hash_d = storage_leaf_hash(&Nibbles::unpack(key_d).slice(2..), &val);
        let leaf_hash_e = storage_leaf_hash(&Nibbles::unpack(key_e).slice(2..), &val);

        // ── Construct cached branch at [6] ─────────────────────────────────────
        // state_mask: bits a, b, and c set.
        // hash_mask:  bits b and c — both have cached leaf hashes.  Bit a has no hash, so the
        //             calculator will seek the trie cursor to find a deeper cached branch.
        //
        // Having three children with two (b, c) NOT in the prefix set ensures
        // `should_skip_cached_branch` does NOT skip this branch (num_unmatched >= 2).
        let branch_6_state_mask = TrieMask::new((1 << 0xa) | (1 << 0xb) | (1 << 0xc));
        let branch_6_hash_mask = TrieMask::new((1 << 0xb) | (1 << 0xc));
        let branch_6 = BranchNodeCompact::new(
            branch_6_state_mask,
            TrieMask::new(0),
            branch_6_hash_mask,
            vec![leaf_hash_d, leaf_hash_e],
            None,
        );

        // ── Construct cached branch at [6,a,3] ────────────────────────────────
        // state_mask: bits 0 and 1 set (children key_a0 and key_a1).
        // hash_mask:  both bits set — both children have cached hashes.
        let branch_6a3_state_mask = TrieMask::new((1 << 0) | (1 << 1));
        let branch_6a3 = BranchNodeCompact::new(
            branch_6a3_state_mask,
            TrieMask::new(0),
            branch_6a3_state_mask,
            vec![leaf_hash_a0, leaf_hash_a1],
            None,
        );

        // Intentionally omit the branch at [6,a] — this is the inconsistency.
        let inconsistent_nodes: BTreeMap<Nibbles, BranchNodeCompact> = [
            (Nibbles::from_nibbles([0x6]), branch_6),
            (Nibbles::from_nibbles([0x6, 0xa, 0x3]), branch_6a3),
        ]
        .into_iter()
        .collect();

        // Create harness with all five leaves but the inconsistent trie nodes.
        let mut harness = TrieTestHarness::new(all_storage);
        harness.set_trie_nodes(inconsistent_nodes);

        // Mark key_c as dirty — in the real scenario the leaf was touched by execution.
        // The prefix set contains only key_c's full path. `should_skip_cached_branch` will
        // NOT skip branch [6] because two of its three children (b, c) are not in the set
        // (num_unmatched = 2 > 1). It also will not skip branch [6,a,3] because
        // `contains([6,a,3])` is false (key_c's nibbles 6,a,8,... do not start with 6,a,3).
        let mut prefix_set = PrefixSetMut::default();
        prefix_set.insert(Nibbles::unpack(key_c));

        // ── Verify root hash ───────────────────────────────────────────────────
        let trie_cursor =
            harness.trie_cursor_factory().storage_trie_cursor(harness.hashed_address()).unwrap();
        let hashed_cursor = harness
            .hashed_cursor_factory()
            .hashed_storage_cursor(harness.hashed_address())
            .unwrap();
        let mut calculator = StorageProofCalculator::new_storage(trie_cursor, hashed_cursor)
            .with_prefix_set(prefix_set.freeze());

        let root_node = calculator
            .storage_root_node(harness.hashed_address())
            .expect("storage_root_node should succeed");
        let got_root = calculator
            .compute_root_hash(core::slice::from_ref(&root_node))
            .unwrap()
            .expect("should produce a root hash");

        // With the bug, the calculator skips key_c and produces a wrong root.
        pretty_assertions::assert_eq!(
            expected_root,
            got_root,
            "Root hash should match correct trie; cached extension must not skip diverging leaves"
        );

        // ── Verify proof for key_c contains nodes on its path ──────────────────
        let mut targets = vec![ProofV2Target::new(key_c)];
        let proofs = calculator
            .storage_proof(harness.hashed_address(), &mut targets)
            .expect("storage_proof should succeed");

        let key_c_nibbles = Nibbles::unpack(key_c);
        let has_matching_node = proofs.iter().any(|node| key_c_nibbles.starts_with(&node.path));
        assert!(
            has_matching_node,
            "Proof for key_c should contain at least one node on key_c's path, got: {proofs:?}"
        );
    }
2626
2627    #[test]
2628    fn test_cached_branch_extension_skips_diverging_target_before() {
2629        reth_tracing::init_test_tracing();
2630
2631        let val = U256::from(100u64);
2632
2633        // Keys whose first bytes directly set the nibble paths we need.
2634        let key_a0 = B256::right_padding_from(&[0x6a, 0x80]); // nibbles: 6,a,8,0,...
2635        let key_a1 = B256::right_padding_from(&[0x6a, 0x81]); // nibbles: 6,a,8,1,...
2636        let key_c = B256::right_padding_from(&[0x6a, 0x30]); // nibbles: 6,a,3,0,... (BEFORE [6,a,8])
2637        let key_d = B256::right_padding_from(&[0x6b, 0x00]); // nibbles: 6,b,0,0,...
2638        let key_e = B256::right_padding_from(&[0x6c, 0x00]); // nibbles: 6,c,0,0,...
2639
2640        // Build a correct trie from all five leaves to get the expected root and real hashes.
2641        let all_storage: BTreeMap<B256, U256> =
2642            [(key_a0, val), (key_a1, val), (key_c, val), (key_d, val), (key_e, val)]
2643                .into_iter()
2644                .collect();
2645        let correct_harness = TrieTestHarness::new(all_storage.clone());
2646        let expected_root = correct_harness.original_root();
2647
2648        // Compute leaf hashes for constructing manual cached branch nodes.
2649        let leaf_hash_a0 = storage_leaf_hash(&Nibbles::unpack(key_a0).slice(4..), &val);
2650        let leaf_hash_a1 = storage_leaf_hash(&Nibbles::unpack(key_a1).slice(4..), &val);
2651        let leaf_hash_d = storage_leaf_hash(&Nibbles::unpack(key_d).slice(2..), &val);
2652        let leaf_hash_e = storage_leaf_hash(&Nibbles::unpack(key_e).slice(2..), &val);
2653
2654        // ── Construct cached branch at [6] ─────────────────────────────────────
2655        // state_mask: bits a, b, and c set.
2656        // hash_mask:  bits b and c — both have cached leaf hashes.  Bit a has no hash, so the
2657        //             calculator will seek the trie cursor to find a deeper cached branch.
2658        //
2659        // Having three children with two (b, c) NOT in the prefix set ensures
2660        // `should_skip_cached_branch` does NOT skip this branch (num_unmatched >= 2).
2661        let branch_6_state_mask = TrieMask::new((1 << 0xa) | (1 << 0xb) | (1 << 0xc));
2662        let branch_6_hash_mask = TrieMask::new((1 << 0xb) | (1 << 0xc));
2663        let branch_6 = BranchNodeCompact::new(
2664            branch_6_state_mask,
2665            TrieMask::new(0),
2666            branch_6_hash_mask,
2667            vec![leaf_hash_d, leaf_hash_e],
2668            None,
2669        );
2670
2671        // ── Construct cached branch at [6,a,8] ────────────────────────────────
2672        // state_mask: bits 0 and 1 set (children key_a0 and key_a1).
2673        // hash_mask:  both bits set — both children have cached hashes.
2674        let branch_6a8_state_mask = TrieMask::new((1 << 0) | (1 << 1));
2675        let branch_6a8 = BranchNodeCompact::new(
2676            branch_6a8_state_mask,
2677            TrieMask::new(0),
2678            branch_6a8_state_mask,
2679            vec![leaf_hash_a0, leaf_hash_a1],
2680            None,
2681        );
2682
2683        // Intentionally omit the branch at [6,a] — this is the inconsistency.
2684        let inconsistent_nodes: BTreeMap<Nibbles, BranchNodeCompact> = [
2685            (Nibbles::from_nibbles([0x6]), branch_6),
2686            (Nibbles::from_nibbles([0x6, 0xa, 0x8]), branch_6a8),
2687        ]
2688        .into_iter()
2689        .collect();
2690
2691        // Create harness with all five leaves but the inconsistent trie nodes.
2692        let mut harness = TrieTestHarness::new(all_storage);
2693        harness.set_trie_nodes(inconsistent_nodes);
2694
2695        // Mark key_c as dirty — it comes BEFORE the cached branch [6,a,8] in nibble order.
2696        let mut prefix_set = PrefixSetMut::default();
2697        prefix_set.insert(Nibbles::unpack(key_c));
2698
2699        // ── Verify root hash ───────────────────────────────────────────────────
2700        let trie_cursor =
2701            harness.trie_cursor_factory().storage_trie_cursor(harness.hashed_address()).unwrap();
2702        let hashed_cursor = harness
2703            .hashed_cursor_factory()
2704            .hashed_storage_cursor(harness.hashed_address())
2705            .unwrap();
2706        let mut calculator = StorageProofCalculator::new_storage(trie_cursor, hashed_cursor)
2707            .with_prefix_set(prefix_set.freeze());
2708
2709        let root_node = calculator
2710            .storage_root_node(harness.hashed_address())
2711            .expect("storage_root_node should succeed");
2712        let got_root = calculator
2713            .compute_root_hash(core::slice::from_ref(&root_node))
2714            .unwrap()
2715            .expect("should produce a root hash");
2716
2717        // With the bug, the calculator skips key_c and produces a wrong root.
2718        pretty_assertions::assert_eq!(
2719            expected_root,
2720            got_root,
2721            "Root hash should match correct trie; cached extension must not skip diverging leaves before cached branch"
2722        );
2723
2724        // ── Verify proof for key_c contains nodes on its path ──────────────────
2725        let mut targets = vec![ProofV2Target::new(key_c)];
2726        let proofs = calculator
2727            .storage_proof(harness.hashed_address(), &mut targets)
2728            .expect("storage_proof should succeed");
2729
2730        let key_c_nibbles = Nibbles::unpack(key_c);
2731        let has_matching_node = proofs.iter().any(|node| key_c_nibbles.starts_with(&node.path));
2732        assert!(
2733            has_matching_node,
2734            "Proof for key_c should contain at least one node on key_c's path, got: {proofs:?}"
2735        );
2736    }
2737
2738    #[test]
2739    fn test_skipped_parent_branch_with_unskipped_child() {
2740        reth_tracing::init_test_tracing();
2741
2742        let val = U256::from(1u64);
2743        let updated_val = U256::from(2u64);
2744
2745        // We need cached branches at [2], [2,f], and [3] in the trie DB.
2746        let key_2 = B256::right_padding_from(&[0x20]);
2747        let key_2f00 = B256::right_padding_from(&[0x2f, 0x00]);
2748        let key_2f01 = B256::right_padding_from(&[0x2f, 0x01]);
2749        let key_2f10 = B256::right_padding_from(&[0x2f, 0x10]);
2750        let key_2f11 = B256::right_padding_from(&[0x2f, 0x11]);
2751        let key_300 = B256::right_padding_from(&[0x30, 0x00]);
2752        let key_301 = B256::right_padding_from(&[0x30, 0x10]);
2753        let key_310 = B256::right_padding_from(&[0x31, 0x00]);
2754        let key_311 = B256::right_padding_from(&[0x31, 0x10]);
2755        let key_500 = B256::right_padding_from(&[0x50, 0x00]);
2756        let key_501 = B256::right_padding_from(&[0x50, 0x10]);
2757        let key_510 = B256::right_padding_from(&[0x51, 0x00]);
2758        let key_511 = B256::right_padding_from(&[0x51, 0x10]);
2759
2760        let all_keys = [
2761            key_2, key_2f00, key_2f01, key_2f10, key_2f11, key_300, key_301, key_310, key_311,
2762            key_500, key_501, key_510, key_511,
2763        ];
2764
2765        let original_storage: BTreeMap<B256, U256> = all_keys.iter().map(|k| (*k, val)).collect();
2766        let harness = TrieTestHarness::new(original_storage);
2767
2768        // Verify that the expected branches exist in the trie.
2769        let trie_updates = harness.storage_trie_updates();
2770        assert!(trie_updates.storage_nodes.contains_key(&Nibbles::from_nibbles([0x2])));
2771        assert!(trie_updates.storage_nodes.contains_key(&Nibbles::from_nibbles([0x2, 0xf])));
2772        assert!(trie_updates.storage_nodes.contains_key(&Nibbles::from_nibbles([0x3])));
2773
2774        // Change only key_2 — triggers skip of parent branch [2] while child [2,f] is not
2775        // skipped.
2776        let changeset: BTreeMap<B256, U256> = std::iter::once((key_2, updated_val)).collect();
2777        let (expected_root, _) = harness.get_root_with_updates(&changeset);
2778
2779        let mut updated_storage = harness.storage().clone();
2780        updated_storage.insert(key_2, updated_val);
2781
2782        let updated_hashed = MockHashedCursorFactory::new(
2783            BTreeMap::new(),
2784            std::iter::once((harness.hashed_address(), updated_storage)).collect(),
2785        );
2786
2787        let mut prefix_set = PrefixSetMut::default();
2788        prefix_set.insert(Nibbles::unpack(key_2));
2789
2790        let trie_cursor =
2791            harness.trie_cursor_factory().storage_trie_cursor(harness.hashed_address()).unwrap();
2792        let hashed_cursor = updated_hashed.hashed_storage_cursor(harness.hashed_address()).unwrap();
2793        let mut calculator = StorageProofCalculator::new_storage(trie_cursor, hashed_cursor)
2794            .with_prefix_set(prefix_set.freeze());
2795        let root_node = calculator
2796            .storage_root_node(harness.hashed_address())
2797            .expect("storage_root_node should succeed");
2798
2799        let got_root = calculator
2800            .compute_root_hash(&[root_node])
2801            .expect("root hash should succeed")
2802            .expect("root should get hashed");
2803        pretty_assertions::assert_eq!(expected_root, got_root);
2804    }
2805
2806    #[test]
2807    fn test_cached_hash_with_deleted_leaf() {
2808        reth_tracing::init_test_tracing();
2809
2810        // Use different values to ensure distinct leaf hashes.
2811        let val_3 = U256::from(111u64);
2812        let val_5 = U256::from(222u64);
2813        let val_8 = U256::from(333u64);
2814
2815        // Keys under a common prefix `0x6_` to create a branch at path [6].
2816        // Use second byte to distinguish short keys (so they differ after position 2).
2817        let key_63 = B256::right_padding_from(&[0x63, 0xaa]); // nibble path: 6,3,a,a,...
2818        let key_65 = B256::right_padding_from(&[0x65, 0xbb]); // nibble path: 6,5,b,b,...
2819        let key_68 = B256::right_padding_from(&[0x68, 0xcc]); // nibble path: 6,8,c,c,...
2820
2821        // Compute leaf hashes. The branch at [6] consumes 2 nibbles (the branch path [6]
2822        // plus the child nibble), so each leaf's short key starts at position 2.
2823        let leaf_hash_3 = storage_leaf_hash(&Nibbles::unpack(key_63).slice(2..), &val_3);
2824        let leaf_hash_5 = storage_leaf_hash(&Nibbles::unpack(key_65).slice(2..), &val_5);
2825        let leaf_hash_8 = storage_leaf_hash(&Nibbles::unpack(key_68).slice(2..), &val_8);
2826
2827        // Build cached branch at [6] with state_mask and hash_mask bits for nibbles 3, 5, 8.
2828        let state_mask = TrieMask::new((1 << 3) | (1 << 5) | (1 << 8));
2829        let cached_branch = BranchNodeCompact::new(
2830            state_mask,
2831            TrieMask::new(0),
2832            state_mask, // hash_mask = state_mask (all children have cached hashes)
2833            vec![leaf_hash_3, leaf_hash_5, leaf_hash_8],
2834            None,
2835        );
2836
2837        let storage_nodes: BTreeMap<Nibbles, BranchNodeCompact> =
2838            std::iter::once((Nibbles::from_nibbles([0x6]), cached_branch)).collect();
2839
2840        // Compute the expected root from a fresh trie with just key_65 and key_68.
2841        let mut harness =
2842            TrieTestHarness::new([(key_65, val_5), (key_68, val_8)].into_iter().collect());
2843        let expected_root = harness.original_root();
2844
2845        // Update the harness with a cached trie node which will reference key_63 by hash.
2846        harness.set_trie_nodes(storage_nodes);
2847
2848        // Mark key_63 as dirty in the prefix set — in the real scenario the leaf was
2849        // deleted and the HashedPostState overlay masks it out.
2850        let mut prefix_set = PrefixSetMut::default();
2851        prefix_set.insert(Nibbles::unpack(key_63));
2852
2853        // Request a proof for key_63 (absence proof — no leaf exists).
2854        // Because the prefix set marks nibble 3's child path as dirty, the cached hash for
2855        // nibble 3 is skipped.
2856        let mut targets = vec![ProofV2Target::new(key_63)];
2857
2858        let trie_cursor =
2859            harness.trie_cursor_factory().storage_trie_cursor(harness.hashed_address()).unwrap();
2860        let hashed_cursor = harness
2861            .hashed_cursor_factory()
2862            .hashed_storage_cursor(harness.hashed_address())
2863            .unwrap();
2864        let mut calculator = StorageProofCalculator::new_storage(trie_cursor, hashed_cursor)
2865            .with_prefix_set(prefix_set.freeze());
2866
2867        let proofs = calculator
2868            .storage_proof(harness.hashed_address(), &mut targets)
2869            .expect("storage_proof should succeed");
2870        assert_eq!(1, proofs.len());
2871        let got_root = calculator
2872            .compute_root_hash(&proofs)
2873            .expect("compute_root_hash should succeed")
2874            .expect("should produce a root hash (proof contains root node)");
2875
2876        // With the bug, nibble 5 gets hashes[0] (nibble 3's hash) and nibble 8 gets
2877        // hashes[1] (nibble 5's hash), producing a wrong root.
2878        pretty_assertions::assert_eq!(
2879            expected_root,
2880            got_root,
2881            "Root hash should match trie without key_63; cached hash index is off when \
2882             an earlier hashed child has no leaves (absence proof target)"
2883        );
2884    }
2885}