// reth_trie/proof_v2/mod.rs
//! Proof calculation version 2: Leaf-only implementation.
//!
//! This module provides a rewritten proof calculator that:
//! - Uses only leaf data (HashedAccounts/Storages) to generate proofs
//! - Returns proof nodes sorted lexicographically by path
//! - Automatically resets after each calculation
//! - Re-uses cursors across calculations
//! - Supports generic value types with lazy evaluation
10use crate::{
11    hashed_cursor::{HashedCursor, HashedStorageCursor},
12    trie_cursor::{depth_first, TrieCursor, TrieStorageCursor},
13};
14use alloy_primitives::{keccak256, B256, U256};
15use alloy_rlp::Encodable;
16use alloy_trie::{BranchNodeCompact, TrieMask};
17use reth_execution_errors::trie::StateProofError;
18use reth_trie_common::{
19    BranchNodeMasks, BranchNodeRef, BranchNodeV2, Nibbles, ProofTrieNodeV2, ProofV2Target, RlpNode,
20    TrieNodeV2,
21};
22use std::cmp::Ordering;
23use tracing::{error, instrument, trace};
24
25mod value;
26pub use value::*;
27
28mod node;
29use node::*;
30
31mod target;
32pub(crate) use target::*;
33
34/// Target to use with the `tracing` crate.
35static TRACE_TARGET: &str = "trie::proof_v2";
36
37/// Number of bytes to pre-allocate for [`ProofCalculator`]'s `rlp_encode_buf` field.
38const RLP_ENCODE_BUF_SIZE: usize = 1024;
39
40/// A proof calculator that generates merkle proofs using only leaf data.
41///
42/// The calculator:
43/// - Accepts one or more B256 proof targets sorted lexicographically
44/// - Returns proof nodes sorted lexicographically by path
45/// - Automatically resets after each calculation
46/// - Re-uses cursors from one calculation to the next
47#[derive(Debug)]
48pub struct ProofCalculator<TC, HC, VE: LeafValueEncoder> {
49    /// Trie cursor for traversing stored branch nodes.
50    trie_cursor: TC,
51    /// Hashed cursor for iterating over leaf data.
52    hashed_cursor: HC,
53    /// Branches which are currently in the process of being constructed, each being a child of
54    /// the previous one.
55    branch_stack: Vec<ProofTrieBranch>,
56    /// The path of the last branch in `branch_stack`.
57    branch_path: Nibbles,
58    /// Children of branches in the `branch_stack`.
59    ///
60    /// Each branch in `branch_stack` tracks which children are in this stack using its
61    /// `state_mask`; the number of children the branch has in this stack is equal to the number of
62    /// bits set in its `state_mask`.
63    ///
64    /// The children for the bottom branch in `branch_stack` are found at the bottom of this stack,
65    /// and so on. When a branch is removed from `branch_stack` its children are removed from this
66    /// one, and the branch is pushed onto this stack in their place (see [`Self::pop_branch`].
67    ///
68    /// Children on the `child_stack` are converted to [`ProofTrieBranchChild::RlpNode`]s via the
69    /// [`Self::commit_child`] method. Committing a child indicates that no further changes are
70    /// expected to happen to it (e.g. splitting its short key when inserting a new branch). Given
71    /// that keys are consumed in lexicographical order, only the last child on the stack can
72    /// ever be modified, and therefore all children besides the last are expected to be
73    /// [`ProofTrieBranchChild::RlpNode`]s.
74    child_stack: Vec<ProofTrieBranchChild<VE::DeferredEncoder>>,
75    /// Cached branch data pulled from the `trie_cursor`. The calculator will use the cached
76    /// [`BranchNodeCompact::hashes`] to skip over the calculation of sub-tries in the overall
77    /// trie. The cached hashes cannot be used for any paths which are prefixes of a proof target.
78    cached_branch_stack: Vec<(Nibbles, BranchNodeCompact)>,
79    /// The proofs which will be returned from the calculation. This gets taken at the end of every
80    /// proof call.
81    retained_proofs: Vec<ProofTrieNodeV2>,
82    /// Free-list of re-usable buffers of [`RlpNode`]s, used for encoding branch nodes to RLP.
83    ///
84    /// We are generally able to re-use these buffers across different branch nodes for the
85    /// duration of a proof calculation, but occasionally we will lose one when a branch
86    /// node is returned as a `ProofTrieNode`.
87    rlp_nodes_bufs: Vec<Vec<RlpNode>>,
88    /// Re-usable byte buffer, used for RLP encoding.
89    rlp_encode_buf: Vec<u8>,
90}
91
92impl<TC, HC, VE: LeafValueEncoder> ProofCalculator<TC, HC, VE> {
93    /// Create a new [`ProofCalculator`] instance for calculating account proofs.
94    pub fn new(trie_cursor: TC, hashed_cursor: HC) -> Self {
95        Self {
96            trie_cursor,
97            hashed_cursor,
98            branch_stack: Vec::<_>::with_capacity(64),
99            branch_path: Nibbles::new(),
100            child_stack: Vec::<_>::with_capacity(64),
101            cached_branch_stack: Vec::<_>::with_capacity(64),
102            retained_proofs: Vec::<_>::with_capacity(32),
103            rlp_nodes_bufs: Vec::<_>::with_capacity(8),
104            rlp_encode_buf: Vec::<_>::with_capacity(RLP_ENCODE_BUF_SIZE),
105        }
106    }
107}
108
109impl<TC, HC, VE> ProofCalculator<TC, HC, VE>
110where
111    TC: TrieCursor,
112    HC: HashedCursor,
113    VE: LeafValueEncoder<Value = HC::Value>,
114{
115    /// Takes a re-usable `RlpNode` buffer from the internal free-list, or allocates a new one if
116    /// the free-list is empty.
117    ///
118    /// The returned Vec will have a length of zero.
119    fn take_rlp_nodes_buf(&mut self) -> Vec<RlpNode> {
120        self.rlp_nodes_bufs
121            .pop()
122            .map(|mut buf| {
123                buf.clear();
124                buf
125            })
126            .unwrap_or_else(|| Vec::with_capacity(16))
127    }
128
129    // Returns zero if `branch_stack` is empty, one otherwise.
130    //
131    // This is used when working with the `ext_len` field of [`ProofTrieBranch`]. The `ext_len` is
132    // calculated by taking the difference of the current `branch_path` and the new branch's path;
133    // if the new branch has a parent branch (ie `branch_stack` is not empty) then 1 is subtracted
134    // from the `ext_len` to account for the child's nibble on the parent.
135    #[inline]
136    const fn maybe_parent_nibble(&self) -> usize {
137        !self.branch_stack.is_empty() as usize
138    }
139
140    /// Returns true if the proof of a node at the given path should be retained. A node is retained
141    /// if its path is a prefix of any target.
142    ///
143    /// This may move the `targets` iterator forward if the given path comes after the current
144    /// target.
145    ///
146    /// This method takes advantage of the [`std::slice::Iter`] component of [`TargetsCursor`] to
147    /// check the minimum number of targets. In general it looks at a current target and the next
148    /// target simultaneously, forming an end-exclusive range.
149    ///
150    /// ```text
151    /// * Given targets: [ 0x012, 0x045, 0x678 ]
152    /// * targets.current() returns:
153    ///     - (0x012, Some(0x045)): covers (0x012..0x045)
154    ///     - (0x045, Some(0x678)): covers (0x045..0x678)
155    ///     - (0x678, None): covers (0x678..)
156    /// ```
157    ///
158    /// As long as the path which is passed in lies within that range we can continue to use the
159    /// current target. Once the path goes beyond that range (ie path >= next target) then we can be
160    /// sure that no further paths will be in the range, and we can iterate forward.
161    ///
162    /// ```text
163    /// * Given:
164    ///     - path: 0x04
165    ///     - targets.current() returns (0x012, Some(0x045))
166    ///
167    /// * 0x04 comes _after_ 0x045 in depth-first order, so (0x012..0x045) does not contain 0x04.
168    ///
169    /// * targets.next() is called.
170    ///
171    /// * targets.current() now returns (0x045, Some(0x678)). This does contain 0x04.
172    ///
173    /// * 0x04 is a prefix of 0x045, and so is retained.
174    /// ```
175    fn should_retain<'a>(
176        &self,
177        targets: &mut Option<TargetsCursor<'a>>,
178        path: &Nibbles,
179        check_min_len: bool,
180    ) -> bool {
181        // If no targets are given then we never retain anything
182        let Some(targets) = targets.as_mut() else { return false };
183
184        let (mut lower, mut upper) = targets.current();
185
186        trace!(target: TRACE_TARGET, ?path, target = ?lower, "should_retain: called");
187        debug_assert!(self.retained_proofs.last().is_none_or(
188                |ProofTrieNodeV2 { path: last_retained_path, .. }| {
189                    depth_first::cmp(path, last_retained_path) == Ordering::Greater
190                }
191            ),
192            "should_retain called with path {path:?} which is not after previously retained node {:?} in depth-first order",
193            self.retained_proofs.last().map(|n| n.path),
194        );
195
196        loop {
197            // If the node in question is a prefix of the target then we do not iterate targets
198            // further.
199            //
200            // Even if the node is a prefix of the target's key, if the target has a non-zero
201            // `min_len` it indicates that the node should only be retained if it is
202            // longer than that value.
203            //
204            // _However_ even if the node doesn't match the target due to the target's `min_len`, it
205            // may match other targets whose keys match this node. So we search forwards and
206            // backwards for all targets which might match this node, and check against the
207            // `min_len` of each.
208            //
209            // For example, given a branch 0xabc, with children at 0, 1, and 2, and targets:
210            // - key: 0xabc0, min_len: 2
211            // - key: 0xabc1, min_len: 1
212            // - key: 0xabc2, min_len: 4 <-- current
213            // - key: 0xabc3, min_len: 3
214            //
215            // When the branch node at 0xabc is visited it will be after the targets has iterated
216            // forward to 0xabc2 (because all children will have been visited already). At this
217            // point the target for 0xabc2 will not match the branch due to its prefix, but any of
218            // the other targets would, so we need to check those as well.
219            if lower.key_nibbles.starts_with(path) {
220                return !check_min_len ||
221                    (path.len() >= lower.min_len as usize ||
222                        targets
223                            .skip_iter()
224                            .take_while(|target| target.key_nibbles.starts_with(path))
225                            .any(|target| path.len() >= target.min_len as usize) ||
226                        targets
227                            .rev_iter()
228                            .take_while(|target| target.key_nibbles.starts_with(path))
229                            .any(|target| path.len() >= target.min_len as usize))
230            }
231
232            // If the path isn't in the current range then iterate forward until it is (or until
233            // there is no upper bound, indicating unbounded).
234            if upper
235                .is_some_and(|upper| depth_first::cmp(path, &upper.key_nibbles) != Ordering::Less)
236            {
237                (lower, upper) = targets.next();
238                trace!(target: TRACE_TARGET, target = ?lower, "upper target <= path, next target");
239            } else {
240                return false
241            }
242        }
243    }
244
245    /// Takes a child which has been removed from the `child_stack` and converts it to an
246    /// [`RlpNode`].
247    ///
248    /// Calling this method indicates that the child will not undergo any further modifications, and
249    /// therefore can be retained as a proof node if applicable.
250    fn commit_child<'a>(
251        &mut self,
252        targets: &mut Option<TargetsCursor<'a>>,
253        child_path: Nibbles,
254        child: ProofTrieBranchChild<VE::DeferredEncoder>,
255    ) -> Result<RlpNode, StateProofError> {
256        // If the child is already an `RlpNode` then there is nothing to do.
257        if let ProofTrieBranchChild::RlpNode(rlp_node) = child {
258            return Ok(rlp_node)
259        }
260
261        // If we should retain the child then do so.
262        if self.should_retain(targets, &child_path, true) {
263            trace!(target: TRACE_TARGET, ?child_path, "Retaining child");
264
265            // Convert to `ProofTrieNodeV2`, which will be what is retained.
266            //
267            // If this node is a branch then its `rlp_nodes_buf` will be taken and not returned to
268            // the `rlp_nodes_bufs` free-list.
269            self.rlp_encode_buf.clear();
270            let proof_node = child.into_proof_trie_node(child_path, &mut self.rlp_encode_buf)?;
271
272            // Use the `ProofTrieNodeV2` to encode the `RlpNode`, and then push it onto retained
273            // nodes before returning.
274            self.rlp_encode_buf.clear();
275            proof_node.node.encode(&mut self.rlp_encode_buf);
276
277            self.retained_proofs.push(proof_node);
278            return Ok(RlpNode::from_rlp(&self.rlp_encode_buf));
279        }
280
281        // If the child path is not being retained then we convert directly to an `RlpNode`
282        // using `into_rlp`. Since we are not retaining the node we can recover any `RlpNode`
283        // buffers for the free-list here, hence why we do this as a separate logical branch.
284        self.rlp_encode_buf.clear();
285        let (child_rlp_node, freed_rlp_nodes_buf) = child.into_rlp(&mut self.rlp_encode_buf)?;
286
287        // If there is an `RlpNode` buffer which can be re-used then push it onto the free-list.
288        if let Some(buf) = freed_rlp_nodes_buf {
289            self.rlp_nodes_bufs.push(buf);
290        }
291
292        Ok(child_rlp_node)
293    }
294
295    /// Returns the path of the child of the currently under-construction branch at the given
296    /// nibble.
297    #[inline]
298    fn child_path_at(&self, nibble: u8) -> Nibbles {
299        let mut child_path = self.branch_path;
300        debug_assert!(child_path.len() < 64);
301        child_path.push_unchecked(nibble);
302        child_path
303    }
304
305    /// Returns index of the highest nibble which is set in the mask.
306    ///
307    /// # Panics
308    ///
309    /// Will panic in debug mode if the mask is empty.
310    #[inline]
311    fn highest_set_nibble(mask: TrieMask) -> u8 {
312        debug_assert!(!mask.is_empty());
313        (u16::BITS - mask.leading_zeros() - 1) as u8
314    }
315
316    /// Returns the path of the child on top of the `child_stack`, or the root path if the stack is
317    /// empty. Returns None if the current branch has not yet pushed a child (empty `state_mask`).
318    fn last_child_path(&self) -> Option<Nibbles> {
319        // If there is no branch under construction then the top child must be the root child.
320        let Some(branch) = self.branch_stack.last() else {
321            return Some(Nibbles::new());
322        };
323
324        (!branch.state_mask.is_empty())
325            .then(|| self.child_path_at(Self::highest_set_nibble(branch.state_mask)))
326    }
327
328    /// Calls [`Self::commit_child`] on the last child of `child_stack`, replacing it with a
329    /// [`ProofTrieBranchChild::RlpNode`].
330    ///
331    /// If `child_stack` is empty then this is a no-op.
332    ///
333    /// NOTE that this method call relies on the `state_mask` of the top branch of the
334    /// `branch_stack` to determine the last child's path. When committing the last child prior to
335    /// pushing a new child, it's important to set the new child's `state_mask` bit _after_ the call
336    /// to this method.
337    fn commit_last_child<'a>(
338        &mut self,
339        targets: &mut Option<TargetsCursor<'a>>,
340    ) -> Result<(), StateProofError> {
341        let Some(child_path) = self.last_child_path() else { return Ok(()) };
342        let child =
343            self.child_stack.pop().expect("child_stack can't be empty if there's a child path");
344
345        // If the child is already an `RlpNode` then there is nothing to do, push it back on with no
346        // changes.
347        if let ProofTrieBranchChild::RlpNode(_) = child {
348            self.child_stack.push(child);
349            return Ok(())
350        }
351
352        // Only commit immediately if retained for the proof. Otherwise, defer conversion
353        // to pop_branch() to give DeferredEncoder time for async work.
354        if self.should_retain(targets, &child_path, true) {
355            let child_rlp_node = self.commit_child(targets, child_path, child)?;
356            self.child_stack.push(ProofTrieBranchChild::RlpNode(child_rlp_node));
357        } else {
358            self.child_stack.push(child);
359        }
360
361        Ok(())
362    }
363
364    /// Creates a new leaf node on a branch, setting its `state_mask` bit and pushing the leaf onto
365    /// the `child_stack`.
366    ///
367    /// # Panics
368    ///
369    /// - If `branch_stack` is empty
370    /// - If the leaf's nibble is already set in the branch's `state_mask`.
371    fn push_new_leaf<'a>(
372        &mut self,
373        targets: &mut Option<TargetsCursor<'a>>,
374        leaf_nibble: u8,
375        leaf_short_key: Nibbles,
376        leaf_val: VE::DeferredEncoder,
377    ) -> Result<(), StateProofError> {
378        // Before pushing the new leaf onto the `child_stack` we need to commit the previous last
379        // child, so that only `child_stack`'s final child is a non-RlpNode.
380        self.commit_last_child(targets)?;
381
382        // Once the last child is committed we set the new child's bit on the top branch's
383        // `state_mask` and push that new child.
384        let branch = self.branch_stack.last_mut().expect("branch_stack cannot be empty");
385
386        debug_assert!(!branch.state_mask.is_bit_set(leaf_nibble));
387        branch.state_mask.set_bit(leaf_nibble);
388
389        self.child_stack
390            .push(ProofTrieBranchChild::Leaf { short_key: leaf_short_key, value: leaf_val });
391
392        Ok(())
393    }
394
395    /// Pushes a new branch onto the `branch_stack` based on the path and short key of the last
396    /// child on the `child_stack` and the path of the next child which will be pushed on to the
397    /// stack after this call.
398    ///
399    /// Returns the nibble of the branch's `state_mask` which should be set for the new child, and
400    /// short key that the next child should use.
401    fn push_new_branch(&mut self, new_child_path: Nibbles) -> (u8, Nibbles) {
402        // First determine the new child's shortkey relative to the current branch. If there is no
403        // current branch then the short key is the full path.
404        let new_child_short_key = if self.branch_stack.is_empty() {
405            new_child_path
406        } else {
407            // When there is a current branch then trim off its path as well as the nibble that it
408            // has set for this leaf.
409            trim_nibbles_prefix(&new_child_path, self.branch_path.len() + 1)
410        };
411
412        // Get the new branch's first child, which is the child on the top of the stack with which
413        // the new child shares the same nibble on the current branch.
414        let first_child = self
415            .child_stack
416            .last_mut()
417            .expect("push_new_branch can't be called with empty child_stack");
418
419        let first_child_short_key = first_child.short_key();
420        debug_assert!(
421            !first_child_short_key.is_empty(),
422            "push_new_branch called when top child on stack is not a leaf or extension with a short key",
423        );
424
425        // Determine how many nibbles are shared between the new branch's first child and the new
426        // child. This common prefix will be the extension of the new branch
427        let common_prefix_len = first_child_short_key.common_prefix_length(&new_child_short_key);
428
429        // Trim off the common prefix from the first child's short key, plus one nibble which will
430        // stored by the new branch itself in its state mask.
431        let first_child_nibble = first_child_short_key.get_unchecked(common_prefix_len);
432        first_child.trim_short_key_prefix(common_prefix_len + 1);
433
434        // Similarly, trim off the common prefix, plus one nibble for the new branch, from the new
435        // child's short key.
436        let new_child_nibble = new_child_short_key.get_unchecked(common_prefix_len);
437        let new_child_short_key = trim_nibbles_prefix(&new_child_short_key, common_prefix_len + 1);
438
439        // Update the branch path to reflect the new branch about to be pushed. Its path will be
440        // the path of the previous branch, plus the nibble shared by each child, plus the parent
441        // extension (denoted by a non-zero `ext_len`). Since the new branch's path is a prefix of
442        // the original new_child_path we can just slice that.
443        //
444        // If the new branch is the first branch then we do not add the extra 1, as there is no
445        // nibble in a parent branch to account for.
446        let branch_path_len =
447            self.branch_path.len() + common_prefix_len + self.maybe_parent_nibble();
448        self.branch_path = new_child_path.slice_unchecked(0, branch_path_len);
449
450        // Push the new branch onto the `branch_stack`. We do not yet set the `state_mask` bit of
451        // the new child; whatever actually pushes the child onto the `child_stack` is expected to
452        // do that.
453        self.branch_stack.push(ProofTrieBranch {
454            ext_len: common_prefix_len as u8,
455            state_mask: TrieMask::new(1 << first_child_nibble),
456            masks: None,
457        });
458
459        trace!(
460            target: TRACE_TARGET,
461            ?new_child_path,
462            ?common_prefix_len,
463            ?first_child_nibble,
464            branch_path = ?self.branch_path,
465            "Pushed new branch",
466        );
467
468        (new_child_nibble, new_child_short_key)
469    }
470
471    /// Pops the top branch off of the `branch_stack`, hashes its children on the `child_stack`, and
472    /// replaces those children on the `child_stack`. The `branch_path` field will be updated
473    /// accordingly.
474    ///
475    /// # Panics
476    ///
477    /// This method panics if `branch_stack` is empty.
478    fn pop_branch<'a>(
479        &mut self,
480        targets: &mut Option<TargetsCursor<'a>>,
481    ) -> Result<(), StateProofError> {
482        trace!(
483            target: TRACE_TARGET,
484            branch = ?self.branch_stack.last(),
485            branch_path = ?self.branch_path,
486            child_stack_len = ?self.child_stack.len(),
487            "pop_branch: called",
488        );
489
490        // Ensure the final child on the child stack has been committed, as this method expects all
491        // children of the branch to have been committed.
492        self.commit_last_child(targets)?;
493
494        let mut rlp_nodes_buf = self.take_rlp_nodes_buf();
495        let branch = self.branch_stack.pop().expect("branch_stack cannot be empty");
496
497        // Take the branch's children off the stack, using the state mask to determine how many
498        // there are.
499        let num_children = branch.state_mask.count_ones() as usize;
500        debug_assert!(num_children > 1, "A branch must have at least two children");
501        debug_assert!(
502            self.child_stack.len() >= num_children,
503            "Stack is missing necessary children ({num_children:?})"
504        );
505
506        // Collect children into RlpNode Vec. Children are in lexicographic order.
507        for child in self.child_stack.drain(self.child_stack.len() - num_children..) {
508            let child_rlp_node = match child {
509                ProofTrieBranchChild::RlpNode(rlp_node) => rlp_node,
510                uncommitted_child => {
511                    // Convert uncommitted child (not retained for proof) to RlpNode now.
512                    self.rlp_encode_buf.clear();
513                    let (rlp_node, freed_buf) =
514                        uncommitted_child.into_rlp(&mut self.rlp_encode_buf)?;
515                    if let Some(buf) = freed_buf {
516                        self.rlp_nodes_bufs.push(buf);
517                    }
518                    rlp_node
519                }
520            };
521            rlp_nodes_buf.push(child_rlp_node);
522        }
523
524        debug_assert_eq!(
525            rlp_nodes_buf.len(),
526            branch.state_mask.count_ones() as usize,
527            "children length must match number of bits set in state_mask"
528        );
529
530        // Calculate the short key of the parent extension (if the branch has a parent extension).
531        // It's important to calculate this short key prior to modifying the `branch_path`.
532        let short_key = trim_nibbles_prefix(
533            &self.branch_path,
534            self.branch_path.len() - branch.ext_len as usize,
535        );
536
537        // Compute hash for the branch node if it has a parent extension.
538        let rlp_node = if short_key.is_empty() {
539            None
540        } else {
541            self.rlp_encode_buf.clear();
542            BranchNodeRef::new(&rlp_nodes_buf, branch.state_mask).encode(&mut self.rlp_encode_buf);
543            Some(RlpNode::from_rlp(&self.rlp_encode_buf))
544        };
545
546        // Wrap the `BranchNodeV2` so it can be pushed onto the child stack.
547        let branch_as_child = ProofTrieBranchChild::Branch {
548            node: BranchNodeV2::new(short_key, rlp_nodes_buf, branch.state_mask, rlp_node),
549            masks: branch.masks,
550        };
551
552        self.child_stack.push(branch_as_child);
553
554        // Update the branch_path. If this branch is the only branch then only its extension needs
555        // to be trimmed, otherwise we also need to remove its nibble from its parent.
556        let new_path_len =
557            self.branch_path.len() - branch.ext_len as usize - self.maybe_parent_nibble();
558
559        debug_assert!(self.branch_path.len() >= new_path_len);
560        self.branch_path = self.branch_path.slice_unchecked(0, new_path_len);
561
562        Ok(())
563    }
564
565    /// Adds a single leaf for a key to the stack, possibly collapsing an existing branch and/or
566    /// creating a new one depending on the path of the key.
567    fn push_leaf<'a>(
568        &mut self,
569        targets: &mut Option<TargetsCursor<'a>>,
570        key: Nibbles,
571        val: VE::DeferredEncoder,
572    ) -> Result<(), StateProofError> {
573        loop {
574            trace!(
575                target: TRACE_TARGET,
576                ?key,
577                branch_stack_len = ?self.branch_stack.len(),
578                branch_path = ?self.branch_path,
579                child_stack_len = ?self.child_stack.len(),
580                "push_leaf: loop",
581            );
582
583            // Get the `state_mask` of the branch currently being built. If there are no branches
584            // on the stack then it means either the trie is empty or only a single leaf has been
585            // added previously.
586            let curr_branch_state_mask = match self.branch_stack.last() {
587                Some(curr_branch) => curr_branch.state_mask,
588                None if self.child_stack.is_empty() => {
589                    // If the child stack is empty then this is the first leaf, push it and be done
590                    self.child_stack
591                        .push(ProofTrieBranchChild::Leaf { short_key: key, value: val });
592                    return Ok(())
593                }
594                None => {
595                    // If the child stack is not empty then it must only have a single other child
596                    // which is either a leaf or extension with a non-zero short key.
597                    debug_assert_eq!(self.child_stack.len(), 1);
598                    debug_assert!(!self
599                        .child_stack
600                        .last()
601                        .expect("already checked for emptiness")
602                        .short_key()
603                        .is_empty());
604                    let (nibble, short_key) = self.push_new_branch(key);
605                    self.push_new_leaf(targets, nibble, short_key, val)?;
606                    return Ok(())
607                }
608            };
609
610            // Find the common prefix length, which is the number of nibbles shared between the
611            // current branch and the key.
612            let common_prefix_len = self.branch_path.common_prefix_length(&key);
613
614            // If the current branch does not share all of its nibbles with the new key then it is
615            // not the parent of the new key. In this case the current branch will have no more
616            // children. We can pop it and loop back to the top to try again with its parent branch.
617            if common_prefix_len < self.branch_path.len() {
618                self.pop_branch(targets)?;
619                continue
620            }
621
622            // If the current branch is a prefix of the new key then the leaf is a child of the
623            // branch. If the branch doesn't have the leaf's nibble set then the leaf can be added
624            // directly, otherwise a new branch must be created in-between this branch and that
625            // existing child.
626            let nibble = key.get_unchecked(common_prefix_len);
627            if curr_branch_state_mask.is_bit_set(nibble) {
628                // Push a new branch which splits the short key of the existing child at this
629                // nibble.
630                let (nibble, short_key) = self.push_new_branch(key);
631                // Push the new leaf onto the new branch.
632                self.push_new_leaf(targets, nibble, short_key, val)?;
633            } else {
634                let short_key = key.slice_unchecked(common_prefix_len + 1, key.len());
635                self.push_new_leaf(targets, nibble, short_key, val)?;
636            }
637
638            return Ok(())
639        }
640    }
641
    /// Given the lower (inclusive) and upper (exclusive) bounds of a range of keys, iterates over
    /// the `hashed_cursor` and calculates all trie nodes possible based on those keys. If the
    /// upper bound is None then the range is considered unbounded.
    ///
    /// It is expected that this method is "driven" by `next_uncached_key_range`, which decides
    /// which ranges of keys need to be calculated based on what cached trie data is available.
    ///
    /// `hashed_cursor_current` caches the hashed cursor's most recently yielded entry across
    /// calls: on entry it holds the last entry yielded by a previous call (if any), and on return
    /// it holds the first entry at-or-after this range, or None if the cursor is exhausted.
    #[instrument(
        target = TRACE_TARGET,
        level = "trace",
        skip_all,
        fields(?lower_bound, ?upper_bound),
    )]
    fn calculate_key_range<'a>(
        &mut self,
        value_encoder: &mut VE,
        targets: &mut Option<TargetsCursor<'a>>,
        hashed_cursor_current: &mut Option<(Nibbles, VE::DeferredEncoder)>,
        lower_bound: Nibbles,
        upper_bound: Option<Nibbles>,
    ) -> Result<(), StateProofError> {
        // A helper closure for mapping entries returned from the `hashed_cursor`, converting the
        // key to Nibbles and immediately creating the DeferredValueEncoder so that encoding of the
        // leaf value can begin ASAP.
        let mut map_hashed_cursor_entry = |(key_b256, val): (B256, _)| {
            debug_assert_eq!(key_b256.len(), 32);
            let key = Nibbles::unpack_array(key_b256.as_ref());
            let val = value_encoder.deferred_encoder(key_b256, val);
            (key, val)
        };

        // If the cursor hasn't been used, or the last iterated key is prior to this range's
        // key range, then seek forward to at least the first key.
        if hashed_cursor_current.as_ref().is_none_or(|(key, _)| key < &lower_bound) {
            trace!(
                target: TRACE_TARGET,
                current=?hashed_cursor_current.as_ref().map(|(k, _)| k),
                "Seeking hashed cursor to meet lower bound",
            );

            // Packing the nibble path and right-padding with zeros yields the smallest B256 key
            // which has `lower_bound` as its prefix, so `seek` lands on the first key in range.
            let lower_key = B256::right_padding_from(&lower_bound.pack());
            *hashed_cursor_current =
                self.hashed_cursor.seek(lower_key)?.map(&mut map_hashed_cursor_entry);
        }

        // Loop over all keys in the range, calling `push_leaf` on each. `mem::take` moves the
        // current entry out (leaving None), and the subsequent `next()` call refills it.
        while let Some((key, _)) = hashed_cursor_current.as_ref() &&
            upper_bound.is_none_or(|upper_bound| key < &upper_bound)
        {
            let (key, val) =
                core::mem::take(hashed_cursor_current).expect("while-let checks for Some");
            self.push_leaf(targets, key, val)?;
            *hashed_cursor_current = self.hashed_cursor.next()?.map(&mut map_hashed_cursor_entry);
        }

        trace!(target: TRACE_TARGET, "No further keys within range");
        Ok(())
    }
699
700    /// Constructs and returns a new [`ProofTrieBranch`] based on an existing [`BranchNodeCompact`].
701    #[inline]
702    const fn new_from_cached_branch(
703        cached_branch: &BranchNodeCompact,
704        ext_len: u8,
705    ) -> ProofTrieBranch {
706        ProofTrieBranch {
707            ext_len,
708            state_mask: TrieMask::new(0),
709            masks: Some(BranchNodeMasks {
710                tree_mask: cached_branch.tree_mask,
711                hash_mask: cached_branch.hash_mask,
712            }),
713        }
714    }
715
    /// Pushes a new branch onto the `branch_stack` which is based on a cached branch obtained via
    /// the trie cursor.
    ///
    /// If there is already a child at the top branch of `branch_stack` occupying this new branch's
    /// nibble then that child will have its short-key split with another new branch, and this
    /// cached branch will be a child of that splitting branch.
    ///
    /// `cached_path` must be a descendant of (or equal to) the current `branch_path`; this is
    /// checked with a `debug_assert` in debug builds.
    fn push_cached_branch<'a>(
        &mut self,
        targets: &mut Option<TargetsCursor<'a>>,
        cached_path: Nibbles,
        cached_branch: &BranchNodeCompact,
    ) -> Result<(), StateProofError> {
        debug_assert!(
            cached_path.starts_with(&self.branch_path),
            "push_cached_branch called with path {cached_path:?} which is not a child of current branch {:?}",
            self.branch_path,
        );

        let parent_branch = self.branch_stack.last();

        // If both stacks are empty then there were no leaves before this cached branch, push it and
        // be done; the extension of the branch will be its full path.
        if self.child_stack.is_empty() && parent_branch.is_none() {
            self.branch_path = cached_path;
            self.branch_stack
                .push(Self::new_from_cached_branch(cached_branch, cached_path.len() as u8));
            return Ok(())
        }

        // Get the nibble which should be set in the parent branch's `state_mask` for this new
        // branch.
        let cached_branch_nibble = cached_path.get_unchecked(self.branch_path.len());

        // We calculate the `ext_len` of the new branch, and potentially update its nibble if a new
        // parent branch is inserted here, based on the state of the parent branch.
        let (cached_branch_nibble, ext_len) = if parent_branch
            .is_none_or(|parent_branch| parent_branch.state_mask.is_bit_set(cached_branch_nibble))
        {
            // If the `child_stack` is not empty but the `branch_stack` is then it implies that
            // there must be a leaf or extension at the root of the trie whose short-key will get
            // split by a new branch, which will become the parent of both that leaf/extension and
            // this new branch.
            //
            // Similarly, if there is a branch on the `branch_stack` but its `state_mask` bit for
            // this new branch is already set, then there must be a leaf/extension with a short-key
            // to be split.
            debug_assert!(!self
                .child_stack
                .last()
                .expect("already checked for emptiness")
                .short_key()
                .is_empty());

            // Split that leaf/extension's short key with a new branch.
            let (nibble, short_key) = self.push_new_branch(cached_path);
            (nibble, short_key.len())
        } else {
            // If there is a parent branch but its `state_mask` bit for this branch is not set
            // then we can simply calculate the `ext_len` based on the difference of each, minus
            // 1 to account for the nibble in the `state_mask`.
            (cached_branch_nibble, cached_path.len() - self.branch_path.len() - 1)
        };

        // `commit_last_child` relies on the last set bit of the parent branch's `state_mask` to
        // determine the path of the last child on the `child_stack`. Since we are about to
        // change that mask we need to commit that last child first.
        self.commit_last_child(targets)?;

        // When pushing a new branch we need to set its child nibble in the `state_mask` of
        // its parent, if there is one.
        if let Some(parent_branch) = self.branch_stack.last_mut() {
            parent_branch.state_mask.set_bit(cached_branch_nibble);
        }

        // Finally update the `branch_path` and push the new branch.
        self.branch_path = cached_path;
        self.branch_stack.push(Self::new_from_cached_branch(cached_branch, ext_len as u8));

        trace!(
            target: TRACE_TARGET,
            branch=?self.branch_stack.last(),
            branch_path=?self.branch_path,
            "Pushed cached branch",
        );

        Ok(())
    }
803
    /// Attempts to pop off the top branch of the `cached_branch_stack`, returning
    /// [`PopCachedBranchOutcome::Popped`] on success. Returns other variants to indicate that the
    /// stack is empty and what to do about it.
    ///
    /// This method only returns [`PopCachedBranchOutcome::CalculateLeaves`] if there is a cached
    /// branch on top of the stack; in that case the branch found via the trie cursor is pushed
    /// back onto the stack before returning, so it remains available after the indicated leaf
    /// range has been calculated.
    #[inline]
    fn try_pop_cached_branch(
        &mut self,
        trie_cursor_state: &mut TrieCursorState,
        sub_trie_prefix: &Nibbles,
        uncalculated_lower_bound: &Option<Nibbles>,
    ) -> Result<PopCachedBranchOutcome, StateProofError> {
        // If there is a branch on top of the stack we use that.
        if let Some(cached) = self.cached_branch_stack.pop() {
            return Ok(PopCachedBranchOutcome::Popped(cached));
        }

        // There is no cached branch on the stack. It's possible that another one exists
        // farther on in the trie, but we perform some checks first to prevent unnecessary
        // attempts to find it.

        // If the `uncalculated_lower_bound` is None it indicates that there can be no more
        // leaf data, so similarly there can be no more branches.
        let Some(uncalculated_lower_bound) = uncalculated_lower_bound else {
            return Ok(PopCachedBranchOutcome::Exhausted)
        };

        // If [`TrieCursorState::path`] returns None it means that the cursor has been
        // exhausted, so there can be no more cached data.
        let Some(mut trie_cursor_path) = trie_cursor_state.path() else {
            return Ok(PopCachedBranchOutcome::Exhausted)
        };

        // If the trie cursor is seeked to a branch whose leaves have already been processed
        // then we can't use it, instead we seek forward and try again.
        if trie_cursor_path < uncalculated_lower_bound {
            *trie_cursor_state =
                TrieCursorState::seeked(self.trie_cursor.seek(*uncalculated_lower_bound)?);

            // Having just seeked forward we need to check if the cursor is now exhausted,
            // extracting the new path at the same time.
            if let Some(new_trie_cursor_path) = trie_cursor_state.path() {
                trie_cursor_path = new_trie_cursor_path
            } else {
                return Ok(PopCachedBranchOutcome::Exhausted)
            };
        }

        // If the trie cursor has exceeded the sub-trie then we consider it to be exhausted.
        if !trie_cursor_path.starts_with(sub_trie_prefix) {
            return Ok(PopCachedBranchOutcome::Exhausted)
        }

        // At this point we can be sure that the cursor is in an `Available` state. We know for
        // sure it's not `Exhausted` because of the calls to `path` above, and we know it's not
        // `Taken` because we push all taken branches onto the `cached_branch_stack`, and the
        // stack is empty.
        //
        // We will use this `Available` cached branch as our next branch.
        let cached = trie_cursor_state.take();
        trace!(target: TRACE_TARGET, cached=?cached, "Pushed next trie node onto cached_branch_stack");

        // If the calculated range is not caught up to the next cached branch it means there
        // are portions of the trie prior to that branch which may need to be calculated;
        // return the uncalculated range up to that branch to make that happen.
        //
        // If the next cached branch's path is all zeros then we can skip this catch-up step,
        // because there cannot be any keys prior to that range.
        let cached_path = &cached.0;
        if uncalculated_lower_bound < cached_path && !cached_path.is_zeroes() {
            let range = (*uncalculated_lower_bound, Some(*cached_path));
            trace!(target: TRACE_TARGET, ?range, "Returning key range to calculate in order to catch up to cached branch");

            // Push the cached branch onto the stack so it's available once the leaf range is done
            // being calculated.
            self.cached_branch_stack.push(cached);

            return Ok(PopCachedBranchOutcome::CalculateLeaves(range));
        }

        Ok(PopCachedBranchOutcome::Popped(cached))
    }
887
    /// Accepts the current state of both hashed and trie cursors, and determines the next range of
    /// hashed keys which need to be processed using [`Self::push_leaf`].
    ///
    /// This method will use cached branch node data from the trie cursor to skip over all possible
    /// ranges of keys, to reduce computation as much as possible.
    ///
    /// # Returns
    ///
    /// - `None`: No more data to process, finish computation
    ///
    /// - `Some(lower, None)`: Indicates to call `push_leaf` on all keys starting at `lower`, with
    ///   no upper bound. This method won't be called again after this.
    ///
    /// - `Some(lower, Some(upper))`: Indicates to call `push_leaf` on all keys starting at `lower`,
    ///   up to but excluding `upper`, and then call this method once done.
    #[instrument(target = TRACE_TARGET, level = "trace", skip_all)]
    fn next_uncached_key_range<'a>(
        &mut self,
        targets: &mut Option<TargetsCursor<'a>>,
        trie_cursor_state: &mut TrieCursorState,
        sub_trie_prefix: &Nibbles,
        sub_trie_upper_bound: Option<&Nibbles>,
        mut uncalculated_lower_bound: Option<Nibbles>,
    ) -> Result<Option<(Nibbles, Option<Nibbles>)>, StateProofError> {
        // Pop any under-construction branches that are now complete.
        // All trie data prior to the current cached branch, if any, has been computed. Any branches
        // which were under-construction previously, and which are not on the same path as this
        // cached branch, can be assumed to be completed; they will not have any further keys added
        // to them.
        if let Some(cached_path) = self.cached_branch_stack.last().map(|kv| kv.0) {
            while !cached_path.starts_with(&self.branch_path) {
                self.pop_branch(targets)?;
            }
        }

        loop {
            // Pop the currently cached branch node.
            //
            // NOTE we pop off the `cached_branch_stack` because cloning the `BranchNodeCompact`
            // means cloning an Arc, which incurs synchronization overhead. We have to be sure to
            // push the cached branch back onto the stack once done.
            let (cached_path, cached_branch) = match self.try_pop_cached_branch(
                trie_cursor_state,
                sub_trie_prefix,
                &uncalculated_lower_bound,
            )? {
                PopCachedBranchOutcome::Popped(cached) => cached,
                PopCachedBranchOutcome::Exhausted => {
                    // If cached branches are exhausted it's possible that there is still an
                    // unbounded range of leaves to be processed. `uncalculated_lower_bound` is
                    // used to return that range.
                    trace!(target: TRACE_TARGET, ?uncalculated_lower_bound, "Exhausted cached trie nodes");
                    return Ok(uncalculated_lower_bound
                        .map(|lower| (lower, sub_trie_upper_bound.copied())));
                }
                PopCachedBranchOutcome::CalculateLeaves(range) => {
                    return Ok(Some(range));
                }
            };

            trace!(
                target: TRACE_TARGET,
                branch_path = ?self.branch_path,
                branch_state_mask = ?self.branch_stack.last().map(|b| b.state_mask),
                ?cached_path,
                cached_branch_state_mask = ?cached_branch.state_mask,
                cached_branch_hash_mask = ?cached_branch.hash_mask,
                "loop",
            );

            // Since we've popped all branches which don't start with cached_path, branch_path at
            // this point must be equal to or shorter than cached_path.
            debug_assert!(
                self.branch_path.len() < cached_path.len() || self.branch_path == cached_path,
                "branch_path {:?} is different-or-longer-than cached_path {cached_path:?}",
                self.branch_path
            );

            // If the branch_path != cached_path it means the branch_stack is either empty, or the
            // top branch is the parent of this cached branch. Either way we push a branch
            // corresponding to the cached one onto the stack, so we can begin constructing it.
            if self.branch_path != cached_path {
                self.push_cached_branch(targets, cached_path, &cached_branch)?;
            }

            // At this point the top of the branch stack is the same branch which was found in the
            // cache.
            let curr_branch =
                self.branch_stack.last().expect("top of branch_stack corresponds to cached branch");

            let cached_state_mask = cached_branch.state_mask;
            let curr_state_mask = curr_branch.state_mask;

            // Determine all child nibbles which are set in the cached branch but not the
            // under-construction branch. XOR is valid here because the under-construction mask
            // must be a subset of the cached mask (asserted below).
            let next_child_nibbles = curr_state_mask ^ cached_state_mask;
            debug_assert_eq!(
                cached_state_mask | next_child_nibbles, cached_state_mask,
                "curr_branch has state_mask bits set which aren't set on cached_branch. curr_branch:{:?}",
                curr_state_mask,
            );

            // If there are no further children to construct for this branch then pop it off both
            // stacks and loop using the parent branch.
            if next_child_nibbles.is_empty() {
                trace!(
                    target: TRACE_TARGET,
                    path=?cached_path,
                    ?curr_branch,
                    ?cached_branch,
                    "No further children, popping branch",
                );
                self.pop_branch(targets)?;

                // no need to pop from `cached_branch_stack`, the current cached branch is already
                // popped (see note at the top of the loop).

                // The just-popped branch is completely processed; we know there can be no more keys
                // with that prefix. Set the lower bound which can be returned from this method to
                // be the next possible prefix, if any.
                uncalculated_lower_bound = cached_path.next_without_prefix();

                continue
            }

            // Determine the next nibble of the branch which has not yet been constructed, and
            // determine the child's full path.
            let child_nibble = next_child_nibbles.trailing_zeros() as u8;
            let child_path = self.child_path_at(child_nibble);

            // If the `hash_mask` bit is set for the next child it means the child's hash is cached
            // in the `cached_branch`. We can use that instead of re-calculating the hash of the
            // entire sub-trie.
            //
            // If the child needs to be retained for a proof then we should not use the cached
            // hash, and instead continue on to calculate its node manually.
            if cached_branch.hash_mask.is_bit_set(child_nibble) {
                // Commit the last child. We do this here for two reasons:
                // - `commit_last_child` will check if the last child needs to be retained. We need
                //   to check that before the subsequent `should_retain` call here to prevent
                //   `targets` from being moved beyond the last child before it is checked.
                // - If we do end up using the cached hash value, then we will need to commit the
                //   last child before pushing a new one onto the stack anyway.
                self.commit_last_child(targets)?;

                if !self.should_retain(targets, &child_path, false) {
                    // Pull this child's hash out of the cached branch node. To get the hash's index
                    // we first need to calculate the mask of which cached hashes have already been
                    // used by this branch (if any). The number of set bits in that mask will be the
                    // index of the next hash in the array to use.
                    let curr_hashed_used_mask = cached_branch.hash_mask & curr_state_mask;
                    let hash_idx = curr_hashed_used_mask.count_ones() as usize;
                    let hash = cached_branch.hashes[hash_idx];

                    trace!(
                        target: TRACE_TARGET,
                        ?child_path,
                        ?hash_idx,
                        ?hash,
                        "Using cached hash for child",
                    );

                    self.child_stack.push(ProofTrieBranchChild::RlpNode(RlpNode::word_rlp(&hash)));
                    self.branch_stack
                        .last_mut()
                        .expect("already asserted there is a last branch")
                        .state_mask
                        .set_bit(child_nibble);

                    // Update the `uncalculated_lower_bound` to indicate that the child whose bit
                    // was just set is completely processed.
                    uncalculated_lower_bound = child_path.next_without_prefix();

                    // Push the current cached branch back onto the stack before looping.
                    self.cached_branch_stack.push((cached_path, cached_branch));

                    continue
                }
            }

            // We now want to check if there is a cached branch node at this child. The cached
            // branch node may be the node at this child directly, or this child may be an
            // extension and the cached branch is the child of that extension.

            // All trie nodes prior to `child_path` will not be modified further, so we can seek the
            // trie cursor to the next cached node at-or-after `child_path`.
            if trie_cursor_state.path().is_some_and(|path| path < &child_path) {
                trace!(target: TRACE_TARGET, ?child_path, "Seeking trie cursor to child path");
                *trie_cursor_state = TrieCursorState::seeked(self.trie_cursor.seek(child_path)?);
            }

            // If the next cached branch node is a child of `child_path` then we can assume it is
            // the cached branch for this child. We push it onto the `cached_branch_stack` and loop
            // back to the top.
            if let TrieCursorState::Available(next_cached_path, next_cached_branch) =
                &trie_cursor_state &&
                next_cached_path.starts_with(&child_path)
            {
                // Push the current cached branch back on before pushing its child and then looping
                self.cached_branch_stack.push((cached_path, cached_branch));

                trace!(
                    target: TRACE_TARGET,
                    ?child_path,
                    ?next_cached_path,
                    ?next_cached_branch,
                    "Pushing cached branch for child",
                );
                self.cached_branch_stack.push(trie_cursor_state.take());
                continue;
            }

            // There is no cached data for the sub-trie at this child, we must recalculate the
            // sub-trie root (this child) using the leaves. Return the range of keys based on the
            // child path.
            let child_path_upper = child_path.next_without_prefix();
            trace!(
                target: TRACE_TARGET,
                lower=?child_path,
                upper=?child_path_upper,
                "Returning sub-trie's key range to calculate",
            );

            // Push the current cached branch back onto the stack before returning.
            self.cached_branch_stack.push((cached_path, cached_branch));

            return Ok(Some((child_path, child_path_upper)));
        }
    }
1117
    /// Calculates trie nodes and retains proofs for targeted nodes within a single sub-trie.
    ///
    /// The sub-trie's bounds are derived from `sub_trie_targets`: its `prefix` is the lower bound
    /// of the sub-trie, and its `upper_bound()` is the exclusive upper bound, with None
    /// indicating unbounded.
    #[instrument(
        target = TRACE_TARGET,
        level = "trace",
        skip_all,
        fields(prefix=?sub_trie_targets.prefix),
    )]
    fn proof_subtrie<'a>(
        &mut self,
        value_encoder: &mut VE,
        trie_cursor_state: &mut TrieCursorState,
        hashed_cursor_current: &mut Option<(Nibbles, VE::DeferredEncoder)>,
        sub_trie_targets: SubTrieTargets<'a>,
    ) -> Result<(), StateProofError> {
        let sub_trie_upper_bound = sub_trie_targets.upper_bound();

        // Wrap targets into a `TargetsCursor`. Targets can be empty if we only want to calculate
        // the root, in which case we don't need a cursor.
        let mut targets = if sub_trie_targets.targets.is_empty() {
            None
        } else {
            Some(TargetsCursor::new(sub_trie_targets.targets))
        };

        // Ensure initial state is cleared. By the end of the method call these should be empty once
        // again.
        debug_assert!(self.cached_branch_stack.is_empty());
        debug_assert!(self.branch_stack.is_empty());
        debug_assert!(self.branch_path.is_empty());
        debug_assert!(self.child_stack.is_empty());

        // `next_uncached_key_range`, which will be called in the loop below, expects the trie
        // cursor to have already been seeked. If it's not yet seeked, or seeked to a prior node,
        // then we seek it to the prefix (the first possible node) to initialize it.
        if trie_cursor_state.before(&sub_trie_targets.prefix) {
            trace!(target: TRACE_TARGET, "Doing initial seek of trie cursor");
            *trie_cursor_state =
                TrieCursorState::seeked(self.trie_cursor.seek(sub_trie_targets.prefix)?);
        }

        // `uncalculated_lower_bound` tracks the lower bound of node paths which have yet to be
        // visited, either via the hashed key cursor (`calculate_key_range`) or trie cursor
        // (`next_uncached_key_range`). If/when this becomes None then there are no further nodes
        // which could exist.
        let mut uncalculated_lower_bound = Some(sub_trie_targets.prefix);

        trace!(target: TRACE_TARGET, "Starting loop");
        loop {
            // Save the previous lower bound to detect forward progress.
            let prev_uncalculated_lower_bound = uncalculated_lower_bound;

            // Determine the range of keys of the overall trie which need to be re-computed.
            let Some((calc_lower_bound, calc_upper_bound)) = self.next_uncached_key_range(
                &mut targets,
                trie_cursor_state,
                &sub_trie_targets.prefix,
                sub_trie_upper_bound.as_ref(),
                prev_uncalculated_lower_bound,
            )?
            else {
                // If `next_uncached_key_range` determines that there can be no more keys then
                // complete the computation.
                break;
            };

            // Forward-progress guard: detect trie inconsistencies that would cause infinite loops.
            // If `next_uncached_key_range` returns a range that starts before the previous
            // lower bound, we've gone backwards and would loop forever.
            //
            // This can specifically happen when there is a cached branch which shouldn't exist, or
            // if state mask bit is set on a cached branch which shouldn't be.
            if let Some(prev_lower) = prev_uncalculated_lower_bound.as_ref() &&
                calc_lower_bound < *prev_lower
            {
                let msg = format!(
                    "next_uncached_key_range went backwards: calc_lower={calc_lower_bound:?} < \
                     prev_lower={prev_lower:?}, calc_upper={calc_upper_bound:?}, prefix={:?}",
                    sub_trie_targets.prefix,
                );
                error!(target: TRACE_TARGET, "{msg}");
                return Err(StateProofError::TrieInconsistency(msg));
            }

            // Calculate the trie for that range of keys
            self.calculate_key_range(
                value_encoder,
                &mut targets,
                hashed_cursor_current,
                calc_lower_bound,
                calc_upper_bound,
            )?;

            // Once outside `calculate_key_range`, `hashed_cursor_current` will be at the first key
            // after the range.
            //
            // If the `hashed_cursor_current` is None (exhausted), or not within the range of the
            // sub-trie, then there are no more keys at all, meaning the trie couldn't possibly have
            // more data and we should complete computation.
            if hashed_cursor_current
                .as_ref()
                .is_none_or(|(key, _)| !key.starts_with(&sub_trie_targets.prefix))
            {
                break;
            }

            // The upper bound of previous calculation becomes the lower bound of the uncalculated
            // range, for which we'll once again check for cached data.
            uncalculated_lower_bound = calc_upper_bound;
        }

        // Once there are no more leaves we can pop the remaining branches, if any.
        trace!(target: TRACE_TARGET, "Exited loop, popping remaining branches");
        while !self.branch_stack.is_empty() {
            self.pop_branch(&mut targets)?;
        }

        // At this point the branch stack should be empty. If the child stack is empty it means no
        // keys were ever iterated from the hashed cursor in the first place. Otherwise there should
        // only be a single node left: the root node.
        debug_assert!(self.branch_stack.is_empty());
        debug_assert!(self.branch_path.is_empty());
        debug_assert!(self.child_stack.len() < 2);

        // The `cached_branch_stack` may still have cached branches on it, as it's not affected by
        // `pop_branch`, but it is no longer needed and should be cleared.
        self.cached_branch_stack.clear();

        // We always pop the root node off of the `child_stack` in order to empty it, however we
        // might not want to retain the node unless the `SubTrieTargets` indicates it.
        trace!(
            target: TRACE_TARGET,
            retain_root = ?sub_trie_targets.retain_root,
            child_stack_empty = self.child_stack.is_empty(),
            "Maybe retaining root",
        );
        match (sub_trie_targets.retain_root, self.child_stack.pop()) {
            (false, _) => {
                // Whether the root node exists or not, we don't want it.
            }
            (true, None) => {
                // If `child_stack` is empty it means there were no keys at all, retain an empty
                // root node.
                self.retained_proofs.push(ProofTrieNodeV2 {
                    path: Nibbles::new(), // root path
                    node: TrieNodeV2::EmptyRoot,
                    masks: None,
                });
            }
            (true, Some(root_node)) => {
                // Encode and retain the root node.
                self.rlp_encode_buf.clear();
                let root_node =
                    root_node.into_proof_trie_node(Nibbles::new(), &mut self.rlp_encode_buf)?;
                self.retained_proofs.push(root_node);
            }
        }

        Ok(())
    }
1279
1280    /// Clears internal computation state. Called after errors to ensure the calculator is not
1281    /// left in a partially-computed state when reused.
1282    fn clear_computation_state(&mut self) {
1283        self.branch_stack.clear();
1284        self.branch_path = Nibbles::new();
1285        self.child_stack.clear();
1286        self.cached_branch_stack.clear();
1287        self.retained_proofs.clear();
1288    }
1289
1290    /// Internal implementation of proof calculation. Assumes both cursors have already been reset.
1291    /// See docs on [`Self::proof`] for expected behavior.
1292    fn proof_inner(
1293        &mut self,
1294        value_encoder: &mut VE,
1295        targets: &mut [ProofV2Target],
1296    ) -> Result<Vec<ProofTrieNodeV2>, StateProofError> {
1297        // If there are no targets then nothing could be returned, return early.
1298        if targets.is_empty() {
1299            trace!(target: TRACE_TARGET, "Empty targets, returning");
1300            return Ok(Vec::new())
1301        }
1302
1303        // Initialize the variables which track the state of the two cursors. Both indicate the
1304        // cursors are unseeked.
1305        let mut trie_cursor_state = TrieCursorState::unseeked();
1306        let mut hashed_cursor_current: Option<(Nibbles, VE::DeferredEncoder)> = None;
1307
1308        // Divide targets into chunks, each chunk corresponding to a different sub-trie within the
1309        // overall trie, and handle all proofs within that sub-trie.
1310        for sub_trie_targets in iter_sub_trie_targets(targets) {
1311            if let Err(err) = self.proof_subtrie(
1312                value_encoder,
1313                &mut trie_cursor_state,
1314                &mut hashed_cursor_current,
1315                sub_trie_targets,
1316            ) {
1317                self.clear_computation_state();
1318                return Err(err);
1319            }
1320        }
1321
1322        trace!(
1323            target: TRACE_TARGET,
1324            retained_proofs_len = ?self.retained_proofs.len(),
1325            "proof_inner: returning",
1326        );
1327        Ok(core::mem::take(&mut self.retained_proofs))
1328    }
1329
1330    /// Generate a proof for the given targets.
1331    ///
1332    /// Given a set of [`ProofV2Target`]s, returns nodes whose paths are a prefix of any target. The
1333    /// returned nodes will be sorted depth-first by path.
1334    ///
1335    /// # Panics
1336    ///
1337    /// In debug builds, panics if the targets are not sorted lexicographically.
1338    #[instrument(target = TRACE_TARGET, level = "trace", skip_all)]
1339    pub fn proof(
1340        &mut self,
1341        value_encoder: &mut VE,
1342        targets: &mut [ProofV2Target],
1343    ) -> Result<Vec<ProofTrieNodeV2>, StateProofError> {
1344        self.trie_cursor.reset();
1345        self.hashed_cursor.reset();
1346        self.proof_inner(value_encoder, targets)
1347    }
1348
1349    /// Computes the root hash from a set of proof nodes.
1350    ///
1351    /// Returns `None` if there is no root node (partial proof), otherwise returns the hash of the
1352    /// root node.
1353    ///
1354    /// This method reuses the internal RLP encode buffer for efficiency.
1355    pub fn compute_root_hash(
1356        &mut self,
1357        proof_nodes: &[ProofTrieNodeV2],
1358    ) -> Result<Option<B256>, StateProofError> {
1359        // Find the root node (node at empty path)
1360        let root_node = proof_nodes.iter().find(|node| node.path.is_empty());
1361
1362        let Some(root) = root_node else {
1363            return Ok(None);
1364        };
1365
1366        // Compute the hash of the root node
1367        self.rlp_encode_buf.clear();
1368        root.node.encode(&mut self.rlp_encode_buf);
1369        let root_hash = keccak256(&self.rlp_encode_buf);
1370
1371        Ok(Some(root_hash))
1372    }
1373
1374    /// Calculates the root node of the trie.
1375    ///
1376    /// This method does not accept targets nor retain proofs. Returns the root node which can
1377    /// be used to compute the root hash via [`Self::compute_root_hash`].
1378    #[instrument(target = TRACE_TARGET, level = "trace", skip(self, value_encoder))]
1379    pub fn root_node(
1380        &mut self,
1381        value_encoder: &mut VE,
1382    ) -> Result<ProofTrieNodeV2, StateProofError> {
1383        // Initialize the variables which track the state of the two cursors. Both indicate the
1384        // cursors are unseeked.
1385        let mut trie_cursor_state = TrieCursorState::unseeked();
1386        let mut hashed_cursor_current: Option<(Nibbles, VE::DeferredEncoder)> = None;
1387
1388        static EMPTY_TARGETS: [ProofV2Target; 0] = [];
1389        let sub_trie_targets =
1390            SubTrieTargets { prefix: Nibbles::new(), targets: &EMPTY_TARGETS, retain_root: true };
1391
1392        if let Err(err) = self.proof_subtrie(
1393            value_encoder,
1394            &mut trie_cursor_state,
1395            &mut hashed_cursor_current,
1396            sub_trie_targets,
1397        ) {
1398            self.clear_computation_state();
1399            return Err(err);
1400        }
1401
1402        // proof_subtrie will retain the root node if retain_proof is true, regardless of if there
1403        // are any targets.
1404        let mut proofs = core::mem::take(&mut self.retained_proofs);
1405        trace!(
1406            target: TRACE_TARGET,
1407            proofs_len = ?proofs.len(),
1408            "root_node: extracting root",
1409        );
1410
1411        // The root node is at the empty path - it must exist since retain_root is true. Otherwise
1412        // targets was empty, so there should be no other retained proofs.
1413        debug_assert_eq!(
1414            proofs.len(), 1,
1415            "prefix is empty, retain_root is true, and targets is empty, so there must be only the root node"
1416        );
1417
1418        // Find and remove the root node (node at empty path)
1419        let root_node = proofs.pop().expect("prefix is empty, retain_root is true, and targets is empty, so there must be only the root node");
1420
1421        Ok(root_node)
1422    }
1423}
1424
/// A proof calculator for storage tries.
///
/// Fixes the value-encoder type parameter to [`StorageValueEncoder`], pairing the calculator
/// with cursors over hashed storage slots.
pub type StorageProofCalculator<TC, HC> = ProofCalculator<TC, HC, StorageValueEncoder>;
1427
1428impl<TC, HC> StorageProofCalculator<TC, HC>
1429where
1430    TC: TrieStorageCursor,
1431    HC: HashedStorageCursor<Value = U256>,
1432{
1433    /// Create a new [`StorageProofCalculator`] instance.
1434    pub fn new_storage(trie_cursor: TC, hashed_cursor: HC) -> Self {
1435        Self::new(trie_cursor, hashed_cursor)
1436    }
1437
1438    /// Generate a proof for a storage trie at the given hashed address.
1439    ///
1440    /// Given a set of [`ProofV2Target`]s, returns nodes whose paths are a prefix of any target. The
1441    /// returned nodes will be sorted depth-first by path.
1442    ///
1443    /// # Panics
1444    ///
1445    /// In debug builds, panics if the targets are not sorted lexicographically.
1446    #[instrument(target = TRACE_TARGET, level = "trace", skip(self, targets))]
1447    pub fn storage_proof(
1448        &mut self,
1449        hashed_address: B256,
1450        targets: &mut [ProofV2Target],
1451    ) -> Result<Vec<ProofTrieNodeV2>, StateProofError> {
1452        self.hashed_cursor.set_hashed_address(hashed_address);
1453
1454        // Shortcut: check if storage is empty
1455        if self.hashed_cursor.is_storage_empty()? {
1456            // Return a single EmptyRoot node at the root path
1457            return Ok(vec![ProofTrieNodeV2 {
1458                path: Nibbles::default(),
1459                node: TrieNodeV2::EmptyRoot,
1460                masks: None,
1461            }])
1462        }
1463
1464        // Don't call `set_hashed_address` on the trie cursor until after the previous shortcut has
1465        // been checked.
1466        self.trie_cursor.set_hashed_address(hashed_address);
1467
1468        // Create a mutable storage value encoder
1469        let mut storage_value_encoder = StorageValueEncoder;
1470        self.proof_inner(&mut storage_value_encoder, targets)
1471    }
1472
1473    /// Calculates the root node of a storage trie.
1474    ///
1475    /// This method does not accept targets nor retain proofs. Returns the root node which can
1476    /// be used to compute the root hash via [`Self::compute_root_hash`].
1477    #[instrument(target = TRACE_TARGET, level = "trace", skip(self))]
1478    pub fn storage_root_node(
1479        &mut self,
1480        hashed_address: B256,
1481    ) -> Result<ProofTrieNodeV2, StateProofError> {
1482        self.hashed_cursor.set_hashed_address(hashed_address);
1483
1484        if self.hashed_cursor.is_storage_empty()? {
1485            return Ok(ProofTrieNodeV2 {
1486                path: Nibbles::default(),
1487                node: TrieNodeV2::EmptyRoot,
1488                masks: None,
1489            })
1490        }
1491
1492        // Don't call `set_hashed_address` on the trie cursor until after the previous shortcut has
1493        // been checked.
1494        self.trie_cursor.set_hashed_address(hashed_address);
1495
1496        // Create a mutable storage value encoder
1497        let mut storage_value_encoder = StorageValueEncoder;
1498        self.root_node(&mut storage_value_encoder)
1499    }
1500}
1501
/// Helper type wrapping a slice of [`ProofV2Target`]s, primarily used to iterate through targets in
/// [`ProofCalculator::should_retain`].
///
/// It is assumed that the underlying slice is never empty, and that the iterator is never
/// exhausted.
struct TargetsCursor<'a> {
    // The wrapped targets; assumed non-empty (checked in `new` in debug builds).
    targets: &'a [ProofV2Target],
    // Index of the target the cursor currently points at.
    i: usize,
}
1511
1512impl<'a> TargetsCursor<'a> {
1513    /// Wraps a slice of [`ProofV2Target`]s with the `TargetsCursor`.
1514    ///
1515    /// # Panics
1516    ///
1517    /// Will panic in debug mode if called with an empty slice.
1518    fn new(targets: &'a [ProofV2Target]) -> Self {
1519        debug_assert!(!targets.is_empty());
1520        Self { targets, i: 0 }
1521    }
1522
1523    /// Returns the current and next [`ProofV2Target`] that the cursor is pointed at.
1524    fn current(&self) -> (&'a ProofV2Target, Option<&'a ProofV2Target>) {
1525        (&self.targets[self.i], self.targets.get(self.i + 1))
1526    }
1527
1528    /// Iterates the cursor forward.
1529    ///
1530    /// # Panics
1531    ///
1532    /// Will panic if the cursor is exhausted.
1533    fn next(&mut self) -> (&'a ProofV2Target, Option<&'a ProofV2Target>) {
1534        self.i += 1;
1535        debug_assert!(self.i < self.targets.len());
1536        self.current()
1537    }
1538
1539    // Iterate forwards over the slice, starting from the [`ProofV2Target`] after the current.
1540    fn skip_iter(&self) -> impl Iterator<Item = &'a ProofV2Target> {
1541        self.targets[self.i + 1..].iter()
1542    }
1543
1544    /// Iterated backwards over the slice, starting from the [`ProofV2Target`] previous to the
1545    /// current.
1546    fn rev_iter(&self) -> impl Iterator<Item = &'a ProofV2Target> {
1547        self.targets[..self.i].iter().rev()
1548    }
1549}
1550
/// Used to track the state of the trie cursor, allowing us to differentiate between a branch having
/// been taken (used as a cached branch) and the cursor having been exhausted.
#[derive(Debug)]
enum TrieCursorState {
    /// The initial state of the cursor, indicating it's never been seeked.
    Unseeked,
    /// Cursor is seeked to this path and the node has not been used yet.
    Available(Nibbles, BranchNodeCompact),
    /// Cursor is seeked to this path, but the node has been used (see [`Self::take`]).
    Taken(Nibbles),
    /// Cursor has been exhausted; no further nodes will be returned.
    Exhausted,
}
1564
1565impl TrieCursorState {
1566    /// Creates a [`Self::Unseeked`] based on an entry returned from the cursor itself.
1567    const fn unseeked() -> Self {
1568        Self::Unseeked
1569    }
1570
1571    /// Creates a [`Self`] based on an entry returned from the cursor itself.
1572    fn seeked(entry: Option<(Nibbles, BranchNodeCompact)>) -> Self {
1573        entry.map_or(Self::Exhausted, |(path, node)| Self::Available(path, node))
1574    }
1575
1576    /// Returns the path the cursor is seeked to, or None if it's exhausted.
1577    ///
1578    /// # Panics
1579    ///
1580    /// Panics if the cursor is unseeked.
1581    const fn path(&self) -> Option<&Nibbles> {
1582        match self {
1583            Self::Unseeked => panic!("cursor is unseeked"),
1584            Self::Available(path, _) | Self::Taken(path) => Some(path),
1585            Self::Exhausted => None,
1586        }
1587    }
1588
1589    /// Returns true if the cursor is unseeked, or is seeked to a node prior to the given one.
1590    fn before(&self, path: &Nibbles) -> bool {
1591        match self {
1592            Self::Unseeked => true,
1593            Self::Available(seeked_to, _) | Self::Taken(seeked_to) => path < seeked_to,
1594            Self::Exhausted => false,
1595        }
1596    }
1597
1598    /// Takes the path and node from a [`Self::Available`]. Panics if not [`Self::Available`].
1599    fn take(&mut self) -> (Nibbles, BranchNodeCompact) {
1600        let Self::Available(path, _) = self else {
1601            panic!("take called on non-Available: {self:?}")
1602        };
1603
1604        let path = *path;
1605        let Self::Available(path, node) = core::mem::replace(self, Self::Taken(path)) else {
1606            unreachable!("already checked that self is Self::Available");
1607        };
1608
1609        (path, node)
1610    }
1611}
1612
/// Describes the state of the currently cached branch node (if any).
enum PopCachedBranchOutcome {
    /// Cached branch has been popped from the `cached_branch_stack` and is ready to be used.
    Popped((Nibbles, BranchNodeCompact)),
    /// All cached branches have been exhausted.
    Exhausted,
    /// Need to calculate leaves from this range (exclusive upper) before the cached branch
    /// (catch-up range). If the upper bound is `None` then the range is presumably unbounded
    /// above — TODO confirm against the call sites that consume this variant.
    CalculateLeaves((Nibbles, Option<Nibbles>)),
}
1623
1624#[cfg(test)]
1625mod tests {
1626    use super::*;
1627    use crate::{
1628        hashed_cursor::{
1629            mock::MockHashedCursorFactory, HashedCursorFactory, HashedCursorMetricsCache,
1630            InstrumentedHashedCursor,
1631        },
1632        proof::Proof,
1633        trie_cursor::{
1634            depth_first, mock::MockTrieCursorFactory, InstrumentedTrieCursor, TrieCursorFactory,
1635            TrieCursorMetricsCache,
1636        },
1637    };
1638    use alloy_primitives::map::{B256Map, B256Set};
1639    use alloy_rlp::Decodable;
1640    use alloy_trie::proof::AddedRemovedKeys;
1641    use itertools::Itertools;
1642    use reth_primitives_traits::Account;
1643    use reth_trie_common::{
1644        updates::{StorageTrieUpdates, TrieUpdates},
1645        HashedPostState, MultiProofTargets, ProofTrieNode, TrieNode,
1646    };
1647
    /// Target to use with the `tracing` crate. Shadows the module-level `TRACE_TARGET` so test
    /// log lines are distinguishable from the implementation's.
    static TRACE_TARGET: &str = "trie::proof_v2::tests";
1650
    /// Converts legacy proofs (sorted in depth-first order) to V2 proofs by combining extension
    /// nodes with their child branch nodes.
    ///
    /// In the legacy proof format, extension nodes and branch nodes are separate. In the V2
    /// format, they are combined into a single `BranchNodeV2` where the extension's key becomes
    /// the branch's `key` field.
    ///
    /// In depth-first order, children come BEFORE parents. So when an extension node is
    /// encountered, its child branch has already been processed and can be popped and combined
    /// with the extension (this is handled inside `from_sorted_trie_nodes`).
    fn convert_legacy_proofs_to_v2(legacy_proofs: &[ProofTrieNode]) -> Vec<ProofTrieNodeV2> {
        ProofTrieNodeV2::from_sorted_trie_nodes(
            legacy_proofs.iter().map(|p| (p.path, p.node.clone(), p.masks)),
        )
    }
1668
    /// A test harness for comparing `ProofCalculator` and legacy `Proof` implementations.
    ///
    /// This harness creates mock cursor factories from a `HashedPostState` and provides
    /// a method to test that both proof implementations produce equivalent results.
    struct ProofTestHarness {
        /// Mock factory for trie cursors (empty by default for leaf-only tests)
        trie_cursor_factory: MockTrieCursorFactory,
        /// Mock factory for hashed cursors, populated from `HashedPostState`
        hashed_cursor_factory: MockHashedCursorFactory,
        /// The expected state root, calculated by `StateRoot`; used to cross-check `root_node`
        expected_root: B256,
    }
1681
    impl ProofTestHarness {
        /// Creates a new test harness from a `HashedPostState`.
        ///
        /// The `HashedPostState` is used to populate the mock hashed cursor factory directly.
        /// The trie cursor factory is initialized from `TrieUpdates` generated by `StateRoot`.
        fn new(post_state: HashedPostState) -> Self {
            // Create empty trie cursor factory to serve as the initial state for StateRoot
            // Ensure that there's a storage trie dataset for every account, to make
            // `MockTrieCursorFactory` happy.
            let storage_tries: B256Map<_> = post_state
                .accounts
                .keys()
                .copied()
                .map(|addr| (addr, StorageTrieUpdates::default()))
                .collect();

            let empty_trie_cursor_factory = MockTrieCursorFactory::from_trie_updates(TrieUpdates {
                storage_tries: storage_tries.clone(),
                ..Default::default()
            });

            // Create mock hashed cursor factory from the post state
            let hashed_cursor_factory = MockHashedCursorFactory::from_hashed_post_state(post_state);

            // Generate TrieUpdates using StateRoot; the root it produces is the reference value
            // the V2 implementation is checked against.
            let (expected_root, mut trie_updates) =
                crate::StateRoot::new(empty_trie_cursor_factory, hashed_cursor_factory.clone())
                    .root_with_updates()
                    .expect("StateRoot should succeed");

            // Continue using empty storage tries for each account, to keep `MockTrieCursorFactory`
            // happy.
            trie_updates.storage_tries = storage_tries;

            // Initialize trie cursor factory from the generated TrieUpdates
            let trie_cursor_factory = MockTrieCursorFactory::from_trie_updates(trie_updates);

            Self { trie_cursor_factory, hashed_cursor_factory, expected_root }
        }

        /// Asserts that `ProofCalculator` and legacy `Proof` produce equivalent results for account
        /// proofs.
        ///
        /// This method calls both implementations with the given account targets and compares
        /// the results. It also verifies that `root_node` returns a node at the empty path whose
        /// hash matches the expected state root.
        ///
        /// # Errors
        ///
        /// Propagates any [`StateProofError`] produced while constructing cursors or proofs.
        fn assert_proof(
            &self,
            targets: impl IntoIterator<Item = ProofV2Target>,
        ) -> Result<(), StateProofError> {
            let targets_vec = targets.into_iter().collect::<Vec<_>>();

            // Convert ProofV2Target keys to MultiProofTargets for legacy implementation
            // For account-only proofs, each account maps to an empty storage set
            // Legacy implementation only uses the keys, not the prefix
            let legacy_targets = targets_vec
                .iter()
                .map(|target| (B256::from_slice(&target.key_nibbles.pack()), B256Set::default()))
                .collect::<MultiProofTargets>();

            // Create ProofCalculator (proof_v2) with account cursors
            let trie_cursor = self.trie_cursor_factory.account_trie_cursor()?;
            let hashed_cursor = self.hashed_cursor_factory.hashed_account_cursor()?;

            // Collect metrics for cursors
            let mut trie_cursor_metrics = TrieCursorMetricsCache::default();
            let trie_cursor = InstrumentedTrieCursor::new(trie_cursor, &mut trie_cursor_metrics);
            let mut hashed_cursor_metrics = HashedCursorMetricsCache::default();
            let hashed_cursor =
                InstrumentedHashedCursor::new(hashed_cursor, &mut hashed_cursor_metrics);

            // Call ProofCalculator::proof with account targets
            let mut value_encoder = SyncAccountValueEncoder::new(
                self.trie_cursor_factory.clone(),
                self.hashed_cursor_factory.clone(),
            );
            let mut proof_calculator = ProofCalculator::new(trie_cursor, hashed_cursor);
            let proof_v2_result =
                proof_calculator.proof(&mut value_encoder, &mut targets_vec.clone())?;

            // Output metrics
            trace!(target: TRACE_TARGET, ?trie_cursor_metrics, "V2 trie cursor metrics");
            trace!(target: TRACE_TARGET, ?hashed_cursor_metrics, "V2 hashed cursor metrics");

            // Call Proof::multiproof (legacy implementation)
            let proof_legacy_result =
                Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone())
                    .with_branch_node_masks(true)
                    .with_added_removed_keys(Some(
                        // This will force the HashBuilder to always retain the child branch of all
                        // extensions. We need this because in V2 extensions and branches are a
                        // single node type, so child branches are always included with extensions.
                        AddedRemovedKeys::default().with_assume_added(true),
                    ))
                    .multiproof(legacy_targets)?;

            // Helper function to check if a node path matches at least one target
            let node_matches_target = |node_path: &Nibbles| -> bool {
                targets_vec.iter().any(|target| {
                    // Node path must be a prefix of the target's key
                    target.key_nibbles.starts_with(node_path) &&
                    // Node path must be at least `min_len` long
                    node_path.len() >= target.min_len as usize
                })
            };

            // Decode and sort legacy proof nodes
            let proof_legacy_nodes = proof_legacy_result
                .account_subtree
                .iter()
                .map(|(path, node_enc)| {
                    let mut buf = node_enc.as_ref();
                    let node = TrieNode::decode(&mut buf)
                        .expect("legacy implementation should not produce malformed proof nodes");

                    // The legacy proof calculator will calculate masks for the root node, even
                    // though we never store the root node so the masks for it aren't really valid.
                    let masks = if path.is_empty() {
                        None
                    } else {
                        proof_legacy_result.branch_node_masks.get(path).copied()
                    };

                    ProofTrieNode { path: *path, node, masks }
                })
                .sorted_by(|a, b| depth_first::cmp(&a.path, &b.path))
                .collect::<Vec<_>>();

            // Convert legacy proofs to V2 proofs by combining extensions with their child branches
            let proof_legacy_nodes_v2 = convert_legacy_proofs_to_v2(&proof_legacy_nodes);

            // Filter to only keep nodes which match a target. We do this after conversion so we
            // don't keep branches whose extension parents are excluded due to a min_len.
            let proof_legacy_nodes_v2 = proof_legacy_nodes_v2
                .into_iter()
                .filter(|ProofTrieNodeV2 { path, .. }| node_matches_target(path))
                .collect::<Vec<_>>();

            // Basic comparison: both should succeed and produce identical results
            pretty_assertions::assert_eq!(proof_legacy_nodes_v2, proof_v2_result);

            // Also test root_node - get a fresh calculator and verify it returns the root node
            // that hashes to the expected root
            let trie_cursor = self.trie_cursor_factory.account_trie_cursor()?;
            let hashed_cursor = self.hashed_cursor_factory.hashed_account_cursor()?;
            let mut value_encoder = SyncAccountValueEncoder::new(
                self.trie_cursor_factory.clone(),
                self.hashed_cursor_factory.clone(),
            );
            let mut proof_calculator = ProofCalculator::new(trie_cursor, hashed_cursor);
            let root_node = proof_calculator.root_node(&mut value_encoder)?;

            // The root node should be at the empty path
            assert!(root_node.path.is_empty(), "root_node should return node at empty path");

            // The hash of the root node should match the expected root from legacy StateRoot
            let root_hash = proof_calculator
                .compute_root_hash(&[root_node])?
                .expect("root_node returns a node at empty path");
            pretty_assertions::assert_eq!(self.expected_root, root_hash);

            Ok(())
        }
    }
1845
    /// Tests that `clear_computation_state` properly resets internal stacks, allowing a
    /// `ProofCalculator` to be reused after a mid-computation error left stale state.
    /// Before the fix, stale data in `branch_stack`, `child_stack`, and `branch_path`
    /// could cause a `usize` underflow panic in `pop_branch`.
    #[test]
    fn test_proof_calculator_reuse_after_error() {
        use alloy_primitives::U256;

        reth_tracing::init_test_tracing();

        // Build a small post state with four accounts spread across distinct first nibbles.
        let mut post_state = HashedPostState::default();
        let addresses = [
            B256::right_padding_from(&[0x10]),
            B256::right_padding_from(&[0x20]),
            B256::right_padding_from(&[0x30]),
            B256::right_padding_from(&[0x40]),
        ];
        for addr in &addresses {
            let account =
                Account { nonce: 1, balance: U256::from(100u64), bytecode_hash: Some(B256::ZERO) };
            post_state.accounts.insert(*addr, Some(account));
        }

        let harness = ProofTestHarness::new(post_state);

        let trie_cursor = harness.trie_cursor_factory.account_trie_cursor().unwrap();
        let hashed_cursor = harness.hashed_cursor_factory.hashed_account_cursor().unwrap();
        let mut proof_calculator = ProofCalculator::new(trie_cursor, hashed_cursor);

        // Simulate stale state left by a mid-computation error: push fake entries onto internal
        // stacks and set a non-empty branch_path.
        proof_calculator.branch_stack.push(ProofTrieBranch {
            ext_len: 2,
            state_mask: TrieMask::new(0b1111),
            masks: None,
        });
        proof_calculator.branch_stack.push(ProofTrieBranch {
            ext_len: 0,
            state_mask: TrieMask::new(0b11),
            masks: None,
        });
        proof_calculator
            .child_stack
            .push(ProofTrieBranchChild::RlpNode(RlpNode::word_rlp(&B256::ZERO)));
        proof_calculator.branch_path = Nibbles::from_nibbles([0x1, 0x2, 0x3]);

        // clear_computation_state should reset everything so a subsequent proof() call works.
        proof_calculator.clear_computation_state();

        let mut value_encoder = SyncAccountValueEncoder::new(
            harness.trie_cursor_factory.clone(),
            harness.hashed_cursor_factory.clone(),
        );
        // Targets must be sorted lexicographically, per `proof`'s contract.
        let mut sorted_addresses = addresses.to_vec();
        sorted_addresses.sort();
        let mut targets: Vec<ProofV2Target> =
            sorted_addresses.iter().copied().map(ProofV2Target::new).collect();

        let result = proof_calculator.proof(&mut value_encoder, &mut targets).unwrap();

        // Compare against a fresh calculator to verify correctness.
        let trie_cursor = harness.trie_cursor_factory.account_trie_cursor().unwrap();
        let hashed_cursor = harness.hashed_cursor_factory.hashed_account_cursor().unwrap();
        let mut fresh_calculator = ProofCalculator::new(trie_cursor, hashed_cursor);
        let mut value_encoder = SyncAccountValueEncoder::new(
            harness.trie_cursor_factory.clone(),
            harness.hashed_cursor_factory,
        );
        let fresh_result = fresh_calculator.proof(&mut value_encoder, &mut targets).unwrap();

        pretty_assertions::assert_eq!(fresh_result, result);
    }
1918
1919    mod proptest_tests {
1920        use super::*;
1921        use alloy_primitives::{map::B256Map, U256};
1922        use proptest::prelude::*;
1923        use reth_trie_common::HashedPostState;
1924
        /// Generate a strategy for `Account` values with random nonce, balance, and bytecode
        /// hash (the bytecode hash is always `Some`).
        fn account_strategy() -> impl Strategy<Value = Account> {
            (any::<u64>(), any::<u64>(), any::<[u8; 32]>()).prop_map(
                |(nonce, balance, code_hash)| Account {
                    nonce,
                    balance: U256::from(balance),
                    bytecode_hash: Some(B256::from(code_hash)),
                },
            )
        }
1935
        /// Generate a strategy for `HashedPostState` with random accounts.
        ///
        /// Generates between 0 and 100 (address, account) pairs; duplicate addresses collapse
        /// when collected into the map.
        fn hashed_post_state_strategy() -> impl Strategy<Value = HashedPostState> {
            prop::collection::vec((any::<[u8; 32]>(), account_strategy()), 0..=100).prop_map(
                |accounts| {
                    let account_map = accounts
                        .into_iter()
                        .map(|(addr_bytes, account)| (B256::from(addr_bytes), Some(account)))
                        .collect::<B256Map<_>>();

                    HashedPostState { accounts: account_map, ..Default::default() }
                },
            )
        }
1949
        /// Generate a strategy for proof targets that are 80% from the `HashedPostState` accounts
        /// and 20% random keys. Each target has a random `min_len` of 0..16 (exclusive upper).
        fn proof_targets_strategy(
            account_keys: Vec<B256>,
        ) -> impl Strategy<Value = Vec<ProofV2Target>> {
            let num_accounts = account_keys.len();

            // Generate between 0 and (num_accounts + 5) targets
            let target_count = 0..=(num_accounts + 5);

            target_count.prop_flat_map(move |count| {
                let account_keys = account_keys.clone();
                prop::collection::vec(
                    (
                        prop::bool::weighted(0.8).prop_flat_map(move |from_accounts| {
                            if from_accounts && !account_keys.is_empty() {
                                // 80% chance: pick from existing account keys
                                prop::sample::select(account_keys.clone()).boxed()
                            } else {
                                // 20% chance: generate random B256 (also taken when there are no
                                // account keys to sample from)
                                any::<[u8; 32]>().prop_map(B256::from).boxed()
                            }
                        }),
                        0u8..16u8, // Random min_len from 0 to 15
                    )
                        .prop_map(|(key, min_len)| ProofV2Target::new(key).with_min_len(min_len)),
                    count,
                )
            })
        }
1980
        proptest! {
            // Run many more cases than proptest's default to better cover trie shapes.
            #![proptest_config(ProptestConfig::with_cases(4000))]
            #[test]
            /// Tests that ProofCalculator produces valid proofs for randomly generated
            /// HashedPostState with proof targets.
            ///
            /// This test:
            /// - Generates random accounts in a HashedPostState
            /// - Generates proof targets: 80% from existing account keys, 20% random
            /// - Creates a test harness with the generated state
            /// - Calls assert_proof with the generated targets
            /// - Verifies both ProofCalculator and legacy Proof produce equivalent results
            fn proptest_proof_with_targets(
                (post_state, targets) in hashed_post_state_strategy()
                    .prop_flat_map(|post_state| {
                        let mut account_keys: Vec<B256> = post_state.accounts.keys().copied().collect();
                        // Sort to ensure deterministic order when using PROPTEST_RNG_SEED
                        account_keys.sort_unstable();
                        let targets_strategy = proof_targets_strategy(account_keys);
                        (Just(post_state), targets_strategy)
                    })
            ) {
                reth_tracing::init_test_tracing();
                let harness = ProofTestHarness::new(post_state);

                harness.assert_proof(targets).expect("Proof generation failed");
            }
        }
2009    }
2010
2011    #[test]
2012    fn test_big_trie() {
2013        use rand::prelude::*;
2014
2015        reth_tracing::init_test_tracing();
2016        let mut rng = rand::rngs::SmallRng::seed_from_u64(1);
2017
2018        let mut rand_b256 = || {
2019            let mut buf: [u8; 32] = [0; 32];
2020            rng.fill_bytes(&mut buf);
2021            B256::from_slice(&buf)
2022        };
2023
2024        // Generate random HashedPostState.
2025        let mut post_state = HashedPostState::default();
2026        for _ in 0..10240 {
2027            let hashed_addr = rand_b256();
2028            let account = Account { bytecode_hash: Some(hashed_addr), ..Default::default() };
2029            post_state.accounts.insert(hashed_addr, Some(account));
2030        }
2031
2032        // Collect targets; partially from real keys, partially random keys which probably won't
2033        // exist.
2034        let mut targets = post_state.accounts.keys().copied().collect::<Vec<_>>();
2035        for _ in 0..post_state.accounts.len() / 5 {
2036            targets.push(rand_b256());
2037        }
2038        targets.sort();
2039
2040        // Create test harness
2041        let harness = ProofTestHarness::new(post_state);
2042
2043        // Assert the proof (convert B256 to ProofV2Target with no min_len for this test)
2044        harness
2045            .assert_proof(targets.into_iter().map(ProofV2Target::new))
2046            .expect("Proof generation failed");
2047    }
2048
    /// Regression test pinning a previously-failing `proptest_proof_with_targets`
    /// case: a fixed set of accounts with three short-prefix targets, each using
    /// `min_len = 2`.
    #[test]
    fn test_failing_proptest_case_0() {
        use alloy_primitives::{hex, map::B256Map};

        reth_tracing::init_test_tracing();

        // Helper function to create B256 from hex string
        let b256 = |s: &str| B256::from_slice(&hex::decode(s).unwrap());

        // Create the HashedPostState from test case input
        let mut accounts = B256Map::default();

        // Define all account data from test case input.
        // Tuple layout: (hashed address, nonce, balance, code hash).
        let account_data = [
            (
                "9f3a475db85ff1f5b5e82d8614ee4afc670d27aefb9a43da0bd863a54acf1fe6",
                8396790837504194281u64,
                9224366602005816983u64,
                "103c5b0538f4e37944321a30f5cb1f7005d2ee70998106f34f36d7adb838c789",
            ),
            (
                "c736258fdfd23d73ec4c5e54b8c3b58e26726b361d438ef48670f028286b70ca",
                9193115115482903760u64,
                4515164289866465875u64,
                "9f24ef3ab0b4893b0ec38d0e9b00f239da072ccf093b0b24f1ea1f99547abe55",
            ),
            (
                "780a3476520090f97e847181aee17515c5ea30b7607775103df16d2b6611a87a",
                8404772182417755681u64,
                16639574952778823617u64,
                "214b12bee666ce8c64c6bbbcfafa0c3e55b4b05a8724ec4182b9a6caa774c56d",
            ),
            (
                "23ebfa849308a5d02c3048040217cd1f4b71fb01a9b54dafe541284ebec2bcce",
                17978809803974566048u64,
                11093542035392742776u64,
                "5384dfda8f1935d98e463c00a96960ff24e4d4893ec21e5ece0d272df33ac7e9",
            ),
            (
                "348e476c24fac841b11d358431b4526db09edc9f39906e0ac8809886a04f3c5a",
                9422945522568453583u64,
                9737072818780682487u64,
                "79f8f25b2cbb7485c5c7b627917c0f562f012d3d7ddd486212c90fbea0cf686e",
            ),
            (
                "830536ee6c8f780a1cd760457345b79fc09476018a59cf3e8fd427a793d99633",
                16497625187081138489u64,
                15143978245385012455u64,
                "00ede4000cc2a16fca7e930761aaf30d1fddcc3803f0009d6a0742b4ee519342",
            ),
            (
                "806c74b024b2fe81f077ea93d2936c489689f7fe024febc3a0fb71a8a9f22fbc",
                8103477314050566918u64,
                1383893458340561723u64,
                "690ed176136174c4f0cc442e6dcbcf6e7b577e30fc052430b6060f97af1f8e85",
            ),
            (
                "b903d962ffc520877f14e1e8328160e5b22f8086b0f7e9cba7a373a8376028a0",
                12972727566246296372u64,
                1130659127924527352u64,
                "cadf1f09d8e6a0d945a58ccd2ff36e2ae99f8146f02be96873e84bef0462d64a",
            ),
            (
                "d36a16afff0097e06b2c28bd795b889265e2ceff9a086173113fbeb6f7a9bc42",
                15682404502571860137u64,
                2025886798818635036u64,
                "c2cee70663e9ff1b521e2e1602e88723da52ccdc7a69e370cde9595af435e654",
            ),
            (
                "f3e8461cba0b84f5b81f8ca63d0456cb567e701ec1d6e77b1a03624c5018389b",
                5663749586038550112u64,
                7681243595728002238u64,
                "072c547c3ab9744bcd2ed9dbd813bd62866a673f4ca5d46939b65e9507be0e70",
            ),
            (
                "40b71840b6f43a493b32f4aa755e02d572012392fd582c81a513a169447e194c",
                518207789203399614u64,
                317311275468085815u64,
                "85541d48471bf639c2574600a9b637338c49729ba9e741f157cc6ebaae139da0",
            ),
            (
                "3f77cd91ceb7d335dd2527c29e79aaf94f14141438740051eb0163d86c35bcc9",
                16227517944662106096u64,
                12646193931088343779u64,
                "54999911d82dd63d526429275115fa98f6a560bc2d8e00be24962e91e38d7182",
            ),
            (
                "5cd903814ba84daa6956572411cd1bf4d48a8e230003d28cc3f942697bf8debb",
                5096288383163945009u64,
                17919982845103509853u64,
                "6a53c812e713f1bfe6bf21954f291140c60ec3f2ef353ecdae5dc7b263a37282",
            ),
            (
                "23f3602c95fd98d7fbe48a326ae1549030a2c7574099432cce5b458182f16bf2",
                11136020130962086191u64,
                12045219101880183180u64,
                "ce53fb9b108a3ee90db8469e44948ba3263ca8d8a0d92a076c9516f9a3d30bd1",
            ),
            (
                "be86489b3594a9da83e04a9ff81c8d68d528b8b9d31f3942d1c5856a4a8c5af7",
                16293506537092575994u64,
                536238712429663046u64,
                "a2af0607ade21241386ecfb3780aa90514f43595941daeff8dd599c203cde30a",
            ),
            (
                "97bcd85ee5d6033bdf86397e8b26f711912948a7298114be27ca5499ea99725f",
                3086656672041156193u64,
                8667446575959669532u64,
                "0474377538684a991ffc9b41f970b48e65eda9e07c292e60861258ef87d45272",
            ),
            (
                "40065932e6c70eb907e4f2a89ec772f5382ca90a49ef44c4ae21155b9decdcc0",
                17152529399128063686u64,
                3643450822628960860u64,
                "d5f6198c64c797f455f5b44062bb136734f508f9cdd02d8d69d24100ac8d6252",
            ),
            (
                "c136436c2db6b2ebd14985e2c883e73c6d8fd95ace54bfefae9eeca47b7da800",
                727585093455815585u64,
                521742371554431881u64,
                "3dfad04a6eb46d175b63e96943c7d636c56d61063277e25557aace95820432da",
            ),
            (
                "9ea50348595593788645394eb041ac4f75ee4d6a4840b9cf1ed304e895060791",
                8654829249939415079u64,
                15623358443672184321u64,
                "61bb0d6ffcd5b32d0ee34a3b7dfb1c495888059be02b255dd1fa3be02fa1ddbd",
            ),
            (
                "5abc714353ad6abda44a609f9b61f310f5b0a7df55ccf553dc2db3edda18ca17",
                5732104102609402825u64,
                15720007305337585794u64,
                "8b55b7e9c6f54057322c5e0610b33b3137f1fcd46f7d4af1aca797c7b5fff033",
            ),
            (
                "e270b59e6e56100f9e2813f263884ba5f74190a1770dd88cd9603266174e0a6b",
                4728642361690813205u64,
                6762867306120182099u64,
                "5e9aa1ff854504b4bfea4a7f0175866eba04e88e14e57ac08dddc63d6917bf47",
            ),
            (
                "78286294c6fb6823bb8b2b2ddb7a1e71ee64e05c9ba33b0eb8bb6654c64a8259",
                6032052879332640150u64,
                498315069638377858u64,
                "799ef578ffb51a5ec42484e788d6ada4f13f0ff73e1b7b3e6d14d58caae9319a",
            ),
            (
                "af1b85cf284b0cb59a4bfb0f699194bcd6ad4538f27057d9d93dc7a95c1ff32e",
                1647153930670480138u64,
                13109595411418593026u64,
                "429dcdf4748c0047b0dd94f3ad12b5e62bbadf8302525cc5d2aad9c9c746696f",
            ),
            (
                "0152b7a0626771a2518de84c01e52839e7821a655f9dcb9a174d8f52b64b7086",
                3915492299782594412u64,
                9550071871839879785u64,
                "4d5e6ce993dfc9597585ae2b4bacd6d055fefc56ae825666c83e0770e4aa0527",
            ),
            (
                "9ea9b8a4f6bce1dba63290b81f4d1b88dfeac3e244856904a5c9d4086a10271b",
                8824593031424861220u64,
                15831101445348312026u64,
                "a07602b4dd5cba679562061b7c5c0344b2edd6eba36aa97ca57a6fe01ed80a48",
            ),
            (
                "d7b26c2d8f85b74423a57a3da56c61829340f65967791bab849c90b5e1547e7a",
                12723258987146468813u64,
                10714399360315276559u64,
                "3705e57b27d931188c0d2017ab62577355b0cdda4173203478a8562a0cdcae0c",
            ),
            (
                "da354ceca117552482e628937931870a28e9d4416f47a58ee77176d0b760c75b",
                1580954430670112951u64,
                14920857341852745222u64,
                "a13d6b0123daa2e662699ac55a2d0ed1d2e73a02ed00ee5a4dd34db8dea2a37e",
            ),
            (
                "53140d0c8b90b4c3c49e0604879d0dc036e914c4c4f799f1ccae357fef2613e3",
                12521658365236780592u64,
                11630410585145916252u64,
                "46f06ce1435a7a0fd3476bbcffe4aac88c33a7fcf50080270b715d25c93d96d7",
            ),
            (
                "4b1c151815da6f18f27e98890eac1f7d43b80f3386c7c7d15ee0e43a7edfe0a6",
                9575643484508382933u64,
                3471795678079408573u64,
                "a9e6a8fac46c5fc61ae07bddc223e9f105f567ad039d2312a03431d1f24d8b2c",
            ),
            (
                "39436357a2bcd906e58fb88238be2ddb2e43c8a5590332e3aee1d1134a0d0ba4",
                10171391804125392783u64,
                2915644784933705108u64,
                "1d5db03f07137da9d3af85096ed51a4ff64bb476a79bf4294850438867fe3833",
            ),
            (
                "5fbe8d9d6a12b061a94a72436caec331ab1fd4e472c3bb4688215788c5e9bcd9",
                5663512925993713993u64,
                18170240962605758111u64,
                "bd5d601cbcb47bd84d410bafec72f2270fceb1ed2ed11499a1e218a9f89a9f7f",
            ),
            (
                "f2e29a909dd31b38e9b92b2b2d214e822ebddb26183cd077d4009773854ab099",
                7512894577556564068u64,
                15905517369556068583u64,
                "a36e66ce11eca7900248c518e12c6c08d659d609f4cbd98468292de7adf780f2",
            ),
            (
                "3eb82e6d6e964ca56b50cc54bdd55bb470c67a4932aba48d27d175d1be2542aa",
                12645567232869276853u64,
                8416544129280224452u64,
                "d177f246a45cc76d39a8ee06b32d8c076c986106b9a8e0455a0b41d00fe3cbde",
            ),
            (
                "c903731014f6a5b4b45174ef5f9d5a2895a19d1308292f25aa323fda88acc938",
                5989992708726918818u64,
                17462460601463602125u64,
                "01241c61ad1c8adc27e5a1096ab6c643af0fbb6e2818ef77272b70e5c3624abc",
            ),
            (
                "ef46410ab47113a78c27e100ed1b476f82a8789012bd95a047a4b23385596f53",
                11884362385049322305u64,
                619908411193297508u64,
                "e9b4c929e26077ac1fd5a771ea5badc7e9ddb58a20a2a797389c63b3dd3df00d",
            ),
            (
                "be336bc6722bb787d542f4ef8ecb6f46a449557ca7b69b8668b6fed19dfa73b7",
                11490216175357680195u64,
                13136528075688203375u64,
                "31bfd807f92e6d5dc5c534e9ad0cb29d00c6f0ae7d7b5f1e65f8e683de0bce59",
            ),
            (
                "39599e5828a8f102b8a6808103ae7df29b838fe739d8b73f72f8f0d282ca5a47",
                6957481657451522177u64,
                4196708540027060724u64,
                "968a12d79704b313471ece148cb4e26b8b11620db2a9ee6da0f5dc200801f555",
            ),
            (
                "acd99530bb14ca9a7fac3df8eebfd8cdd234b0f6f7c3893a20bc159a4fd54df5",
                9792913946138032169u64,
                9219321015500590384u64,
                "db45a98128770a329c82c904ceee21d3917f6072b8bd260e46218f65656c964c",
            ),
            (
                "453b80a0b11f237011c57630034ed46888ad96f4300a58aea24c0fe4a5472f68",
                14407140330317286994u64,
                5783848199433986576u64,
                "b8cded0b4efd6bf2282a4f8b3c353f74821714f84df9a6ab25131edc7fdad00f",
            ),
            (
                "23e464d1e9b413a4a6b378cee3a0405ec6ccbb4d418372d1b42d3fde558d48d1",
                1190974500816796805u64,
                1621159728666344828u64,
                "d677f41d273754da3ab8080b605ae07a7193c9f35f6318b809e42a1fdf594be3",
            ),
            (
                "d0e590648dec459aca50edf44251627bab5a36029a0c748b1ddf86b7b887425b",
                4807164391931567365u64,
                4256042233199858200u64,
                "a8677de59ab856516a03663730af54c55a79169346c3d958b564e5ee35d8622b",
            ),
            (
                "72387dbaaaf2c39175d8c067558b869ba7bdc6234bc63ee97a53fea1d988ff39",
                5046042574093452325u64,
                3088471405044806123u64,
                "83c226621506b07073936aec3c87a8e2ef34dd42e504adc2bbab39ede49aa77f",
            ),
            (
                "de6874ca2b9dd8b4347c25d32b882a2a7c127b127d6c5e00d073ab3853339d0e",
                6112730660331874479u64,
                10943246617310133253u64,
                "a0c96a69e5ab3e3fe1a1a2fd0e5e68035ff3c7b2985e4e6b8407d4c377600c6f",
            ),
            (
                "b0d8689e08b983e578d6a0c136b76952497087ee144369af653a0a1b231eeb28",
                15612408165265483596u64,
                13112504741499957010u64,
                "4fc49edeff215f1d54dfd2e60a14a3de2abecbe845db2148c7aee32c65f3c91c",
            ),
            (
                "29d7fb6b714cbdd1be95c4a268cef7f544329642ae05fab26dc251bbc773085e",
                17509162400681223655u64,
                5075629528173950353u64,
                "781ecb560ef8cf0bcfa96b8d12075f4cf87ad52d69dfb2c72801206eded135bd",
            ),
            (
                "85dbf7074c93a4e39b67cc504b35351ee16c1fab437a7fb9e5d9320be1d9c13c",
                17692199403267011109u64,
                7069378948726478427u64,
                "a3ff0d8dee5aa0214460f5b03a70bd76ef00ac8c07f07c0b3d82c9c57e4c72a9",
            ),
            (
                "7bd5a9f3126b4a681afac9a177c6ff7f3dd80d8d7fd5a821a705221c96975ded",
                17807965607151214145u64,
                5562549152802999850u64,
                "dbc3861943b7372e49698b1c5b0e4255b7c93e9fa2c13d6a4405172ab0db9a5b",
            ),
            (
                "496d13d45dbe7eb02fee23c914ac9fefdf86cf5c937c520719fc6a31b3fcf8d9",
                13446203348342334214u64,
                332407928246785326u64,
                "d2d73f15fcdc12adce25b911aa4551dcf900e225761e254eb6392cbd414e389c",
            ),
            (
                "b2f0a0127fc74a35dec5515b1c7eb8a3833ca99925049c47cd109ec94678e6c5",
                9683373807753869342u64,
                7570798132195583433u64,
                "e704110433e5ab17858c5fbe4f1b6d692942d5f5981cac68372d06066bee97fe",
            ),
            (
                "d5f65171b17d7720411905ef138e84b9d1f459e2b248521c449f1781aafd675e",
                10088287051097617949u64,
                185695341767856973u64,
                "8d784c4171e242af4187f30510cd298106b7e68cd3088444a055cb1f3893ba28",
            ),
            (
                "7dcbec5c20fbf1d69665d4b9cdc450fea2d0098e78084bce0a864fea4ba016b0",
                13908816056510478374u64,
                17793990636863600193u64,
                "18e9026372d91e116faf813ce3ba9d7fadef2bb3b779be6efeba8a4ecd9e1f38",
            ),
            (
                "d4f772f4bf1cfa4dad4b55962b50900da8657a4961dabbdf0664f3cd42d368f8",
                16438076732493217366u64,
                18419670900047275588u64,
                "b9fd16b16b3a8fab4d9c47f452d9ce4aad530edeb06ee6830589078db2f79382",
            ),
            (
                "2d009535f82b1813ce2ca7236ceae7864c1e4d3644a1acd02656919ef1aa55d0",
                10206924399607440433u64,
                3986996560633257271u64,
                "db49e225bd427768599a7c06d7aee432121fa3179505f9ee8c717f51c7fa8c54",
            ),
            (
                "b1d7a292df12e505e7433c7e850e9efc81a8931b65f3354a66402894b6d5ba76",
                8215550459234533539u64,
                10241096845089693964u64,
                "5567813b312cb811909a01d14ee8f7ec4d239198ea2d37243123e1de2317e1af",
            ),
            (
                "85120d6f43ea9258accf6a87e49cd5461d9b3735a4dc623f9fbcc669cbdd1ce6",
                17566770568845511328u64,
                8686605711223432099u64,
                "e163f4fcd17acf5714ee48278732808601e861cd4c4c24326cd24431aab1d0ce",
            ),
            (
                "48fe4c22080c6e702f7af0e97fb5354c1c14ff4616c6fc4ac8a4491d4b9b3473",
                14371024664575587429u64,
                15149464181957728462u64,
                "061dec7af4b41bdd056306a8b13b71d574a49a4595884b1a77674f5150d4509d",
            ),
            (
                "29d14b014fa3cabbb3b4808e751e81f571de6d0e727cae627318a5fd82fef517",
                9612395342616083334u64,
                3700617080099093094u64,
                "f7b33a2d2784441f77f0cc1c87930e79bea3332a921269b500e81d823108561c",
            ),
        ];

        // Insert all accounts
        for (addr, nonce, balance, code_hash) in &account_data {
            accounts.insert(
                b256(addr),
                Some(Account {
                    nonce: *nonce,
                    balance: U256::from(*balance),
                    bytecode_hash: Some(b256(code_hash)),
                }),
            );
        }

        let post_state = HashedPostState { accounts, storages: Default::default() };

        // Create test harness
        let harness = ProofTestHarness::new(post_state);

        // Create targets from test case input - these are Nibbles in hex form.
        // NOTE(review): the keys below are zero-padded prefixes; presumably only the
        // first few nibbles matter given `min_len = 2` — confirm against the
        // original failing proptest seed.
        let targets = vec![
            ProofV2Target::new(b256(
                "0153000000000000000000000000000000000000000000000000000000000000",
            ))
            .with_min_len(2),
            ProofV2Target::new(b256(
                "0000000000000000000000000000000000000000000000000000000000000000",
            ))
            .with_min_len(2),
            ProofV2Target::new(b256(
                "2300000000000000000000000000000000000000000000000000000000000000",
            ))
            .with_min_len(2),
        ];

        // Test proof generation
        harness.assert_proof(targets).expect("Proof generation failed");
    }
2443}