reth_trie/proof_v2/mod.rs
//! Proof calculation version 2: Leaf-only implementation.
//!
//! This module provides a rewritten proof calculator that:
//! - Uses only leaf data (HashedAccounts/Storages) to generate proofs
//! - Returns proof nodes sorted lexicographically by path
//! - Automatically resets after each calculation
//! - Re-uses cursors across calculations
//! - Supports generic value types with lazy evaluation
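//!
//! A minimal usage sketch (illustrative; cursor construction, the value encoder, and target
//! building are assumed to happen elsewhere):
//!
//! ```rust,ignore
//! let mut calculator = ProofCalculator::new(trie_cursor, hashed_cursor);
//! // Targets must be sorted lexicographically by key.
//! let proof_nodes = calculator.proof(&mut value_encoder, &mut targets)?;
//! // The calculator resets itself, so it can be re-used for the next calculation.
//! ```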

use crate::{
    hashed_cursor::{HashedCursor, HashedStorageCursor},
    trie_cursor::{depth_first, TrieCursor, TrieStorageCursor},
};
use alloy_primitives::{keccak256, B256, U256};
use alloy_rlp::Encodable;
use alloy_trie::{BranchNodeCompact, TrieMask};
use reth_execution_errors::trie::StateProofError;
use reth_trie_common::{BranchNode, BranchNodeMasks, Nibbles, ProofTrieNode, RlpNode, TrieNode};
use std::cmp::Ordering;
use tracing::{error, instrument, trace};

mod value;
pub use value::*;

mod node;
use node::*;

mod target;
pub use target::*;

/// Target to use with the `tracing` crate.
static TRACE_TARGET: &str = "trie::proof_v2";

/// Number of bytes to pre-allocate for [`ProofCalculator`]'s `rlp_encode_buf` field.
const RLP_ENCODE_BUF_SIZE: usize = 1024;

/// A [`Nibbles`] which contains 64 zero nibbles.
static PATH_ALL_ZEROS: Nibbles = {
    let mut path = Nibbles::new();
    let mut i = 0;
    while i < 64 {
        path.push_unchecked(0);
        i += 1;
    }
    path
};

/// A proof calculator that generates merkle proofs using only leaf data.
///
/// The calculator:
/// - Accepts one or more B256 proof targets sorted lexicographically
/// - Returns proof nodes sorted lexicographically by path
/// - Automatically resets after each calculation
/// - Re-uses cursors from one calculation to the next
#[derive(Debug)]
pub struct ProofCalculator<TC, HC, VE: LeafValueEncoder> {
    /// Trie cursor for traversing stored branch nodes.
    trie_cursor: TC,
    /// Hashed cursor for iterating over leaf data.
    hashed_cursor: HC,
    /// Branches which are currently in the process of being constructed, each being a child of
    /// the previous one.
    branch_stack: Vec<ProofTrieBranch>,
    /// The path of the last branch in `branch_stack`.
    branch_path: Nibbles,
    /// Children of branches in the `branch_stack`.
    ///
    /// Each branch in `branch_stack` tracks which children are in this stack using its
    /// `state_mask`; the number of children the branch has in this stack is equal to the number of
    /// bits set in its `state_mask`.
    ///
    /// The children for the bottom branch in `branch_stack` are found at the bottom of this stack,
    /// and so on. When a branch is removed from `branch_stack` its children are removed from this
    /// one, and the branch is pushed onto this stack in their place (see [`Self::pop_branch`]).
    ///
    /// Children on the `child_stack` are converted to [`ProofTrieBranchChild::RlpNode`]s via the
    /// [`Self::commit_child`] method. Committing a child indicates that no further changes are
    /// expected to happen to it (e.g. splitting its short key when inserting a new branch). Given
    /// that keys are consumed in lexicographical order, only the last child on the stack can
    /// ever be modified, and therefore all children besides the last are expected to be
    /// [`ProofTrieBranchChild::RlpNode`]s.
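    ///
    /// An illustrative layout with two branches under construction:
    ///
    /// ```text
    /// branch_stack: [ B1, B2 ]               (B2 is a child of B1)
    /// child_stack:  [ c0, c1, d0, d1, d2 ]
    ///                 ^B1's^  ^--B2's---^    (counts = set bits in each state_mask)
    /// ```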
    child_stack: Vec<ProofTrieBranchChild<VE::DeferredEncoder>>,
    /// Cached branch data pulled from the `trie_cursor`. The calculator will use the cached
    /// [`BranchNodeCompact::hashes`] to skip over the calculation of sub-tries in the overall
    /// trie. The cached hashes cannot be used for any paths which are prefixes of a proof target.
    cached_branch_stack: Vec<(Nibbles, BranchNodeCompact)>,
    /// The proofs which will be returned from the calculation. This gets taken at the end of every
    /// proof call.
    retained_proofs: Vec<ProofTrieNode>,
    /// Free-list of re-usable buffers of [`RlpNode`]s, used for encoding branch nodes to RLP.
    ///
    /// We are generally able to re-use these buffers across different branch nodes for the
    /// duration of a proof calculation, but occasionally we will lose one when a branch
    /// node is returned as a `ProofTrieNode`.
    rlp_nodes_bufs: Vec<Vec<RlpNode>>,
    /// Re-usable byte buffer, used for RLP encoding.
    rlp_encode_buf: Vec<u8>,
}

impl<TC, HC, VE: LeafValueEncoder> ProofCalculator<TC, HC, VE> {
    /// Create a new [`ProofCalculator`] instance for calculating account proofs.
    pub fn new(trie_cursor: TC, hashed_cursor: HC) -> Self {
        Self {
            trie_cursor,
            hashed_cursor,
            branch_stack: Vec::<_>::with_capacity(64),
            branch_path: Nibbles::new(),
            child_stack: Vec::<_>::with_capacity(64),
            cached_branch_stack: Vec::<_>::with_capacity(64),
            retained_proofs: Vec::<_>::with_capacity(32),
            rlp_nodes_bufs: Vec::<_>::with_capacity(8),
            rlp_encode_buf: Vec::<_>::with_capacity(RLP_ENCODE_BUF_SIZE),
        }
    }
}

impl<TC, HC, VE> ProofCalculator<TC, HC, VE>
where
    TC: TrieCursor,
    HC: HashedCursor,
    VE: LeafValueEncoder<Value = HC::Value>,
{
    /// Takes a re-usable `RlpNode` buffer from the internal free-list, or allocates a new one if
    /// the free-list is empty.
    ///
    /// The returned Vec will have a length of zero.
    fn take_rlp_nodes_buf(&mut self) -> Vec<RlpNode> {
        self.rlp_nodes_bufs
            .pop()
            .map(|mut buf| {
                buf.clear();
                buf
            })
            .unwrap_or_else(|| Vec::with_capacity(16))
    }

    /// Returns zero if `branch_stack` is empty, one otherwise.
    ///
    /// This is used when working with the `ext_len` field of [`ProofTrieBranch`]. The `ext_len` is
    /// calculated by taking the difference of the current `branch_path` and the new branch's path;
    /// if the new branch has a parent branch (i.e. `branch_stack` is not empty) then 1 is
    /// subtracted from the `ext_len` to account for the child's nibble on the parent.
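    ///
    /// A worked example (illustrative): with the current `branch_path` at 0x1 and a new branch at
    /// 0x1234, the path difference is 3 nibbles. One nibble (2) is recorded in the parent's
    /// `state_mask`, so `ext_len` is 3 - 1 = 2. With an empty `branch_stack` nothing is
    /// subtracted.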
    #[inline]
    const fn maybe_parent_nibble(&self) -> usize {
        !self.branch_stack.is_empty() as usize
    }

    /// Returns true if the proof of a node at the given path should be retained. A node is retained
    /// if its path is a prefix of any target.
    ///
    /// This may move the `targets` iterator forward if the given path comes after the current
    /// target.
    ///
    /// This method takes advantage of the [`std::slice::Iter`] component of [`TargetsCursor`] to
    /// check the minimum number of targets. In general it looks at a current target and the next
    /// target simultaneously, forming an end-exclusive range.
    ///
    /// ```text
    /// * Given targets: [ 0x012, 0x045, 0x678 ]
    /// * targets.current() returns:
    ///   - (0x012, Some(0x045)): covers (0x012..0x045)
    ///   - (0x045, Some(0x678)): covers (0x045..0x678)
    ///   - (0x678, None): covers (0x678..)
    /// ```
    ///
    /// As long as the path which is passed in lies within that range we can continue to use the
    /// current target. Once the path goes beyond that range (i.e. path >= next target) then we can
    /// be sure that no further paths will be in the range, and we can iterate forward.
    ///
    /// ```text
    /// * Given:
    ///   - path: 0x04
    ///   - targets.current() returns (0x012, Some(0x045))
    ///
    /// * 0x04 comes _after_ 0x045 in depth-first order, so (0x012..0x045) does not contain 0x04.
    ///
    /// * targets.next() is called.
    ///
    /// * targets.current() now returns (0x045, Some(0x678)). This does contain 0x04.
    ///
    /// * 0x04 is a prefix of 0x045, and so is retained.
    /// ```
    fn should_retain<'a>(
        &self,
        targets: &mut Option<TargetsCursor<'a>>,
        path: &Nibbles,
        check_min_len: bool,
    ) -> bool {
        // If no targets are given then we never retain anything
        let Some(targets) = targets.as_mut() else { return false };

        let (mut lower, mut upper) = targets.current();

        trace!(target: TRACE_TARGET, ?path, target = ?lower, "should_retain: called");
        debug_assert!(
            self.retained_proofs.last().is_none_or(
                |ProofTrieNode { path: last_retained_path, .. }| {
                    depth_first::cmp(path, last_retained_path) == Ordering::Greater
                }
            ),
            "should_retain called with path {path:?} which is not after previously retained node {:?} in depth-first order",
            self.retained_proofs.last().map(|n| n.path),
        );

        loop {
            // If the node in question is a prefix of the target then we do not iterate targets
            // further.
            //
            // Even if the node is a prefix of the target's key, if the target has a non-zero
            // `min_len` it indicates that the node should only be retained if it is
            // longer than that value.
            //
            // _However_ even if the node doesn't match the target due to the target's `min_len`, it
            // may match other targets whose keys match this node. So we search forwards and
            // backwards for all targets which might match this node, and check against the
            // `min_len` of each.
            //
            // For example, given a branch 0xabc, with children at 0, 1, and 2, and targets:
            // - key: 0xabc0, min_len: 2
            // - key: 0xabc1, min_len: 1
            // - key: 0xabc2, min_len: 4 <-- current
            // - key: 0xabc3, min_len: 3
            //
            // When the branch node at 0xabc is visited it will be after the targets cursor has
            // iterated forward to 0xabc2 (because all children will have been visited already). At
            // this point the target for 0xabc2 will not match the branch due to its `min_len`, but
            // any of the other targets would, so we need to check those as well.
            if lower.key.starts_with(path) {
                return !check_min_len ||
                    (path.len() >= lower.min_len as usize ||
                        targets
                            .skip_iter()
                            .take_while(|target| target.key.starts_with(path))
                            .any(|target| path.len() >= target.min_len as usize) ||
                        targets
                            .rev_iter()
                            .take_while(|target| target.key.starts_with(path))
                            .any(|target| path.len() >= target.min_len as usize))
            }

            // If the path isn't in the current range then iterate forward until it is (or until
            // there is no upper bound, indicating unbounded).
            if upper.is_some_and(|upper| depth_first::cmp(path, &upper.key) != Ordering::Less) {
                (lower, upper) = targets.next();
                trace!(target: TRACE_TARGET, target = ?lower, "upper target <= path, next target");
            } else {
                return false
            }
        }
    }

    /// Takes a child which has been removed from the `child_stack` and converts it to an
    /// [`RlpNode`].
    ///
    /// Calling this method indicates that the child will not undergo any further modifications, and
    /// therefore can be retained as a proof node if applicable.
    fn commit_child<'a>(
        &mut self,
        targets: &mut Option<TargetsCursor<'a>>,
        child_path: Nibbles,
        child: ProofTrieBranchChild<VE::DeferredEncoder>,
    ) -> Result<RlpNode, StateProofError> {
        // If the child is already an `RlpNode` then there is nothing to do.
        if let ProofTrieBranchChild::RlpNode(rlp_node) = child {
            return Ok(rlp_node)
        }

        // If we should retain the child then do so.
        if self.should_retain(targets, &child_path, true) {
            trace!(target: TRACE_TARGET, ?child_path, "Retaining child");

            // Convert to `ProofTrieNode`, which will be what is retained.
            //
            // If this node is a branch then its `rlp_nodes_buf` will be taken and not returned to
            // the `rlp_nodes_bufs` free-list.
            self.rlp_encode_buf.clear();
            let proof_node = child.into_proof_trie_node(child_path, &mut self.rlp_encode_buf)?;

            // Use the `ProofTrieNode` to encode the `RlpNode`, and then push it onto retained
            // nodes before returning.
            self.rlp_encode_buf.clear();
            proof_node.node.encode(&mut self.rlp_encode_buf);

            self.retained_proofs.push(proof_node);
            return Ok(RlpNode::from_rlp(&self.rlp_encode_buf));
        }

        // If the child path is not being retained then we convert directly to an `RlpNode`
        // using `into_rlp`. Since we are not retaining the node we can recover any `RlpNode`
        // buffers for the free-list here, hence why we do this as a separate logical branch.
        self.rlp_encode_buf.clear();
        let (child_rlp_node, freed_rlp_nodes_buf) = child.into_rlp(&mut self.rlp_encode_buf)?;

        // If there is an `RlpNode` buffer which can be re-used then push it onto the free-list.
        if let Some(buf) = freed_rlp_nodes_buf {
            self.rlp_nodes_bufs.push(buf);
        }

        Ok(child_rlp_node)
    }

    /// Returns the path of the child of the currently under-construction branch at the given
    /// nibble.
    #[inline]
    fn child_path_at(&self, nibble: u8) -> Nibbles {
        let mut child_path = self.branch_path;
        debug_assert!(child_path.len() < 64);
        child_path.push_unchecked(nibble);
        child_path
    }

    /// Returns the index of the highest nibble which is set in the mask.
    ///
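    /// A worked example (illustrative):
    ///
    /// ```text
    /// mask  = 0b0000_0100_0000_0010   (bits 1 and 10 set)
    /// index = u16::BITS - leading_zeros(mask) - 1 = 16 - 5 - 1 = 10
    /// ```
    ///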
    /// # Panics
    ///
    /// Will panic in debug mode if the mask is empty.
    #[inline]
    fn highest_set_nibble(mask: TrieMask) -> u8 {
        debug_assert!(!mask.is_empty());
        (u16::BITS - mask.leading_zeros() - 1) as u8
    }

    /// Returns the path of the child on top of the `child_stack`, or the root path if the stack is
    /// empty. Returns None if the current branch has not yet pushed a child (empty `state_mask`).
    fn last_child_path(&self) -> Option<Nibbles> {
        // If there is no branch under construction then the top child must be the root child.
        let Some(branch) = self.branch_stack.last() else {
            return Some(Nibbles::new());
        };

        (!branch.state_mask.is_empty())
            .then(|| self.child_path_at(Self::highest_set_nibble(branch.state_mask)))
    }

    /// Calls [`Self::commit_child`] on the last child of `child_stack`, replacing it with a
    /// [`ProofTrieBranchChild::RlpNode`].
    ///
    /// If `child_stack` is empty then this is a no-op.
    ///
    /// NOTE that this method call relies on the `state_mask` of the top branch of the
    /// `branch_stack` to determine the last child's path. When committing the last child prior to
    /// pushing a new child, it's important to set the new child's `state_mask` bit _after_ the call
    /// to this method.
    fn commit_last_child<'a>(
        &mut self,
        targets: &mut Option<TargetsCursor<'a>>,
    ) -> Result<(), StateProofError> {
        let Some(child_path) = self.last_child_path() else { return Ok(()) };
        let child =
            self.child_stack.pop().expect("child_stack can't be empty if there's a child path");

        // If the child is already an `RlpNode` then there is nothing to do, push it back on with no
        // changes.
        if let ProofTrieBranchChild::RlpNode(_) = child {
            self.child_stack.push(child);
            return Ok(())
        }

        // Only commit immediately if retained for the proof. Otherwise, defer conversion
        // to pop_branch() to give the DeferredEncoder time for async work.
        if self.should_retain(targets, &child_path, true) {
            let child_rlp_node = self.commit_child(targets, child_path, child)?;
            self.child_stack.push(ProofTrieBranchChild::RlpNode(child_rlp_node));
        } else {
            self.child_stack.push(child);
        }

        Ok(())
    }

    /// Creates a new leaf node on a branch, setting its `state_mask` bit and pushing the leaf onto
    /// the `child_stack`.
    ///
    /// # Panics
    ///
    /// - If `branch_stack` is empty
    /// - If the leaf's nibble is already set in the branch's `state_mask`.
    fn push_new_leaf<'a>(
        &mut self,
        targets: &mut Option<TargetsCursor<'a>>,
        leaf_nibble: u8,
        leaf_short_key: Nibbles,
        leaf_val: VE::DeferredEncoder,
    ) -> Result<(), StateProofError> {
        // Before pushing the new leaf onto the `child_stack` we need to commit the previous last
        // child, so that only `child_stack`'s final child is a non-RlpNode.
        self.commit_last_child(targets)?;

        // Once the last child is committed we set the new child's bit on the top branch's
        // `state_mask` and push that new child.
        let branch = self.branch_stack.last_mut().expect("branch_stack cannot be empty");

        debug_assert!(!branch.state_mask.is_bit_set(leaf_nibble));
        branch.state_mask.set_bit(leaf_nibble);

        self.child_stack
            .push(ProofTrieBranchChild::Leaf { short_key: leaf_short_key, value: leaf_val });

        Ok(())
    }

    /// Pushes a new branch onto the `branch_stack` based on the path and short key of the last
    /// child on the `child_stack` and the path of the next child which will be pushed onto the
    /// stack after this call.
    ///
    /// Returns the nibble of the branch's `state_mask` which should be set for the new child, and
    /// the short key that the next child should use.
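    ///
    /// An illustrative split (hypothetical paths):
    ///
    /// ```text
    /// before: branch at 0x1 with a leaf child at nibble 2, short key 0x234
    ///         (full leaf path 0x12234...)
    /// call:   push_new_branch(0x1267...)
    /// after:  a new branch at 0x12 (ext_len 0) takes the leaf at nibble 2 with its
    ///         short key trimmed to 0x34; returns (6, 0x7...) for the new child
    /// ```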
    fn push_new_branch(&mut self, new_child_path: Nibbles) -> (u8, Nibbles) {
        // First determine the new child's short key relative to the current branch. If there is no
        // current branch then the short key is the full path.
        let new_child_short_key = if self.branch_stack.is_empty() {
            new_child_path
        } else {
            // When there is a current branch then trim off its path as well as the nibble that it
            // has set for this leaf.
            trim_nibbles_prefix(&new_child_path, self.branch_path.len() + 1)
        };

        // Get the new branch's first child, which is the child on the top of the stack with which
        // the new child shares the same nibble on the current branch.
        let first_child = self
            .child_stack
            .last_mut()
            .expect("push_new_branch can't be called with empty child_stack");

        let first_child_short_key = first_child.short_key();
        debug_assert!(
            !first_child_short_key.is_empty(),
            "push_new_branch called when top child on stack is not a leaf or extension with a short key",
        );

        // Determine how many nibbles are shared between the new branch's first child and the new
        // child. This common prefix will be the extension of the new branch.
        let common_prefix_len = first_child_short_key.common_prefix_length(&new_child_short_key);

        // Trim off the common prefix from the first child's short key, plus one nibble which will
        // be stored by the new branch itself in its state mask.
        let first_child_nibble = first_child_short_key.get_unchecked(common_prefix_len);
        first_child.trim_short_key_prefix(common_prefix_len + 1);

        // Similarly, trim off the common prefix, plus one nibble for the new branch, from the new
        // child's short key.
        let new_child_nibble = new_child_short_key.get_unchecked(common_prefix_len);
        let new_child_short_key = trim_nibbles_prefix(&new_child_short_key, common_prefix_len + 1);

        // Update the branch path to reflect the new branch about to be pushed. Its path will be
        // the path of the previous branch, plus the nibble shared by each child, plus the parent
        // extension (denoted by a non-zero `ext_len`). Since the new branch's path is a prefix of
        // the original new_child_path we can just slice that.
        //
        // If the new branch is the first branch then we do not add the extra 1, as there is no
        // nibble in a parent branch to account for.
        let branch_path_len =
            self.branch_path.len() + common_prefix_len + self.maybe_parent_nibble();
        self.branch_path = new_child_path.slice_unchecked(0, branch_path_len);

        // Push the new branch onto the `branch_stack`. We do not yet set the `state_mask` bit of
        // the new child; whatever actually pushes the child onto the `child_stack` is expected to
        // do that.
        self.branch_stack.push(ProofTrieBranch {
            ext_len: common_prefix_len as u8,
            state_mask: TrieMask::new(1 << first_child_nibble),
            masks: None,
        });

        trace!(
            target: TRACE_TARGET,
            ?new_child_path,
            ?common_prefix_len,
            ?first_child_nibble,
            branch_path = ?self.branch_path,
            "Pushed new branch",
        );

        (new_child_nibble, new_child_short_key)
    }

    /// Pops the top branch off of the `branch_stack`, hashes its children on the `child_stack`, and
    /// replaces those children on the `child_stack`. The `branch_path` field will be updated
    /// accordingly.
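    ///
    /// A sketch of the stack transition (illustrative, `B` having three children):
    ///
    /// ```text
    /// before: branch_stack: [.., B]    child_stack: [.., c0, c1, c2]
    /// after:  branch_stack: [..]       child_stack: [.., B']
    /// ```
    ///
    /// where `B'` is `B` encoded as a branch child, or as an extension wrapping it if `B` has a
    /// non-zero `ext_len`.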
    ///
    /// # Panics
    ///
    /// This method panics if `branch_stack` is empty.
    fn pop_branch<'a>(
        &mut self,
        targets: &mut Option<TargetsCursor<'a>>,
    ) -> Result<(), StateProofError> {
        trace!(
            target: TRACE_TARGET,
            branch = ?self.branch_stack.last(),
            branch_path = ?self.branch_path,
            child_stack_len = ?self.child_stack.len(),
            "pop_branch: called",
        );

        // Ensure the final child on the child stack has been committed, as this method expects all
        // children of the branch to have been committed.
        self.commit_last_child(targets)?;

        let mut rlp_nodes_buf = self.take_rlp_nodes_buf();
        let branch = self.branch_stack.pop().expect("branch_stack cannot be empty");

        // Take the branch's children off the stack, using the state mask to determine how many
        // there are.
        let num_children = branch.state_mask.count_ones() as usize;
        debug_assert!(num_children > 1, "A branch must have at least two children");
        debug_assert!(
            self.child_stack.len() >= num_children,
            "Stack is missing necessary children ({num_children:?})"
        );

        // Collect children into RlpNode Vec. Children are in lexicographic order.
        for child in self.child_stack.drain(self.child_stack.len() - num_children..) {
            let child_rlp_node = match child {
                ProofTrieBranchChild::RlpNode(rlp_node) => rlp_node,
                uncommitted_child => {
                    // Convert uncommitted child (not retained for proof) to RlpNode now.
                    self.rlp_encode_buf.clear();
                    let (rlp_node, freed_buf) =
                        uncommitted_child.into_rlp(&mut self.rlp_encode_buf)?;
                    if let Some(buf) = freed_buf {
                        self.rlp_nodes_bufs.push(buf);
                    }
                    rlp_node
                }
            };
            rlp_nodes_buf.push(child_rlp_node);
        }

        debug_assert_eq!(
            rlp_nodes_buf.len(),
            branch.state_mask.count_ones() as usize,
            "children length must match number of bits set in state_mask"
        );

        // Calculate the short key of the parent extension (if the branch has a parent extension).
        // It's important to calculate this short key prior to modifying the `branch_path`.
        let short_key = trim_nibbles_prefix(
            &self.branch_path,
            self.branch_path.len() - branch.ext_len as usize,
        );

        // Wrap the `BranchNode` so it can be pushed onto the child stack.
        let mut branch_as_child = ProofTrieBranchChild::Branch {
            node: BranchNode::new(rlp_nodes_buf, branch.state_mask),
            masks: branch.masks,
        };

        // If there is an extension then encode the branch as an `RlpNode` and use it to construct
        // the extension in its place.
        if !short_key.is_empty() {
            let branch_rlp_node = self.commit_child(targets, self.branch_path, branch_as_child)?;
            branch_as_child = ProofTrieBranchChild::Extension { short_key, child: branch_rlp_node };
        }

        self.child_stack.push(branch_as_child);

        // Update the branch_path. If this branch is the only branch then only its extension needs
        // to be trimmed, otherwise we also need to remove its nibble from its parent.
        let new_path_len =
            self.branch_path.len() - branch.ext_len as usize - self.maybe_parent_nibble();

        debug_assert!(self.branch_path.len() >= new_path_len);
        self.branch_path = self.branch_path.slice_unchecked(0, new_path_len);

        Ok(())
    }

    /// Adds a single leaf for a key to the stack, possibly collapsing an existing branch and/or
    /// creating a new one depending on the path of the key.
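    ///
    /// An illustrative sequence (hypothetical keys, prefixes shown only):
    ///
    /// ```text
    /// push_leaf(0x15...): first leaf, pushed directly onto the child_stack
    /// push_leaf(0x18...): shares prefix 0x1 -> branch at 0x1 (ext_len 1), children {5, 8}
    /// push_leaf(0x40...): branch 0x1 is complete -> popped; a new root branch is
    ///                     created with children {1, 4}
    /// ```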
    fn push_leaf<'a>(
        &mut self,
        targets: &mut Option<TargetsCursor<'a>>,
        key: Nibbles,
        val: VE::DeferredEncoder,
    ) -> Result<(), StateProofError> {
        loop {
            trace!(
                target: TRACE_TARGET,
                ?key,
                branch_stack_len = ?self.branch_stack.len(),
                branch_path = ?self.branch_path,
                child_stack_len = ?self.child_stack.len(),
                "push_leaf: loop",
            );

            // Get the `state_mask` of the branch currently being built. If there are no branches
            // on the stack then it means either the trie is empty or only a single leaf has been
            // added previously.
            let curr_branch_state_mask = match self.branch_stack.last() {
                Some(curr_branch) => curr_branch.state_mask,
                None if self.child_stack.is_empty() => {
                    // If the child stack is empty then this is the first leaf, push it and be done
                    self.child_stack
                        .push(ProofTrieBranchChild::Leaf { short_key: key, value: val });
                    return Ok(())
                }
                None => {
                    // If the child stack is not empty then it must have only a single child,
                    // which is either a leaf or extension with a non-zero short key.
                    debug_assert_eq!(self.child_stack.len(), 1);
                    debug_assert!(!self
                        .child_stack
                        .last()
                        .expect("already checked for emptiness")
                        .short_key()
                        .is_empty());
                    let (nibble, short_key) = self.push_new_branch(key);
                    self.push_new_leaf(targets, nibble, short_key, val)?;
                    return Ok(())
                }
            };

            // Find the common prefix length, which is the number of nibbles shared between the
            // current branch and the key.
            let common_prefix_len = self.branch_path.common_prefix_length(&key);

            // If the current branch does not share all of its nibbles with the new key then it is
            // not the parent of the new key. In this case the current branch will have no more
            // children. We can pop it and loop back to the top to try again with its parent branch.
            if common_prefix_len < self.branch_path.len() {
                self.pop_branch(targets)?;
                continue
            }

            // If the current branch is a prefix of the new key then the leaf is a child of the
            // branch. If the branch doesn't have the leaf's nibble set then the leaf can be added
            // directly, otherwise a new branch must be created in-between this branch and that
            // existing child.
            let nibble = key.get_unchecked(common_prefix_len);
            if curr_branch_state_mask.is_bit_set(nibble) {
                // Push a new branch which splits the short key of the existing child at this
                // nibble.
                let (nibble, short_key) = self.push_new_branch(key);
                // Push the new leaf onto the new branch.
                self.push_new_leaf(targets, nibble, short_key, val)?;
            } else {
                let short_key = key.slice_unchecked(common_prefix_len + 1, key.len());
                self.push_new_leaf(targets, nibble, short_key, val)?;
            }

            return Ok(())
        }
    }

    /// Given the lower (inclusive) and upper (exclusive) bounds of a range of keys, iterates over
    /// the `hashed_cursor` and calculates all trie nodes possible based on those keys. If the upper
    /// bound is None then it is considered unbounded.
    ///
    /// It is expected that this method is "driven" by `next_uncached_key_range`, which decides
    /// which ranges of keys need to be calculated based on what cached trie data is available.
    #[instrument(
        target = TRACE_TARGET,
        level = "trace",
        skip_all,
        fields(?lower_bound, ?upper_bound),
    )]
    fn calculate_key_range<'a>(
        &mut self,
        value_encoder: &mut VE,
        targets: &mut Option<TargetsCursor<'a>>,
        hashed_cursor_current: &mut Option<(Nibbles, VE::DeferredEncoder)>,
        lower_bound: Nibbles,
        upper_bound: Option<Nibbles>,
    ) -> Result<(), StateProofError> {
        // A helper closure for mapping entries returned from the `hashed_cursor`, converting the
        // key to Nibbles and immediately creating the deferred encoder so that encoding of the
        // leaf value can begin ASAP.
        let mut map_hashed_cursor_entry = |(key_b256, val): (B256, _)| {
            debug_assert_eq!(key_b256.len(), 32);
            // SAFETY: key is a B256 and so is exactly 32 bytes.
            let key = unsafe { Nibbles::unpack_unchecked(key_b256.as_slice()) };
            let val = value_encoder.deferred_encoder(key_b256, val);
            (key, val)
        };

        // If the cursor hasn't been used, or the last iterated key is prior to this range's
        // key range, then seek forward to at least the first key.
        if hashed_cursor_current.as_ref().is_none_or(|(key, _)| key < &lower_bound) {
            trace!(
                target: TRACE_TARGET,
                current = ?hashed_cursor_current.as_ref().map(|(k, _)| k),
                "Seeking hashed cursor to meet lower bound",
            );

            let lower_key = B256::right_padding_from(&lower_bound.pack());
            *hashed_cursor_current =
                self.hashed_cursor.seek(lower_key)?.map(&mut map_hashed_cursor_entry);
        }

        // Loop over all keys in the range, calling `push_leaf` on each.
        while let Some((key, _)) = hashed_cursor_current.as_ref() &&
            upper_bound.is_none_or(|upper_bound| key < &upper_bound)
        {
            let (key, val) =
                core::mem::take(hashed_cursor_current).expect("while-let checks for Some");
            self.push_leaf(targets, key, val)?;
            *hashed_cursor_current = self.hashed_cursor.next()?.map(&mut map_hashed_cursor_entry);
        }

        trace!(target: TRACE_TARGET, "No further keys within range");
        Ok(())
    }

    /// Constructs and returns a new [`ProofTrieBranch`] based on an existing [`BranchNodeCompact`].
    #[inline]
    const fn new_from_cached_branch(
        cached_branch: &BranchNodeCompact,
        ext_len: u8,
    ) -> ProofTrieBranch {
        ProofTrieBranch {
            ext_len,
            state_mask: TrieMask::new(0),
            masks: Some(BranchNodeMasks {
                tree_mask: cached_branch.tree_mask,
                hash_mask: cached_branch.hash_mask,
            }),
        }
    }

    /// Pushes a new branch onto the `branch_stack` which is based on a cached branch obtained via
    /// the trie cursor.
    ///
    /// If there is already a child at the top branch of `branch_stack` occupying this new branch's
    /// nibble then that child will have its short-key split with another new branch, and this
    /// cached branch will be a child of that splitting branch.
    fn push_cached_branch<'a>(
        &mut self,
        targets: &mut Option<TargetsCursor<'a>>,
        cached_path: Nibbles,
        cached_branch: &BranchNodeCompact,
    ) -> Result<(), StateProofError> {
        debug_assert!(
            cached_path.starts_with(&self.branch_path),
            "push_cached_branch called with path {cached_path:?} which is not a child of current branch {:?}",
            self.branch_path,
        );

        let parent_branch = self.branch_stack.last();

        // If both stacks are empty then there were no leaves before this cached branch, push it and
        // be done; the extension of the branch will be its full path.
        if self.child_stack.is_empty() && parent_branch.is_none() {
            self.branch_path = cached_path;
            self.branch_stack
                .push(Self::new_from_cached_branch(cached_branch, cached_path.len() as u8));
            return Ok(())
        }

        // Get the nibble which should be set in the parent branch's `state_mask` for this new
        // branch.
        let cached_branch_nibble = cached_path.get_unchecked(self.branch_path.len());

        // We calculate the `ext_len` of the new branch, and potentially update its nibble if a new
        // parent branch is inserted here, based on the state of the parent branch.
        let (cached_branch_nibble, ext_len) = if parent_branch
            .is_none_or(|parent_branch| parent_branch.state_mask.is_bit_set(cached_branch_nibble))
        {
            // If the `child_stack` is not empty but the `branch_stack` is then it implies that
            // there must be a leaf or extension at the root of the trie whose short-key will get
            // split by a new branch, which will become the parent of both that leaf/extension and
            // this new branch.
            //
            // Similarly, if there is a branch on the `branch_stack` but its `state_mask` bit for
            // this new branch is already set, then there must be a leaf/extension with a short-key
            // to be split.
            debug_assert!(!self
                .child_stack
                .last()
                .expect("already checked for emptiness")
                .short_key()
                .is_empty());

            // Split that leaf/extension's short key with a new branch.
            let (nibble, short_key) = self.push_new_branch(cached_path);
            (nibble, short_key.len())
        } else {
            // If there is a parent branch but its `state_mask` bit for this branch is not set
            // then we can simply calculate the `ext_len` based on the difference of each, minus
            // 1 to account for the nibble in the `state_mask`.
            (cached_branch_nibble, cached_path.len() - self.branch_path.len() - 1)
        };

        // `commit_last_child` relies on the last set bit of the parent branch's `state_mask` to
        // determine the path of the last child on the `child_stack`. Since we are about to
        // change that mask we need to commit that last child first.
        self.commit_last_child(targets)?;

        // When pushing a new branch we need to set its child nibble in the `state_mask` of
        // its parent, if there is one.
        if let Some(parent_branch) = self.branch_stack.last_mut() {
            parent_branch.state_mask.set_bit(cached_branch_nibble);
        }

        // Finally update the `branch_path` and push the new branch.
        self.branch_path = cached_path;
        self.branch_stack.push(Self::new_from_cached_branch(cached_branch, ext_len as u8));

        trace!(
            target: TRACE_TARGET,
            branch = ?self.branch_stack.last(),
            branch_path = ?self.branch_path,
            "Pushed cached branch",
        );

        Ok(())
    }

    /// Attempts to pop off the top branch of the `cached_branch_stack`, returning
    /// [`PopCachedBranchOutcome::Popped`] on success. Returns other variants to indicate that the
    /// stack is empty and what to do about it.
    ///
    /// This method only returns [`PopCachedBranchOutcome::CalculateLeaves`] if there is a cached
    /// branch on top of the stack.
    #[inline]
    fn try_pop_cached_branch(
        &mut self,
        trie_cursor_state: &mut TrieCursorState,
        sub_trie_prefix: &Nibbles,
        uncalculated_lower_bound: &Option<Nibbles>,
    ) -> Result<PopCachedBranchOutcome, StateProofError> {
        // If there is a branch on top of the stack we use that.
        if let Some(cached) = self.cached_branch_stack.pop() {
            return Ok(PopCachedBranchOutcome::Popped(cached));
        }

        // There is no cached branch on the stack. It's possible that another one exists
        // farther on in the trie, but we perform some checks first to prevent unnecessary
        // attempts to find it.

        // If the `uncalculated_lower_bound` is None it indicates that there can be no more
        // leaf data, so similarly there can be no more branches.
        let Some(uncalculated_lower_bound) = uncalculated_lower_bound else {
            return Ok(PopCachedBranchOutcome::Exhausted)
        };

        // If [`TrieCursorState::path`] returns None it means that the cursor has been
        // exhausted, so there can be no more cached data.
        let Some(mut trie_cursor_path) = trie_cursor_state.path() else {
            return Ok(PopCachedBranchOutcome::Exhausted)
        };

        // If the trie cursor is seeked to a branch whose leaves have already been processed
        // then we can't use it, instead we seek forward and try again.
        if trie_cursor_path < uncalculated_lower_bound {
            *trie_cursor_state =
                TrieCursorState::seeked(self.trie_cursor.seek(*uncalculated_lower_bound)?);

            // Having just seeked forward we need to check if the cursor is now exhausted,
            // extracting the new path at the same time.
            if let Some(new_trie_cursor_path) = trie_cursor_state.path() {
                trie_cursor_path = new_trie_cursor_path
            } else {
                return Ok(PopCachedBranchOutcome::Exhausted)
            };
        }

        // If the trie cursor has exceeded the sub-trie then we consider it to be exhausted.
        if !trie_cursor_path.starts_with(sub_trie_prefix) {
            return Ok(PopCachedBranchOutcome::Exhausted)
        }

        // At this point we can be sure that the cursor is in an `Available` state. We know for
        // sure it's not `Exhausted` because of the calls to `path` above, and we know it's not
        // `Taken` because we push all taken branches onto the `cached_branch_stack`, and the
        // stack is empty.
        //
        // We will use this `Available` cached branch as our next branch.
        let cached = trie_cursor_state.take();
        trace!(target: TRACE_TARGET, cached = ?cached, "Pushed next trie node onto cached_branch_stack");

        // If the calculated range is not caught up to the next cached branch it means there
        // are portions of the trie prior to that branch which may need to be calculated;
        // return the uncalculated range up to that branch to make that happen.
        //
        // If the next cached branch's path is all zeros then we can skip this catch-up step,
        // because there cannot be any keys prior to that range.
        let cached_path = &cached.0;
        if uncalculated_lower_bound < cached_path && !PATH_ALL_ZEROS.starts_with(cached_path) {
            let range = (*uncalculated_lower_bound, Some(*cached_path));
            trace!(target: TRACE_TARGET, ?range, "Returning key range to calculate in order to catch up to cached branch");

            // Push the cached branch onto the stack so it's available once the leaf range is done
            // being calculated.
            self.cached_branch_stack.push(cached);

            return Ok(PopCachedBranchOutcome::CalculateLeaves(range));
        }

        Ok(PopCachedBranchOutcome::Popped(cached))
    }

    /// Accepts the current state of both hashed and trie cursors, and determines the next range of
    /// hashed keys which need to be processed using [`Self::push_leaf`].
    ///
    /// This method will use cached branch node data from the trie cursor to skip over all possible
    /// ranges of keys, to reduce computation as much as possible.
    ///
    /// # Returns
    ///
    /// - `None`: No more data to process, finish computation
    ///
    /// - `Some(lower, None)`: Indicates to call `push_leaf` on all keys starting at `lower`, with
    ///   no upper bound. This method won't be called again after this.
    ///
    /// - `Some(lower, Some(upper))`: Indicates to call `push_leaf` on all keys starting at `lower`,
    ///   up to but excluding `upper`, and then call this method once done.
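    ///
    /// An illustrative sequence of events (hypothetical paths; a single cached branch at 0x3 whose
    /// `state_mask` has nibbles 5 and 6 set, with a cached hash only for nibble 5):
    ///
    /// ```text
    /// 1. Some((empty, Some(0x3)))  -- catch up on leaves before the cached branch
    /// 2. (internal) the cached hash is used for child 0x35; no range is returned
    /// 3. Some((0x36, Some(0x37)))  -- the uncached child 0x36 is recalculated
    /// 4. Some((0x4, None))         -- remaining keys after the branch, unbounded
    /// ```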
    #[instrument(target = TRACE_TARGET, level = "trace", skip_all)]
    fn next_uncached_key_range<'a>(
        &mut self,
        targets: &mut Option<TargetsCursor<'a>>,
        trie_cursor_state: &mut TrieCursorState,
        sub_trie_prefix: &Nibbles,
        sub_trie_upper_bound: Option<&Nibbles>,
        mut uncalculated_lower_bound: Option<Nibbles>,
    ) -> Result<Option<(Nibbles, Option<Nibbles>)>, StateProofError> {
        // Pop any under-construction branches that are now complete.
        // All trie data prior to the current cached branch, if any, has been computed. Any branches
        // which were under-construction previously, and which are not on the same path as this
        // cached branch, can be assumed to be completed; they will not have any further keys added
        // to them.
        if let Some(cached_path) = self.cached_branch_stack.last().map(|kv| kv.0) {
            while !cached_path.starts_with(&self.branch_path) {
                self.pop_branch(targets)?;
            }
        }

        loop {
            // Pop the currently cached branch node.
            //
            // NOTE we pop off the `cached_branch_stack` because cloning the `BranchNodeCompact`
            // means cloning an Arc, which incurs synchronization overhead. We have to be sure to
            // push the cached branch back onto the stack once done.
            let (cached_path, cached_branch) = match self.try_pop_cached_branch(
                trie_cursor_state,
                sub_trie_prefix,
                &uncalculated_lower_bound,
            )? {
                PopCachedBranchOutcome::Popped(cached) => cached,
                PopCachedBranchOutcome::Exhausted => {
                    // If cached branches are exhausted it's possible that there is still an
                    // unbounded range of leaves to be processed. `uncalculated_lower_bound` is
                    // used to return that range.
                    trace!(target: TRACE_TARGET, ?uncalculated_lower_bound, "Exhausted cached trie nodes");
                    return Ok(uncalculated_lower_bound
                        .map(|lower| (lower, sub_trie_upper_bound.copied())));
                }
                PopCachedBranchOutcome::CalculateLeaves(range) => {
                    return Ok(Some(range));
                }
            };

            trace!(
                target: TRACE_TARGET,
                branch_path = ?self.branch_path,
                branch_state_mask = ?self.branch_stack.last().map(|b| b.state_mask),
                ?cached_path,
                cached_branch_state_mask = ?cached_branch.state_mask,
                cached_branch_hash_mask = ?cached_branch.hash_mask,
                "loop",
            );

            // Since we've popped all branches which don't start with cached_path, branch_path at
            // this point must be equal to or shorter than cached_path.
            debug_assert!(
                self.branch_path.len() < cached_path.len() || self.branch_path == cached_path,
                "branch_path {:?} is different-or-longer-than cached_path {cached_path:?}",
                self.branch_path
            );

            // If the branch_path != cached_path it means the branch_stack is either empty, or the
            // top branch is the parent of this cached branch. Either way we push a branch
            // corresponding to the cached one onto the stack, so we can begin constructing it.
            if self.branch_path != cached_path {
                self.push_cached_branch(targets, cached_path, &cached_branch)?;
            }

            // At this point the top of the branch stack is the same branch which was found in the
            // cache.
            let curr_branch =
                self.branch_stack.last().expect("top of branch_stack corresponds to cached branch");

            let cached_state_mask = cached_branch.state_mask.get();
            let curr_state_mask = curr_branch.state_mask.get();

            // Determine all child nibbles which are set in the cached branch but not the
            // under-construction branch.
            let next_child_nibbles = curr_state_mask ^ cached_state_mask;
            debug_assert_eq!(
                cached_state_mask | next_child_nibbles, cached_state_mask,
                "curr_branch has state_mask bits set which aren't set on cached_branch. curr_branch: {:?}",
                curr_state_mask,
            );

            // If there are no further children to construct for this branch then pop it off both
            // stacks and loop using the parent branch.
            if next_child_nibbles == 0 {
                trace!(
                    target: TRACE_TARGET,
                    path = ?cached_path,
                    ?curr_branch,
                    ?cached_branch,
                    "No further children, popping branch",
                );
                self.pop_branch(targets)?;

                // No need to pop from `cached_branch_stack`; the current cached branch is already
                // popped (see note at the top of the loop).

                // The just-popped branch is completely processed; we know there can be no more keys
                // with that prefix. Set the lower bound which can be returned from this method to
                // be the next possible prefix, if any.
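                // (Illustrative: incrementing 0x1f yields 0x2 after the trailing zero of 0x20 is
                // stripped; a path of all 0xf nibbles has no successor and yields None.)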
                uncalculated_lower_bound = increment_and_strip_trailing_zeros(&cached_path);

                continue
            }

            // Determine the next nibble of the branch which has not yet been constructed, and
            // determine the child's full path.
            let child_nibble = next_child_nibbles.trailing_zeros() as u8;
            let child_path = self.child_path_at(child_nibble);

            // If the `hash_mask` bit is set for the next child it means the child's hash is cached
            // in the `cached_branch`. We can use that instead of re-calculating the hash of the
            // entire sub-trie.
            //
            // If the child needs to be retained for a proof then we should not use the cached
            // hash, and instead continue on to calculate its node manually.
            if cached_branch.hash_mask.is_bit_set(child_nibble) {
                // Commit the last child. We do this here for two reasons:
                // - `commit_last_child` will check if the last child needs to be retained. We need
                //   to check that before the subsequent `should_retain` call here to prevent
                //   `targets` from being moved beyond the last child before it is checked.
                // - If we do end up using the cached hash value, then we will need to commit the
                //   last child before pushing a new one onto the stack anyway.
                self.commit_last_child(targets)?;

                if !self.should_retain(targets, &child_path, false) {
                    // Pull this child's hash out of the cached branch node. To get the hash's index
                    // we first need to calculate the mask of which cached hashes have already been
                    // used by this branch (if any). The number of set bits in that mask will be the
                    // index of the next hash in the array to use.
                    let curr_hashed_used_mask = cached_branch.hash_mask.get() & curr_state_mask;
                    let hash_idx = curr_hashed_used_mask.count_ones() as usize;
                    let hash = cached_branch.hashes[hash_idx];

                    trace!(
                        target: TRACE_TARGET,
                        ?child_path,
                        ?hash_idx,
                        ?hash,
                        "Using cached hash for child",
                    );

                    self.child_stack.push(ProofTrieBranchChild::RlpNode(RlpNode::word_rlp(&hash)));
                    self.branch_stack
                        .last_mut()
                        .expect("already asserted there is a last branch")
                        .state_mask
                        .set_bit(child_nibble);

                    // Update the `uncalculated_lower_bound` to indicate that the child whose bit
                    // was just set is completely processed.
                    uncalculated_lower_bound = increment_and_strip_trailing_zeros(&child_path);

                    // Push the current cached branch back onto the stack before looping.
                    self.cached_branch_stack.push((cached_path, cached_branch));

                    continue
                }
            }

            // We now want to check if there is a cached branch node at this child. The cached
            // branch node may be the node at this child directly, or this child may be an
            // extension and the cached branch is the child of that extension.

            // All trie nodes prior to `child_path` will not be modified further, so we can seek the
            // trie cursor to the next cached node at-or-after `child_path`.
            if trie_cursor_state.path().is_some_and(|path| path < &child_path) {
                trace!(target: TRACE_TARGET, ?child_path, "Seeking trie cursor to child path");
                *trie_cursor_state = TrieCursorState::seeked(self.trie_cursor.seek(child_path)?);
            }

            // If the next cached branch node is a child of `child_path` then we can assume it is
            // the cached branch for this child. We push it onto the `cached_branch_stack` and loop
            // back to the top.
            if let TrieCursorState::Available(next_cached_path, next_cached_branch) =
                &trie_cursor_state &&
                next_cached_path.starts_with(&child_path)
            {
                // Push the current cached branch back on before pushing its child and then looping
                self.cached_branch_stack.push((cached_path, cached_branch));

                trace!(
                    target: TRACE_TARGET,
                    ?child_path,
                    ?next_cached_path,
                    ?next_cached_branch,
                    "Pushing cached branch for child",
                );
                self.cached_branch_stack.push(trie_cursor_state.take());
                continue;
            }

            // There is no cached data for the sub-trie at this child, we must recalculate the
            // sub-trie root (this child) using the leaves. Return the range of keys based on the
            // child path.
            let child_path_upper = increment_and_strip_trailing_zeros(&child_path);
            trace!(
                target: TRACE_TARGET,
                lower = ?child_path,
                upper = ?child_path_upper,
                "Returning sub-trie's key range to calculate",
            );

            // Push the current cached branch back onto the stack before returning.
            self.cached_branch_stack.push((cached_path, cached_branch));

            return Ok(Some((child_path, child_path_upper)));
        }
    }

    /// Calculates trie nodes and retains proofs for targeted nodes within a sub-trie. The
    /// sub-trie's bounds are derived from the `sub_trie_targets` argument; the upper bound is
    /// exclusive, and None indicates unbounded.
    #[instrument(
        target = TRACE_TARGET,
        level = "trace",
        skip_all,
        fields(prefix = ?sub_trie_targets.prefix),
    )]
    fn proof_subtrie<'a>(
        &mut self,
        value_encoder: &mut VE,
        trie_cursor_state: &mut TrieCursorState,
        hashed_cursor_current: &mut Option<(Nibbles, VE::DeferredEncoder)>,
        sub_trie_targets: SubTrieTargets<'a>,
    ) -> Result<(), StateProofError> {
        let sub_trie_upper_bound = sub_trie_targets.upper_bound();

        // Wrap targets into a `TargetsCursor`. targets can be empty if we only want to calculate
        // the root, in which case we don't need a cursor.
        let mut targets = if sub_trie_targets.targets.is_empty() {
            None
        } else {
            Some(TargetsCursor::new(sub_trie_targets.targets))
        };

        // Ensure initial state is cleared. By the end of the method call these should be empty once
        // again.
        debug_assert!(self.cached_branch_stack.is_empty());
        debug_assert!(self.branch_stack.is_empty());
        debug_assert!(self.branch_path.is_empty());
        debug_assert!(self.child_stack.is_empty());

        // `next_uncached_key_range`, which will be called in the loop below, expects the trie
        // cursor to have already been seeked. If it's not yet seeked, or seeked to a prior node,
        // then we seek it to the prefix (the first possible node) to initialize it.
        if trie_cursor_state.before(&sub_trie_targets.prefix) {
            trace!(target: TRACE_TARGET, "Doing initial seek of trie cursor");
            *trie_cursor_state =
                TrieCursorState::seeked(self.trie_cursor.seek(sub_trie_targets.prefix)?);
        }

        // `uncalculated_lower_bound` tracks the lower bound of node paths which have yet to be
        // visited, either via the hashed key cursor (`calculate_key_range`) or trie cursor
        // (`next_uncached_key_range`). If/when this becomes None then there are no further nodes
        // which could exist.
        let mut uncalculated_lower_bound = Some(sub_trie_targets.prefix);

        trace!(target: TRACE_TARGET, "Starting loop");
        loop {
            // Save the previous lower bound to detect forward progress.
            let prev_uncalculated_lower_bound = uncalculated_lower_bound;

            // Determine the range of keys of the overall trie which need to be re-computed.
            let Some((calc_lower_bound, calc_upper_bound)) = self.next_uncached_key_range(
                &mut targets,
                trie_cursor_state,
                &sub_trie_targets.prefix,
                sub_trie_upper_bound.as_ref(),
                prev_uncalculated_lower_bound,
            )?
            else {
                // If `next_uncached_key_range` determines that there can be no more keys then
                // complete the computation.
                break;
            };

            // Forward-progress guard: detect trie inconsistencies that would cause infinite loops.
            // If `next_uncached_key_range` returns a range that starts before the previous
            // lower bound, we've gone backwards and would loop forever.
            //
            // This can specifically happen when there is a cached branch which shouldn't exist, or
            // if a state mask bit is set on a cached branch which shouldn't be.
            if let Some(prev_lower) = prev_uncalculated_lower_bound.as_ref() &&
                calc_lower_bound < *prev_lower
            {
                let msg = format!(
                    "next_uncached_key_range went backwards: calc_lower={calc_lower_bound:?} < \
                     prev_lower={prev_lower:?}, calc_upper={calc_upper_bound:?}, prefix={:?}",
                    sub_trie_targets.prefix,
                );
                error!(target: TRACE_TARGET, "{msg}");
                return Err(StateProofError::TrieInconsistency(msg));
            }

            // Calculate the trie for that range of keys
            self.calculate_key_range(
                value_encoder,
                &mut targets,
                hashed_cursor_current,
                calc_lower_bound,
                calc_upper_bound,
            )?;

            // Once outside `calculate_key_range`, `hashed_cursor_current` will be at the first key
            // after the range.
            //
            // If the `hashed_cursor_current` is None (exhausted), or not within the range of the
            // sub-trie, then there are no more keys at all, meaning the trie couldn't possibly have
            // more data and we should complete computation.
            if hashed_cursor_current
                .as_ref()
                .is_none_or(|(key, _)| !key.starts_with(&sub_trie_targets.prefix))
            {
                break;
            }

            // The upper bound of the previous calculation becomes the lower bound of the
            // uncalculated range, for which we'll once again check for cached data.
            uncalculated_lower_bound = calc_upper_bound;
        }

        // Once there are no more leaves we can pop the remaining branches, if any.
        trace!(target: TRACE_TARGET, "Exited loop, popping remaining branches");
        while !self.branch_stack.is_empty() {
            self.pop_branch(&mut targets)?;
        }

        // At this point the branch stack should be empty. If the child stack is empty it means no
        // keys were ever iterated from the hashed cursor in the first place. Otherwise there should
        // only be a single node left: the root node.
        debug_assert!(self.branch_stack.is_empty());
        debug_assert!(self.branch_path.is_empty());
        debug_assert!(self.child_stack.len() < 2);

        // The `cached_branch_stack` may still have cached branches on it, as it's not affected by
        // `pop_branch`, but it is no longer needed and should be cleared.
        self.cached_branch_stack.clear();

        // We always pop the root node off of the `child_stack` in order to empty it, however we
        // might not want to retain the node unless the `SubTrieTargets` indicates it.
        trace!(
            target: TRACE_TARGET,
            retain_root = ?sub_trie_targets.retain_root,
            child_stack_empty = self.child_stack.is_empty(),
            "Maybe retaining root",
        );
        match (sub_trie_targets.retain_root, self.child_stack.pop()) {
            (false, _) => {
                // Whether or not the root node exists, we don't want it.
            }
            (true, None) => {
                // If `child_stack` is empty it means there were no keys at all; retain an empty
                // root node.
                self.retained_proofs.push(ProofTrieNode {
                    path: Nibbles::new(), // root path
                    node: TrieNode::EmptyRoot,
                    masks: None,
                });
            }
            (true, Some(root_node)) => {
                // Encode and retain the root node.
                self.rlp_encode_buf.clear();
                let root_node =
                    root_node.into_proof_trie_node(Nibbles::new(), &mut self.rlp_encode_buf)?;
                self.retained_proofs.push(root_node);
            }
        }

        Ok(())
    }

    /// Internal implementation of proof calculation. Assumes both cursors have already been reset.
    /// See docs on [`Self::proof`] for expected behavior.
    fn proof_inner(
        &mut self,
        value_encoder: &mut VE,
        targets: &mut [Target],
    ) -> Result<Vec<ProofTrieNode>, StateProofError> {
        // If there are no targets then nothing could be returned, return early.
        if targets.is_empty() {
            trace!(target: TRACE_TARGET, "Empty targets, returning");
            return Ok(Vec::new())
        }

        // Initialize the variables which track the state of the two cursors. Both indicate the
        // cursors are unseeked.
        let mut trie_cursor_state = TrieCursorState::unseeked();
        let mut hashed_cursor_current: Option<(Nibbles, VE::DeferredEncoder)> = None;

        // Divide targets into chunks, each chunk corresponding to a different sub-trie within the
        // overall trie, and handle all proofs within that sub-trie.
        for sub_trie_targets in iter_sub_trie_targets(targets) {
            self.proof_subtrie(
                value_encoder,
                &mut trie_cursor_state,
                &mut hashed_cursor_current,
                sub_trie_targets,
            )?;
        }

        trace!(
            target: TRACE_TARGET,
            retained_proofs_len = ?self.retained_proofs.len(),
            "proof_inner: returning",
        );
        Ok(core::mem::take(&mut self.retained_proofs))
    }

    /// Generate a proof for the given targets.
    ///
    /// Given a set of [`Target`]s, returns nodes whose paths are a prefix of any target. The
    /// returned nodes will be sorted depth-first by path.
    ///
    /// # Panics
    ///
    /// In debug builds, panics if the targets are not sorted lexicographically.
    #[instrument(target = TRACE_TARGET, level = "trace", skip_all)]
    pub fn proof(
        &mut self,
        value_encoder: &mut VE,
        targets: &mut [Target],
    ) -> Result<Vec<ProofTrieNode>, StateProofError> {
        self.trie_cursor.reset();
        self.hashed_cursor.reset();
        self.proof_inner(value_encoder, targets)
    }

    /// Computes the root hash from a set of proof nodes.
    ///
    /// Returns `None` if there is no root node (partial proof), otherwise returns the hash of the
    /// root node.
    ///
    /// This method reuses the internal RLP encode buffer for efficiency.
    pub fn compute_root_hash(
        &mut self,
        proof_nodes: &[ProofTrieNode],
    ) -> Result<Option<B256>, StateProofError> {
        // Find the root node (the node at the empty path)
        let root_node = proof_nodes.iter().find(|node| node.path.is_empty());

        let Some(root) = root_node else {
            return Ok(None);
        };

        // Compute the hash of the root node
        self.rlp_encode_buf.clear();
        root.node.encode(&mut self.rlp_encode_buf);
        let root_hash = keccak256(&self.rlp_encode_buf);

        Ok(Some(root_hash))
    }
1365
1366 /// Calculates the root node of the trie.
1367 ///
1368     /// This method neither accepts targets nor retains proofs. Returns the root node, which can
1369 /// be used to compute the root hash via [`Self::compute_root_hash`].
1370 #[instrument(target = TRACE_TARGET, level = "trace", skip(self, value_encoder))]
1371 pub fn root_node(&mut self, value_encoder: &mut VE) -> Result<ProofTrieNode, StateProofError> {
1372 // Initialize the variables which track the state of the two cursors. Both indicate the
1373 // cursors are unseeked.
1374 let mut trie_cursor_state = TrieCursorState::unseeked();
1375 let mut hashed_cursor_current: Option<(Nibbles, VE::DeferredEncoder)> = None;
1376
1377 static EMPTY_TARGETS: [Target; 0] = [];
1378 let sub_trie_targets =
1379 SubTrieTargets { prefix: Nibbles::new(), targets: &EMPTY_TARGETS, retain_root: true };
1380
1381 self.proof_subtrie(
1382 value_encoder,
1383 &mut trie_cursor_state,
1384 &mut hashed_cursor_current,
1385 sub_trie_targets,
1386 )?;
1387
1388         // `proof_subtrie` will retain the root node if `retain_root` is true, regardless of
1389         // whether there are any targets.
1390 let mut proofs = core::mem::take(&mut self.retained_proofs);
1391 trace!(
1392 target: TRACE_TARGET,
1393 proofs_len = ?proofs.len(),
1394 "root_node: extracting root",
1395 );
1396
1397         // The root node is at the empty path; it must exist since `retain_root` is true. And
1398         // since `targets` is empty, there should be no other retained proofs.
1399 debug_assert_eq!(
1400 proofs.len(), 1,
1401 "prefix is empty, retain_root is true, and targets is empty, so there must be only the root node"
1402 );
1403
1404         // Remove the root node (the node at the empty path); it is the only retained proof.
1405 let root_node = proofs.pop().expect("prefix is empty, retain_root is true, and targets is empty, so there must be only the root node");
1406
1407 Ok(root_node)
1408 }
1409}
1410
1411/// A proof calculator for storage tries.
1412pub type StorageProofCalculator<TC, HC> = ProofCalculator<TC, HC, StorageValueEncoder>;
1413
1414impl<TC, HC> StorageProofCalculator<TC, HC>
1415where
1416 TC: TrieStorageCursor,
1417 HC: HashedStorageCursor<Value = U256>,
1418{
1419 /// Create a new [`StorageProofCalculator`] instance.
1420 pub fn new_storage(trie_cursor: TC, hashed_cursor: HC) -> Self {
1421 Self::new(trie_cursor, hashed_cursor)
1422 }
1423
1424 /// Generate a proof for a storage trie at the given hashed address.
1425 ///
1426 /// Given a set of [`Target`]s, returns nodes whose paths are a prefix of any target. The
1427 /// returned nodes will be sorted depth-first by path.
1428 ///
1429 /// # Panics
1430 ///
1431 /// In debug builds, panics if the targets are not sorted lexicographically.
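    ///
    /// # Example
    ///
    /// A sketch (not compiled here); the cursors, `hashed_address`, and `hashed_slot` are
    /// assumed to be provided by the caller:
    ///
    /// ```ignore
    /// let mut calculator = StorageProofCalculator::new_storage(trie_cursor, hashed_cursor);
    /// let mut targets = vec![Target::new(hashed_slot)];
    /// let proof_nodes = calculator.storage_proof(hashed_address, &mut targets)?;
    /// ```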
1432 #[instrument(target = TRACE_TARGET, level = "trace", skip(self, targets))]
1433 pub fn storage_proof(
1434 &mut self,
1435 hashed_address: B256,
1436 targets: &mut [Target],
1437 ) -> Result<Vec<ProofTrieNode>, StateProofError> {
1438 self.hashed_cursor.set_hashed_address(hashed_address);
1439
1440 // Shortcut: check if storage is empty
1441 if self.hashed_cursor.is_storage_empty()? {
1442 // Return a single EmptyRoot node at the root path
1443 return Ok(vec![ProofTrieNode {
1444 path: Nibbles::default(),
1445 node: TrieNode::EmptyRoot,
1446 masks: None,
1447 }])
1448 }
1449
1450 // Don't call `set_hashed_address` on the trie cursor until after the previous shortcut has
1451 // been checked.
1452 self.trie_cursor.set_hashed_address(hashed_address);
1453
1454 // Create a mutable storage value encoder
1455 let mut storage_value_encoder = StorageValueEncoder;
1456 self.proof_inner(&mut storage_value_encoder, targets)
1457 }
1458
1459 /// Calculates the root node of a storage trie.
1460 ///
1461     /// This method neither accepts targets nor retains proofs. Returns the root node, which can
1462 /// be used to compute the root hash via [`Self::compute_root_hash`].
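    ///
    /// A sketch (not compiled here):
    ///
    /// ```ignore
    /// let root_node = calculator.storage_root_node(hashed_address)?;
    /// let root_hash = calculator.compute_root_hash(&[root_node])?;
    /// ```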
1463 #[instrument(target = TRACE_TARGET, level = "trace", skip(self))]
1464 pub fn storage_root_node(
1465 &mut self,
1466 hashed_address: B256,
1467 ) -> Result<ProofTrieNode, StateProofError> {
1468 self.hashed_cursor.set_hashed_address(hashed_address);
1469
1470 if self.hashed_cursor.is_storage_empty()? {
1471 return Ok(ProofTrieNode {
1472 path: Nibbles::default(),
1473 node: TrieNode::EmptyRoot,
1474 masks: None,
1475 })
1476 }
1477
1478 // Don't call `set_hashed_address` on the trie cursor until after the previous shortcut has
1479 // been checked.
1480 self.trie_cursor.set_hashed_address(hashed_address);
1481
1482 // Create a mutable storage value encoder
1483 let mut storage_value_encoder = StorageValueEncoder;
1484 self.root_node(&mut storage_value_encoder)
1485 }
1486}
1487
1488/// Helper type wrapping a slice of [`Target`]s, primarily used to iterate through targets in
1489/// [`ProofCalculator::should_retain`].
1490///
1491/// It is assumed that the underlying slice is never empty, and that the iterator is never
1492/// exhausted.
1493struct TargetsCursor<'a> {
1494 targets: &'a [Target],
1495 i: usize,
1496}
1497
1498impl<'a> TargetsCursor<'a> {
1499     /// Wraps a slice of [`Target`]s in a `TargetsCursor`.
1500 ///
1501 /// # Panics
1502 ///
1503 /// Will panic in debug mode if called with an empty slice.
1504 fn new(targets: &'a [Target]) -> Self {
1505 debug_assert!(!targets.is_empty());
1506 Self { targets, i: 0 }
1507 }
1508
1509     /// Returns the [`Target`] the cursor currently points at, along with the next one (if any).
1510 fn current(&self) -> (&'a Target, Option<&'a Target>) {
1511 (&self.targets[self.i], self.targets.get(self.i + 1))
1512 }
1513
1514     /// Advances the cursor forward and returns the new current and next [`Target`]s.
1515 ///
1516 /// # Panics
1517 ///
1518 /// Will panic if the cursor is exhausted.
1519 fn next(&mut self) -> (&'a Target, Option<&'a Target>) {
1520 self.i += 1;
1521 debug_assert!(self.i < self.targets.len());
1522 self.current()
1523 }
1524
1525     /// Iterates forwards over the slice, starting from the [`Target`] after the current one.
1526 fn skip_iter(&self) -> impl Iterator<Item = &'a Target> {
1527 self.targets[self.i + 1..].iter()
1528 }
1529
1530     /// Iterates backwards over the slice, starting from the [`Target`] previous to the current one.
1531 fn rev_iter(&self) -> impl Iterator<Item = &'a Target> {
1532 self.targets[..self.i].iter().rev()
1533 }
1534}
1535
1536/// Used to track the state of the trie cursor, allowing us to differentiate between a branch having
1537/// been taken (used as a cached branch) and the cursor having been exhausted.
1538#[derive(Debug)]
1539enum TrieCursorState {
1540 /// The initial state of the cursor, indicating it's never been seeked.
1541 Unseeked,
1542 /// Cursor is seeked to this path and the node has not been used yet.
1543 Available(Nibbles, BranchNodeCompact),
1544 /// Cursor is seeked to this path, but the node has been used.
1545 Taken(Nibbles),
1546 /// Cursor has been exhausted.
1547 Exhausted,
1548}
1549
1550impl TrieCursorState {
1551     /// Creates a [`Self::Unseeked`], indicating the cursor has never been seeked.
1552 const fn unseeked() -> Self {
1553 Self::Unseeked
1554 }
1555
1556 /// Creates a [`Self`] based on an entry returned from the cursor itself.
1557 fn seeked(entry: Option<(Nibbles, BranchNodeCompact)>) -> Self {
1558 entry.map_or(Self::Exhausted, |(path, node)| Self::Available(path, node))
1559 }
1560
1561 /// Returns the path the cursor is seeked to, or None if it's exhausted.
1562 ///
1563 /// # Panics
1564 ///
1565 /// Panics if the cursor is unseeked.
1566 const fn path(&self) -> Option<&Nibbles> {
1567 match self {
1568 Self::Unseeked => panic!("cursor is unseeked"),
1569 Self::Available(path, _) | Self::Taken(path) => Some(path),
1570 Self::Exhausted => None,
1571 }
1572 }
1573
1574     /// Returns true if the cursor is unseeked, or if the given path sorts before the seeked path.
1575 fn before(&self, path: &Nibbles) -> bool {
1576 match self {
1577 Self::Unseeked => true,
1578 Self::Available(seeked_to, _) | Self::Taken(seeked_to) => path < seeked_to,
1579 Self::Exhausted => false,
1580 }
1581 }
1582
1583     /// Takes the path and node from a [`Self::Available`], leaving [`Self::Taken`]. Panics otherwise.
1584 fn take(&mut self) -> (Nibbles, BranchNodeCompact) {
1585 let Self::Available(path, _) = self else {
1586 panic!("take called on non-Available: {self:?}")
1587 };
1588
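        // Copy the path out first so it can be used to build the replacement `Taken` state; the
        // `mem::replace` below then yields the old `Available`, from which the node is moved out.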
1589 let path = *path;
1590 let Self::Available(path, node) = core::mem::replace(self, Self::Taken(path)) else {
1591 unreachable!("already checked that self is Self::Available");
1592 };
1593
1594 (path, node)
1595 }
1596}
1597
1598/// Describes the state of the currently cached branch node (if any).
1599enum PopCachedBranchOutcome {
1600 /// Cached branch has been popped from the `cached_branch_stack` and is ready to be used.
1601 Popped((Nibbles, BranchNodeCompact)),
1602 /// All cached branches have been exhausted.
1603 Exhausted,
1604     /// Need to calculate leaves from this catch-up range (exclusive upper bound) before the
1605     /// cached branch can be used. If the upper bound is `None`, the range is unbounded.
1606 CalculateLeaves((Nibbles, Option<Nibbles>)),
1607}
1608
1609/// Increments the nibbles and strips any trailing zeros.
1610///
1611 /// This function wraps `Nibbles::increment` and, when the result has trailing zeros,
1612 /// strips those zeros using bit manipulation on the underlying U256.
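 ///
 /// For example (mirroring `test_increment_and_strip_trailing_zeros` below), incrementing
 /// `[0x0, 0xF, 0xF]` yields `[0x1, 0x0, 0x0]`, which is stripped to `[0x1]`:
 ///
 /// ```ignore
 /// assert_eq!(
 ///     increment_and_strip_trailing_zeros(&Nibbles::from_nibbles([0x0, 0xF, 0xF])),
 ///     Some(Nibbles::from_nibbles([0x1])),
 /// );
 /// ```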
1613fn increment_and_strip_trailing_zeros(nibbles: &Nibbles) -> Option<Nibbles> {
1614 let mut result = nibbles.increment()?;
1615
1616 // If result is empty, just return it
1617 if result.is_empty() {
1618 return Some(result);
1619 }
1620
1621     // Nibbles are left-aligned in the backing U256, so trailing zero bits mark trailing zero nibbles.
1622 let uint_val = *result.as_mut_uint_unchecked();
1623 let non_zero_prefix_len = 64 - (uint_val.trailing_zeros() / 4);
1624 result.truncate(non_zero_prefix_len);
1625
1626 Some(result)
1627}
1628
1629#[cfg(test)]
1630mod tests {
1631 use super::*;
1632 use crate::{
1633 hashed_cursor::{
1634 mock::MockHashedCursorFactory, HashedCursorFactory, HashedCursorMetricsCache,
1635 InstrumentedHashedCursor,
1636 },
1637 proof::Proof,
1638 trie_cursor::{
1639 depth_first, mock::MockTrieCursorFactory, InstrumentedTrieCursor, TrieCursorFactory,
1640 TrieCursorMetricsCache,
1641 },
1642 };
1643 use alloy_primitives::map::{B256Map, B256Set};
1644 use alloy_rlp::Decodable;
1645 use itertools::Itertools;
1646 use reth_primitives_traits::Account;
1647 use reth_trie_common::{
1648 updates::{StorageTrieUpdates, TrieUpdates},
1649 HashedPostState, MultiProofTargets, TrieNode,
1650 };
1651
1652 /// Target to use with the `tracing` crate.
1653 static TRACE_TARGET: &str = "trie::proof_v2::tests";
1654
1655 /// A test harness for comparing `ProofCalculator` and legacy `Proof` implementations.
1656 ///
1657 /// This harness creates mock cursor factories from a `HashedPostState` and provides
1658 /// a method to test that both proof implementations produce equivalent results.
1659 struct ProofTestHarness {
1660 /// Mock factory for trie cursors (empty by default for leaf-only tests)
1661 trie_cursor_factory: MockTrieCursorFactory,
1662 /// Mock factory for hashed cursors, populated from `HashedPostState`
1663 hashed_cursor_factory: MockHashedCursorFactory,
1664 /// The expected state root, calculated by `StateRoot`
1665 expected_root: B256,
1666 }
1667
1668 impl ProofTestHarness {
1669 /// Creates a new test harness from a `HashedPostState`.
1670 ///
1671 /// The `HashedPostState` is used to populate the mock hashed cursor factory directly.
1672 /// The trie cursor factory is initialized from `TrieUpdates` generated by `StateRoot`.
1673 fn new(post_state: HashedPostState) -> Self {
1674 // Create empty trie cursor factory to serve as the initial state for StateRoot
1675 // Ensure that there's a storage trie dataset for every account, to make
1676 // `MockTrieCursorFactory` happy.
1677 let storage_tries: B256Map<_> = post_state
1678 .accounts
1679 .keys()
1680 .copied()
1681 .map(|addr| (addr, StorageTrieUpdates::default()))
1682 .collect();
1683
1684 let empty_trie_cursor_factory = MockTrieCursorFactory::from_trie_updates(TrieUpdates {
1685 storage_tries: storage_tries.clone(),
1686 ..Default::default()
1687 });
1688
1689 // Create mock hashed cursor factory from the post state
1690 let hashed_cursor_factory = MockHashedCursorFactory::from_hashed_post_state(post_state);
1691
1692 // Generate TrieUpdates using StateRoot
1693 let (expected_root, mut trie_updates) =
1694 crate::StateRoot::new(empty_trie_cursor_factory, hashed_cursor_factory.clone())
1695 .root_with_updates()
1696 .expect("StateRoot should succeed");
1697
1698 // Continue using empty storage tries for each account, to keep `MockTrieCursorFactory`
1699 // happy.
1700 trie_updates.storage_tries = storage_tries;
1701
1702 // Initialize trie cursor factory from the generated TrieUpdates
1703 let trie_cursor_factory = MockTrieCursorFactory::from_trie_updates(trie_updates);
1704
1705 Self { trie_cursor_factory, hashed_cursor_factory, expected_root }
1706 }
1707
1708 /// Asserts that `ProofCalculator` and legacy `Proof` produce equivalent results for account
1709 /// proofs.
1710 ///
1711 /// This method calls both implementations with the given account targets and compares
1712 /// the results.
1713 fn assert_proof(
1714 &self,
1715 targets: impl IntoIterator<Item = Target>,
1716 ) -> Result<(), StateProofError> {
1717 let targets_vec = targets.into_iter().collect::<Vec<_>>();
1718
1719 // Convert Target keys to MultiProofTargets for legacy implementation
1720 // For account-only proofs, each account maps to an empty storage set
1721 // Legacy implementation only uses the keys, not the prefix
1722 let legacy_targets = targets_vec
1723 .iter()
1724 .map(|target| (B256::from_slice(&target.key.pack()), B256Set::default()))
1725 .collect::<MultiProofTargets>();
1726
1727 // Create ProofCalculator (proof_v2) with account cursors
1728 let trie_cursor = self.trie_cursor_factory.account_trie_cursor()?;
1729 let hashed_cursor = self.hashed_cursor_factory.hashed_account_cursor()?;
1730
1731 // Collect metrics for cursors
1732 let mut trie_cursor_metrics = TrieCursorMetricsCache::default();
1733 let trie_cursor = InstrumentedTrieCursor::new(trie_cursor, &mut trie_cursor_metrics);
1734 let mut hashed_cursor_metrics = HashedCursorMetricsCache::default();
1735 let hashed_cursor =
1736 InstrumentedHashedCursor::new(hashed_cursor, &mut hashed_cursor_metrics);
1737
1738 // Call ProofCalculator::proof with account targets
1739 let mut value_encoder = SyncAccountValueEncoder::new(
1740 self.trie_cursor_factory.clone(),
1741 self.hashed_cursor_factory.clone(),
1742 );
1743 let mut proof_calculator = ProofCalculator::new(trie_cursor, hashed_cursor);
1744 let proof_v2_result =
1745 proof_calculator.proof(&mut value_encoder, &mut targets_vec.clone())?;
1746
1747 // Output metrics
1748 trace!(target: TRACE_TARGET, ?trie_cursor_metrics, "V2 trie cursor metrics");
1749 trace!(target: TRACE_TARGET, ?hashed_cursor_metrics, "V2 hashed cursor metrics");
1750
1751 // Call Proof::multiproof (legacy implementation)
1752 let proof_legacy_result =
1753 Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone())
1754 .with_branch_node_masks(true)
1755 .multiproof(legacy_targets)?;
1756
1757 // Helper function to check if a node path matches at least one target
1758 let node_matches_target = |node_path: &Nibbles| -> bool {
1759 targets_vec.iter().any(|target| {
1760 // Node path must be a prefix of the target's key
1761 target.key.starts_with(node_path) &&
1762 // Node path must be at least `min_len` long
1763 node_path.len() >= target.min_len as usize
1764 })
1765 };
1766
1767 // Decode and sort legacy proof nodes, filtering to only those that match at least one
1768 // target
1769 let proof_legacy_nodes = proof_legacy_result
1770 .account_subtree
1771 .iter()
1772 .filter(|(path, _)| node_matches_target(path))
1773 .map(|(path, node_enc)| {
1774 let mut buf = node_enc.as_ref();
1775 let node = TrieNode::decode(&mut buf)
1776 .expect("legacy implementation should not produce malformed proof nodes");
1777
1778 // The legacy proof calculator will calculate masks for the root node, even
1779                 // The legacy proof calculator will calculate masks for the root node, even
1780                 // though the root node is never stored, so its masks aren't really valid.
1781 None
1782 } else {
1783 proof_legacy_result.branch_node_masks.get(path).copied()
1784 };
1785
1786 ProofTrieNode { path: *path, node, masks }
1787 })
1788 .sorted_by(|a, b| depth_first::cmp(&a.path, &b.path))
1789 .collect::<Vec<_>>();
1790
1791 // Basic comparison: both should succeed and produce identical results
1792 pretty_assertions::assert_eq!(proof_legacy_nodes, proof_v2_result);
1793
1794 // Also test root_node - get a fresh calculator and verify it returns the root node
1795 // that hashes to the expected root
1796 let trie_cursor = self.trie_cursor_factory.account_trie_cursor()?;
1797 let hashed_cursor = self.hashed_cursor_factory.hashed_account_cursor()?;
1798 let mut value_encoder = SyncAccountValueEncoder::new(
1799 self.trie_cursor_factory.clone(),
1800 self.hashed_cursor_factory.clone(),
1801 );
1802 let mut proof_calculator = ProofCalculator::new(trie_cursor, hashed_cursor);
1803 let root_node = proof_calculator.root_node(&mut value_encoder)?;
1804
1805 // The root node should be at the empty path
1806 assert!(root_node.path.is_empty(), "root_node should return node at empty path");
1807
1808 // The hash of the root node should match the expected root from legacy StateRoot
1809 let root_hash = proof_calculator
1810 .compute_root_hash(&[root_node])?
1811 .expect("root_node returns a node at empty path");
1812 pretty_assertions::assert_eq!(self.expected_root, root_hash);
1813
1814 Ok(())
1815 }
1816 }
1817
1818 mod proptest_tests {
1819 use super::*;
1820 use alloy_primitives::{map::B256Map, U256};
1821 use proptest::prelude::*;
1822 use reth_trie_common::HashedPostState;
1823
1824 /// Generate a strategy for Account values
1825 fn account_strategy() -> impl Strategy<Value = Account> {
1826 (any::<u64>(), any::<u64>(), any::<[u8; 32]>()).prop_map(
1827 |(nonce, balance, code_hash)| Account {
1828 nonce,
1829 balance: U256::from(balance),
1830 bytecode_hash: Some(B256::from(code_hash)),
1831 },
1832 )
1833 }
1834
1835 /// Generate a strategy for `HashedPostState` with random accounts
1836 fn hashed_post_state_strategy() -> impl Strategy<Value = HashedPostState> {
1837 prop::collection::vec((any::<[u8; 32]>(), account_strategy()), 0..=100).prop_map(
1838 |accounts| {
1839 let account_map = accounts
1840 .into_iter()
1841 .map(|(addr_bytes, account)| (B256::from(addr_bytes), Some(account)))
1842 .collect::<B256Map<_>>();
1843
1844 HashedPostState { accounts: account_map, ..Default::default() }
1845 },
1846 )
1847 }
1848
1849 /// Generate a strategy for proof targets that are 80% from the `HashedPostState` accounts
1850 /// and 20% random keys. Each target has a random `min_len` of 0..16.
1851 fn proof_targets_strategy(account_keys: Vec<B256>) -> impl Strategy<Value = Vec<Target>> {
1852 let num_accounts = account_keys.len();
1853
1854 // Generate between 0 and (num_accounts + 5) targets
1855 let target_count = 0..=(num_accounts + 5);
1856
1857 target_count.prop_flat_map(move |count| {
1858 let account_keys = account_keys.clone();
1859 prop::collection::vec(
1860 (
1861 prop::bool::weighted(0.8).prop_flat_map(move |from_accounts| {
1862 if from_accounts && !account_keys.is_empty() {
1863 // 80% chance: pick from existing account keys
1864 prop::sample::select(account_keys.clone()).boxed()
1865 } else {
1866 // 20% chance: generate random B256
1867 any::<[u8; 32]>().prop_map(B256::from).boxed()
1868 }
1869 }),
1870 0u8..16u8, // Random min_len from 0 to 15
1871 )
1872 .prop_map(|(key, min_len)| Target::new(key).with_min_len(min_len)),
1873 count,
1874 )
1875 })
1876 }
1877
1878 proptest! {
1879 #![proptest_config(ProptestConfig::with_cases(8000))]
1880 #[test]
1881 /// Tests that ProofCalculator produces valid proofs for randomly generated
1882 /// HashedPostState with proof targets.
1883 ///
1884 /// This test:
1885 /// - Generates random accounts in a HashedPostState
1886 /// - Generates proof targets: 80% from existing account keys, 20% random
1887 /// - Creates a test harness with the generated state
1888 /// - Calls assert_proof with the generated targets
1889 /// - Verifies both ProofCalculator and legacy Proof produce equivalent results
1890 fn proptest_proof_with_targets(
1891 (post_state, targets) in hashed_post_state_strategy()
1892 .prop_flat_map(|post_state| {
1893 let mut account_keys: Vec<B256> = post_state.accounts.keys().copied().collect();
1894 // Sort to ensure deterministic order when using PROPTEST_RNG_SEED
1895 account_keys.sort_unstable();
1896 let targets_strategy = proof_targets_strategy(account_keys);
1897 (Just(post_state), targets_strategy)
1898 })
1899 ) {
1900 reth_tracing::init_test_tracing();
1901 let harness = ProofTestHarness::new(post_state);
1902
1903 harness.assert_proof(targets).expect("Proof generation failed");
1904 }
1905 }
1906 }
1907
1908 #[test]
1909 fn test_big_trie() {
1910 use rand::prelude::*;
1911
1912 reth_tracing::init_test_tracing();
1913 let mut rng = rand::rngs::SmallRng::seed_from_u64(1);
1914
1915 let mut rand_b256 = || {
1916 let mut buf: [u8; 32] = [0; 32];
1917 rng.fill_bytes(&mut buf);
1918 B256::from_slice(&buf)
1919 };
1920
1921 // Generate random HashedPostState.
1922 let mut post_state = HashedPostState::default();
1923 for _ in 0..10240 {
1924 let hashed_addr = rand_b256();
1925 let account = Account { bytecode_hash: Some(hashed_addr), ..Default::default() };
1926 post_state.accounts.insert(hashed_addr, Some(account));
1927 }
1928
1929         // Collect targets: partly from real keys, partly from random keys which probably
1930         // won't exist.
1931 let mut targets = post_state.accounts.keys().copied().collect::<Vec<_>>();
1932 for _ in 0..post_state.accounts.len() / 5 {
1933 targets.push(rand_b256());
1934 }
1935 targets.sort();
1936
1937 // Create test harness
1938 let harness = ProofTestHarness::new(post_state);
1939
1940 // Assert the proof (convert B256 to Target with no min_len for this test)
1941 harness
1942 .assert_proof(targets.into_iter().map(Target::new))
1943 .expect("Proof generation failed");
1944 }
1945
1946 #[test]
1947 fn test_increment_and_strip_trailing_zeros() {
1948 let test_cases: Vec<(Nibbles, Option<Nibbles>)> = vec![
1949 // Basic increment without trailing zeros
1950 (Nibbles::from_nibbles([0x1, 0x2, 0x3]), Some(Nibbles::from_nibbles([0x1, 0x2, 0x4]))),
1951 // Increment with trailing zeros - should be stripped
1952 (Nibbles::from_nibbles([0x0, 0x0, 0xF]), Some(Nibbles::from_nibbles([0x0, 0x1]))),
1953 (Nibbles::from_nibbles([0x0, 0xF, 0xF]), Some(Nibbles::from_nibbles([0x1]))),
1954 // Overflow case
1955 (Nibbles::from_nibbles([0xF, 0xF, 0xF]), None),
1956 // Empty nibbles
1957 (Nibbles::new(), None),
1958 // Single nibble
1959 (Nibbles::from_nibbles([0x5]), Some(Nibbles::from_nibbles([0x6]))),
1960 // All Fs except last - results in trailing zeros after increment
1961 (Nibbles::from_nibbles([0xE, 0xF, 0xF]), Some(Nibbles::from_nibbles([0xF]))),
1962 ];
1963
1964 for (input, expected) in test_cases {
1965 let result = increment_and_strip_trailing_zeros(&input);
1966 assert_eq!(result, expected, "Failed for input: {:?}", input);
1967 }
1968 }
1969
1970 #[test]
1971 fn test_failing_proptest_case_0() {
1972 use alloy_primitives::{hex, map::B256Map};
1973
1974 reth_tracing::init_test_tracing();
1975
1976         // Helper closure to create a B256 from a hex string
1977 let b256 = |s: &str| B256::from_slice(&hex::decode(s).unwrap());
1978
1979 // Create the HashedPostState from test case input
1980 let mut accounts = B256Map::default();
1981
1982 // Define all account data from test case input
1983 let account_data = [
1984 (
1985 "9f3a475db85ff1f5b5e82d8614ee4afc670d27aefb9a43da0bd863a54acf1fe6",
1986 8396790837504194281u64,
1987 9224366602005816983u64,
1988 "103c5b0538f4e37944321a30f5cb1f7005d2ee70998106f34f36d7adb838c789",
1989 ),
1990 (
1991 "c736258fdfd23d73ec4c5e54b8c3b58e26726b361d438ef48670f028286b70ca",
1992 9193115115482903760u64,
1993 4515164289866465875u64,
1994 "9f24ef3ab0b4893b0ec38d0e9b00f239da072ccf093b0b24f1ea1f99547abe55",
1995 ),
1996 (
1997 "780a3476520090f97e847181aee17515c5ea30b7607775103df16d2b6611a87a",
1998 8404772182417755681u64,
1999 16639574952778823617u64,
2000 "214b12bee666ce8c64c6bbbcfafa0c3e55b4b05a8724ec4182b9a6caa774c56d",
2001 ),
2002 (
2003 "23ebfa849308a5d02c3048040217cd1f4b71fb01a9b54dafe541284ebec2bcce",
2004 17978809803974566048u64,
2005 11093542035392742776u64,
2006 "5384dfda8f1935d98e463c00a96960ff24e4d4893ec21e5ece0d272df33ac7e9",
2007 ),
2008 (
2009 "348e476c24fac841b11d358431b4526db09edc9f39906e0ac8809886a04f3c5a",
2010 9422945522568453583u64,
2011 9737072818780682487u64,
2012 "79f8f25b2cbb7485c5c7b627917c0f562f012d3d7ddd486212c90fbea0cf686e",
2013 ),
2014 (
2015 "830536ee6c8f780a1cd760457345b79fc09476018a59cf3e8fd427a793d99633",
2016 16497625187081138489u64,
2017 15143978245385012455u64,
2018 "00ede4000cc2a16fca7e930761aaf30d1fddcc3803f0009d6a0742b4ee519342",
2019 ),
2020 (
2021 "806c74b024b2fe81f077ea93d2936c489689f7fe024febc3a0fb71a8a9f22fbc",
2022 8103477314050566918u64,
2023 1383893458340561723u64,
2024 "690ed176136174c4f0cc442e6dcbcf6e7b577e30fc052430b6060f97af1f8e85",
2025 ),
2026 (
2027 "b903d962ffc520877f14e1e8328160e5b22f8086b0f7e9cba7a373a8376028a0",
2028 12972727566246296372u64,
2029 1130659127924527352u64,
2030 "cadf1f09d8e6a0d945a58ccd2ff36e2ae99f8146f02be96873e84bef0462d64a",
2031 ),
2032 (
2033 "d36a16afff0097e06b2c28bd795b889265e2ceff9a086173113fbeb6f7a9bc42",
2034 15682404502571860137u64,
2035 2025886798818635036u64,
2036 "c2cee70663e9ff1b521e2e1602e88723da52ccdc7a69e370cde9595af435e654",
2037 ),
2038 (
2039 "f3e8461cba0b84f5b81f8ca63d0456cb567e701ec1d6e77b1a03624c5018389b",
2040 5663749586038550112u64,
2041 7681243595728002238u64,
2042 "072c547c3ab9744bcd2ed9dbd813bd62866a673f4ca5d46939b65e9507be0e70",
2043 ),
2044 (
2045 "40b71840b6f43a493b32f4aa755e02d572012392fd582c81a513a169447e194c",
2046 518207789203399614u64,
2047 317311275468085815u64,
2048 "85541d48471bf639c2574600a9b637338c49729ba9e741f157cc6ebaae139da0",
2049 ),
2050 (
2051 "3f77cd91ceb7d335dd2527c29e79aaf94f14141438740051eb0163d86c35bcc9",
2052 16227517944662106096u64,
2053 12646193931088343779u64,
2054 "54999911d82dd63d526429275115fa98f6a560bc2d8e00be24962e91e38d7182",
2055 ),
2056 (
2057 "5cd903814ba84daa6956572411cd1bf4d48a8e230003d28cc3f942697bf8debb",
2058 5096288383163945009u64,
2059 17919982845103509853u64,
2060 "6a53c812e713f1bfe6bf21954f291140c60ec3f2ef353ecdae5dc7b263a37282",
2061 ),
2062 (
2063 "23f3602c95fd98d7fbe48a326ae1549030a2c7574099432cce5b458182f16bf2",
2064 11136020130962086191u64,
2065 12045219101880183180u64,
2066 "ce53fb9b108a3ee90db8469e44948ba3263ca8d8a0d92a076c9516f9a3d30bd1",
2067 ),
2068 (
2069 "be86489b3594a9da83e04a9ff81c8d68d528b8b9d31f3942d1c5856a4a8c5af7",
2070 16293506537092575994u64,
2071 536238712429663046u64,
2072 "a2af0607ade21241386ecfb3780aa90514f43595941daeff8dd599c203cde30a",
2073 ),
2074 (
2075 "97bcd85ee5d6033bdf86397e8b26f711912948a7298114be27ca5499ea99725f",
2076 3086656672041156193u64,
2077 8667446575959669532u64,
2078 "0474377538684a991ffc9b41f970b48e65eda9e07c292e60861258ef87d45272",
2079 ),
2080 (
2081 "40065932e6c70eb907e4f2a89ec772f5382ca90a49ef44c4ae21155b9decdcc0",
2082 17152529399128063686u64,
2083 3643450822628960860u64,
2084 "d5f6198c64c797f455f5b44062bb136734f508f9cdd02d8d69d24100ac8d6252",
2085 ),
2086 (
2087 "c136436c2db6b2ebd14985e2c883e73c6d8fd95ace54bfefae9eeca47b7da800",
2088 727585093455815585u64,
2089 521742371554431881u64,
2090 "3dfad04a6eb46d175b63e96943c7d636c56d61063277e25557aace95820432da",
2091 ),
2092 (
2093 "9ea50348595593788645394eb041ac4f75ee4d6a4840b9cf1ed304e895060791",
2094 8654829249939415079u64,
2095 15623358443672184321u64,
2096 "61bb0d6ffcd5b32d0ee34a3b7dfb1c495888059be02b255dd1fa3be02fa1ddbd",
2097 ),
2098 (
2099 "5abc714353ad6abda44a609f9b61f310f5b0a7df55ccf553dc2db3edda18ca17",
2100 5732104102609402825u64,
2101 15720007305337585794u64,
2102 "8b55b7e9c6f54057322c5e0610b33b3137f1fcd46f7d4af1aca797c7b5fff033",
2103 ),
2104 (
2105 "e270b59e6e56100f9e2813f263884ba5f74190a1770dd88cd9603266174e0a6b",
2106 4728642361690813205u64,
2107 6762867306120182099u64,
2108 "5e9aa1ff854504b4bfea4a7f0175866eba04e88e14e57ac08dddc63d6917bf47",
2109 ),
2110 (
2111 "78286294c6fb6823bb8b2b2ddb7a1e71ee64e05c9ba33b0eb8bb6654c64a8259",
2112 6032052879332640150u64,
2113 498315069638377858u64,
2114 "799ef578ffb51a5ec42484e788d6ada4f13f0ff73e1b7b3e6d14d58caae9319a",
2115 ),
2116 (
2117 "af1b85cf284b0cb59a4bfb0f699194bcd6ad4538f27057d9d93dc7a95c1ff32e",
2118 1647153930670480138u64,
2119 13109595411418593026u64,
2120 "429dcdf4748c0047b0dd94f3ad12b5e62bbadf8302525cc5d2aad9c9c746696f",
2121 ),
2122 (
2123 "0152b7a0626771a2518de84c01e52839e7821a655f9dcb9a174d8f52b64b7086",
2124 3915492299782594412u64,
2125 9550071871839879785u64,
2126 "4d5e6ce993dfc9597585ae2b4bacd6d055fefc56ae825666c83e0770e4aa0527",
2127 ),
2128 (
2129 "9ea9b8a4f6bce1dba63290b81f4d1b88dfeac3e244856904a5c9d4086a10271b",
2130 8824593031424861220u64,
2131 15831101445348312026u64,
2132 "a07602b4dd5cba679562061b7c5c0344b2edd6eba36aa97ca57a6fe01ed80a48",
2133 ),
2134 (
2135 "d7b26c2d8f85b74423a57a3da56c61829340f65967791bab849c90b5e1547e7a",
2136 12723258987146468813u64,
2137 10714399360315276559u64,
2138 "3705e57b27d931188c0d2017ab62577355b0cdda4173203478a8562a0cdcae0c",
2139 ),
2140 (
2141 "da354ceca117552482e628937931870a28e9d4416f47a58ee77176d0b760c75b",
2142 1580954430670112951u64,
2143 14920857341852745222u64,
2144 "a13d6b0123daa2e662699ac55a2d0ed1d2e73a02ed00ee5a4dd34db8dea2a37e",
2145 ),
2146 (
2147 "53140d0c8b90b4c3c49e0604879d0dc036e914c4c4f799f1ccae357fef2613e3",
2148 12521658365236780592u64,
2149 11630410585145916252u64,
2150 "46f06ce1435a7a0fd3476bbcffe4aac88c33a7fcf50080270b715d25c93d96d7",
2151 ),
2152 (
2153 "4b1c151815da6f18f27e98890eac1f7d43b80f3386c7c7d15ee0e43a7edfe0a6",
2154 9575643484508382933u64,
2155 3471795678079408573u64,
2156 "a9e6a8fac46c5fc61ae07bddc223e9f105f567ad039d2312a03431d1f24d8b2c",
2157 ),
2158 (
2159 "39436357a2bcd906e58fb88238be2ddb2e43c8a5590332e3aee1d1134a0d0ba4",
2160 10171391804125392783u64,
2161 2915644784933705108u64,
2162 "1d5db03f07137da9d3af85096ed51a4ff64bb476a79bf4294850438867fe3833",
2163 ),
2164 (
2165 "5fbe8d9d6a12b061a94a72436caec331ab1fd4e472c3bb4688215788c5e9bcd9",
2166 5663512925993713993u64,
2167 18170240962605758111u64,
2168 "bd5d601cbcb47bd84d410bafec72f2270fceb1ed2ed11499a1e218a9f89a9f7f",
2169 ),
2170 (
2171 "f2e29a909dd31b38e9b92b2b2d214e822ebddb26183cd077d4009773854ab099",
2172 7512894577556564068u64,
2173 15905517369556068583u64,
2174 "a36e66ce11eca7900248c518e12c6c08d659d609f4cbd98468292de7adf780f2",
2175 ),
2176 (
2177 "3eb82e6d6e964ca56b50cc54bdd55bb470c67a4932aba48d27d175d1be2542aa",
2178 12645567232869276853u64,
2179 8416544129280224452u64,
2180 "d177f246a45cc76d39a8ee06b32d8c076c986106b9a8e0455a0b41d00fe3cbde",
2181 ),
2182 (
2183 "c903731014f6a5b4b45174ef5f9d5a2895a19d1308292f25aa323fda88acc938",
2184 5989992708726918818u64,
2185 17462460601463602125u64,
2186 "01241c61ad1c8adc27e5a1096ab6c643af0fbb6e2818ef77272b70e5c3624abc",
2187 ),
2188 (
2189 "ef46410ab47113a78c27e100ed1b476f82a8789012bd95a047a4b23385596f53",
2190 11884362385049322305u64,
2191 619908411193297508u64,
2192 "e9b4c929e26077ac1fd5a771ea5badc7e9ddb58a20a2a797389c63b3dd3df00d",
2193 ),
2194 (
2195 "be336bc6722bb787d542f4ef8ecb6f46a449557ca7b69b8668b6fed19dfa73b7",
2196 11490216175357680195u64,
2197 13136528075688203375u64,
2198 "31bfd807f92e6d5dc5c534e9ad0cb29d00c6f0ae7d7b5f1e65f8e683de0bce59",
2199 ),
2200 (
2201 "39599e5828a8f102b8a6808103ae7df29b838fe739d8b73f72f8f0d282ca5a47",
2202 6957481657451522177u64,
2203 4196708540027060724u64,
2204 "968a12d79704b313471ece148cb4e26b8b11620db2a9ee6da0f5dc200801f555",
2205 ),
2206 (
2207 "acd99530bb14ca9a7fac3df8eebfd8cdd234b0f6f7c3893a20bc159a4fd54df5",
2208 9792913946138032169u64,
2209 9219321015500590384u64,
2210 "db45a98128770a329c82c904ceee21d3917f6072b8bd260e46218f65656c964c",
2211 ),
2212 (
2213 "453b80a0b11f237011c57630034ed46888ad96f4300a58aea24c0fe4a5472f68",
2214 14407140330317286994u64,
2215 5783848199433986576u64,
2216 "b8cded0b4efd6bf2282a4f8b3c353f74821714f84df9a6ab25131edc7fdad00f",
2217 ),
2218 (
2219 "23e464d1e9b413a4a6b378cee3a0405ec6ccbb4d418372d1b42d3fde558d48d1",
2220 1190974500816796805u64,
2221 1621159728666344828u64,
2222 "d677f41d273754da3ab8080b605ae07a7193c9f35f6318b809e42a1fdf594be3",
2223 ),
2224 (
2225 "d0e590648dec459aca50edf44251627bab5a36029a0c748b1ddf86b7b887425b",
2226 4807164391931567365u64,
2227 4256042233199858200u64,
2228 "a8677de59ab856516a03663730af54c55a79169346c3d958b564e5ee35d8622b",
2229 ),
2230 (
2231 "72387dbaaaf2c39175d8c067558b869ba7bdc6234bc63ee97a53fea1d988ff39",
2232 5046042574093452325u64,
2233 3088471405044806123u64,
2234 "83c226621506b07073936aec3c87a8e2ef34dd42e504adc2bbab39ede49aa77f",
2235 ),
2236 (
2237 "de6874ca2b9dd8b4347c25d32b882a2a7c127b127d6c5e00d073ab3853339d0e",
2238 6112730660331874479u64,
2239 10943246617310133253u64,
2240 "a0c96a69e5ab3e3fe1a1a2fd0e5e68035ff3c7b2985e4e6b8407d4c377600c6f",
2241 ),
2242 (
2243 "b0d8689e08b983e578d6a0c136b76952497087ee144369af653a0a1b231eeb28",
2244 15612408165265483596u64,
2245 13112504741499957010u64,
2246 "4fc49edeff215f1d54dfd2e60a14a3de2abecbe845db2148c7aee32c65f3c91c",
2247 ),
2248 (
2249 "29d7fb6b714cbdd1be95c4a268cef7f544329642ae05fab26dc251bbc773085e",
2250 17509162400681223655u64,
2251 5075629528173950353u64,
2252 "781ecb560ef8cf0bcfa96b8d12075f4cf87ad52d69dfb2c72801206eded135bd",
2253 ),
2254 (
2255 "85dbf7074c93a4e39b67cc504b35351ee16c1fab437a7fb9e5d9320be1d9c13c",
2256 17692199403267011109u64,
2257 7069378948726478427u64,
2258 "a3ff0d8dee5aa0214460f5b03a70bd76ef00ac8c07f07c0b3d82c9c57e4c72a9",
2259 ),
2260 (
2261 "7bd5a9f3126b4a681afac9a177c6ff7f3dd80d8d7fd5a821a705221c96975ded",
2262 17807965607151214145u64,
2263 5562549152802999850u64,
2264 "dbc3861943b7372e49698b1c5b0e4255b7c93e9fa2c13d6a4405172ab0db9a5b",
2265 ),
2266 (
2267 "496d13d45dbe7eb02fee23c914ac9fefdf86cf5c937c520719fc6a31b3fcf8d9",
2268 13446203348342334214u64,
2269 332407928246785326u64,
2270 "d2d73f15fcdc12adce25b911aa4551dcf900e225761e254eb6392cbd414e389c",
2271 ),
2272 (
2273 "b2f0a0127fc74a35dec5515b1c7eb8a3833ca99925049c47cd109ec94678e6c5",
2274 9683373807753869342u64,
2275 7570798132195583433u64,
2276 "e704110433e5ab17858c5fbe4f1b6d692942d5f5981cac68372d06066bee97fe",
2277 ),
2278 (
2279 "d5f65171b17d7720411905ef138e84b9d1f459e2b248521c449f1781aafd675e",
2280 10088287051097617949u64,
2281 185695341767856973u64,
2282 "8d784c4171e242af4187f30510cd298106b7e68cd3088444a055cb1f3893ba28",
2283 ),
2284 (
2285 "7dcbec5c20fbf1d69665d4b9cdc450fea2d0098e78084bce0a864fea4ba016b0",
2286 13908816056510478374u64,
2287 17793990636863600193u64,
2288 "18e9026372d91e116faf813ce3ba9d7fadef2bb3b779be6efeba8a4ecd9e1f38",
2289 ),
2290 (
2291 "d4f772f4bf1cfa4dad4b55962b50900da8657a4961dabbdf0664f3cd42d368f8",
2292 16438076732493217366u64,
2293 18419670900047275588u64,
2294 "b9fd16b16b3a8fab4d9c47f452d9ce4aad530edeb06ee6830589078db2f79382",
2295 ),
2296 (
2297 "2d009535f82b1813ce2ca7236ceae7864c1e4d3644a1acd02656919ef1aa55d0",
2298 10206924399607440433u64,
2299 3986996560633257271u64,
2300 "db49e225bd427768599a7c06d7aee432121fa3179505f9ee8c717f51c7fa8c54",
2301 ),
2302 (
2303 "b1d7a292df12e505e7433c7e850e9efc81a8931b65f3354a66402894b6d5ba76",
2304 8215550459234533539u64,
2305 10241096845089693964u64,
2306 "5567813b312cb811909a01d14ee8f7ec4d239198ea2d37243123e1de2317e1af",
2307 ),
2308 (
2309 "85120d6f43ea9258accf6a87e49cd5461d9b3735a4dc623f9fbcc669cbdd1ce6",
2310 17566770568845511328u64,
2311 8686605711223432099u64,
2312 "e163f4fcd17acf5714ee48278732808601e861cd4c4c24326cd24431aab1d0ce",
2313 ),
2314 (
2315 "48fe4c22080c6e702f7af0e97fb5354c1c14ff4616c6fc4ac8a4491d4b9b3473",
2316 14371024664575587429u64,
2317 15149464181957728462u64,
2318 "061dec7af4b41bdd056306a8b13b71d574a49a4595884b1a77674f5150d4509d",
2319 ),
2320 (
2321 "29d14b014fa3cabbb3b4808e751e81f571de6d0e727cae627318a5fd82fef517",
2322 9612395342616083334u64,
2323 3700617080099093094u64,
2324 "f7b33a2d2784441f77f0cc1c87930e79bea3332a921269b500e81d823108561c",
2325 ),
2326 ];
2327
2328 // Insert all accounts
2329 for (addr, nonce, balance, code_hash) in &account_data {
2330 accounts.insert(
2331 b256(addr),
2332 Some(Account {
2333 nonce: *nonce,
2334 balance: U256::from(*balance),
2335 bytecode_hash: Some(b256(code_hash)),
2336 }),
2337 );
2338 }
2339
2340 let post_state = HashedPostState { accounts, storages: Default::default() };
2341
2342 // Create test harness
2343 let harness = ProofTestHarness::new(post_state);
2344
2345         // Create targets from the test case input; the original nibble paths are hex-encoded as B256 keys
2346 let targets = vec![
2347 Target::new(b256("0153000000000000000000000000000000000000000000000000000000000000"))
2348 .with_min_len(2),
2349 Target::new(b256("0000000000000000000000000000000000000000000000000000000000000000"))
2350 .with_min_len(2),
2351 Target::new(b256("2300000000000000000000000000000000000000000000000000000000000000"))
2352 .with_min_len(2),
2353 ];
2354
2355 // Test proof generation
2356 harness.assert_proof(targets).expect("Proof generation failed");
2357 }
2358}