reth_trie/proof_v2/mod.rs
//! Proof calculation version 2: Leaf-only implementation.
//!
//! This module provides a rewritten proof calculator that:
//! - Uses only leaf data (HashedAccounts/Storages) to generate proofs
//! - Returns proof nodes sorted depth-first by path
//! - Automatically resets after each calculation
//! - Re-uses cursors across calculations
//! - Supports generic value types with lazy evaluation

use crate::{
    hashed_cursor::{HashedCursor, HashedStorageCursor},
    trie_cursor::{depth_first, TrieCursor, TrieStorageCursor},
};
use alloy_primitives::{B256, U256};
use alloy_rlp::Encodable;
use alloy_trie::{BranchNodeCompact, TrieMask};
use reth_execution_errors::trie::StateProofError;
use reth_trie_common::{BranchNode, Nibbles, ProofTrieNode, RlpNode, TrieMasks, TrieNode};
use std::cmp::Ordering;
use tracing::{instrument, trace};

mod value;
pub use value::*;

mod node;
use node::*;

mod target;
pub use target::*;

/// Target to use with the `tracing` crate.
static TRACE_TARGET: &str = "trie::proof_v2";

/// Number of bytes to pre-allocate for [`ProofCalculator`]'s `rlp_encode_buf` field.
const RLP_ENCODE_BUF_SIZE: usize = 1024;

/// A [`Nibbles`] which contains 64 zero nibbles.
static PATH_ALL_ZEROS: Nibbles = {
    let mut path = Nibbles::new();
    let mut i = 0;
    while i < 64 {
        path.push_unchecked(0);
        i += 1;
    }
    path
};

/// A proof calculator that generates merkle proofs using only leaf data.
///
/// The calculator:
/// - Accepts one or more B256 proof targets sorted lexicographically
/// - Returns proof nodes sorted depth-first by path
/// - Automatically resets after each calculation
/// - Re-uses cursors from one calculation to the next
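///
/// A minimal usage sketch (hypothetical cursors, encoder, and targets; not a
/// compilable doctest):
///
/// ```text
/// let mut calculator = ProofCalculator::new(trie_cursor, hashed_cursor);
/// // `targets` must be sorted lexicographically by key.
/// let proofs = calculator.proof(&value_encoder, &mut targets)?;
/// // `proofs` is sorted depth-first by path, and the calculator has reset
/// // itself, so it can be reused for the next call.
/// ```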
#[derive(Debug)]
pub struct ProofCalculator<TC, HC, VE: LeafValueEncoder> {
    /// Trie cursor for traversing stored branch nodes.
    trie_cursor: TC,
    /// Hashed cursor for iterating over leaf data.
    hashed_cursor: HC,
    /// Branches which are currently in the process of being constructed, each being a child of
    /// the previous one.
    branch_stack: Vec<ProofTrieBranch>,
    /// The path of the last branch in `branch_stack`.
    branch_path: Nibbles,
    /// Children of branches in the `branch_stack`.
    ///
    /// Each branch in `branch_stack` tracks which children are in this stack using its
    /// `state_mask`; the number of children the branch has in this stack is equal to the number of
    /// bits set in its `state_mask`.
    ///
    /// The children for the bottom branch in `branch_stack` are found at the bottom of this stack,
    /// and so on. When a branch is removed from `branch_stack` its children are removed from this
    /// one, and the branch is pushed onto this stack in their place (see [`Self::pop_branch`]).
    ///
    /// Children on the `child_stack` are converted to [`ProofTrieBranchChild::RlpNode`]s via the
    /// [`Self::commit_child`] method. Committing a child indicates that no further changes are
    /// expected to happen to it (e.g. splitting its short key when inserting a new branch). Given
    /// that keys are consumed in lexicographical order, only the last child on the stack can
    /// ever be modified, and therefore all children besides the last are expected to be
    /// [`ProofTrieBranchChild::RlpNode`]s.
    child_stack: Vec<ProofTrieBranchChild<VE::DeferredEncoder>>,
    /// Cached branch data pulled from the `trie_cursor`. The calculator will use the cached
    /// [`BranchNodeCompact::hashes`] to skip over the calculation of sub-tries in the overall
    /// trie. The cached hashes cannot be used for any paths which are prefixes of a proof target.
    cached_branch_stack: Vec<(Nibbles, BranchNodeCompact)>,
    /// The proofs which will be returned from the calculation. This gets taken at the end of every
    /// proof call.
    retained_proofs: Vec<ProofTrieNode>,
    /// Free-list of re-usable buffers of [`RlpNode`]s, used for encoding branch nodes to RLP.
    ///
    /// We are generally able to re-use these buffers across different branch nodes for the
    /// duration of a proof calculation, but occasionally we will lose one when a branch
    /// node is returned as a `ProofTrieNode`.
    rlp_nodes_bufs: Vec<Vec<RlpNode>>,
    /// Re-usable byte buffer, used for RLP encoding.
    rlp_encode_buf: Vec<u8>,
}

impl<TC, HC, VE: LeafValueEncoder> ProofCalculator<TC, HC, VE> {
    /// Create a new [`ProofCalculator`] instance for calculating account proofs.
    pub fn new(trie_cursor: TC, hashed_cursor: HC) -> Self {
        Self {
            trie_cursor,
            hashed_cursor,
            branch_stack: Vec::<_>::with_capacity(64),
            branch_path: Nibbles::new(),
            child_stack: Vec::<_>::new(),
            cached_branch_stack: Vec::<_>::with_capacity(64),
            retained_proofs: Vec::<_>::new(),
            rlp_nodes_bufs: Vec::<_>::new(),
            rlp_encode_buf: Vec::<_>::with_capacity(RLP_ENCODE_BUF_SIZE),
        }
    }
}

impl<TC, HC, VE> ProofCalculator<TC, HC, VE>
where
    TC: TrieCursor,
    HC: HashedCursor,
    VE: LeafValueEncoder<Value = HC::Value>,
{
    /// Takes a re-usable `RlpNode` buffer from the internal free-list, or allocates a new one if
    /// the free-list is empty.
    ///
    /// The returned Vec will have a length of zero.
    fn take_rlp_nodes_buf(&mut self) -> Vec<RlpNode> {
        self.rlp_nodes_bufs
            .pop()
            .map(|mut buf| {
                buf.clear();
                buf
            })
            .unwrap_or_else(|| Vec::with_capacity(16))
    }

    // Returns zero if `branch_stack` is empty, one otherwise.
    //
    // This is used when working with the `ext_len` field of [`ProofTrieBranch`]. The `ext_len` is
    // calculated by taking the difference of the current `branch_path` and the new branch's path;
    // if the new branch has a parent branch (ie `branch_stack` is not empty) then 1 is subtracted
    // from the `ext_len` to account for the child's nibble on the parent.
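    //
    // For example (hypothetical paths): with `branch_path` 0xab on the stack, pushing a new
    // branch at path 0xabc1 gives `ext_len = 4 - 2 - 1 = 1`; the subtracted 1 accounts for the
    // child nibble 0xc held in the parent's `state_mask`, leaving 0x1 as the extension.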
    #[inline]
    const fn maybe_parent_nibble(&self) -> usize {
        !self.branch_stack.is_empty() as usize
    }

    /// Returns true if the proof of a node at the given path should be retained. A node is retained
    /// if its path is a prefix of any target.
    ///
    /// This may move the `targets` iterator forward if the given path comes after the current
    /// target.
    ///
    /// This method uses the position tracked by [`TargetsCursor`] to check as few targets as
    /// possible. In general it looks at a current target and the next target simultaneously,
    /// forming an end-exclusive range.
    ///
    /// ```text
    /// * Given targets: [ 0x012, 0x045, 0x678 ]
    /// * targets.current() returns:
    ///   - (0x012, Some(0x045)): covers (0x012..0x045)
    ///   - (0x045, Some(0x678)): covers (0x045..0x678)
    ///   - (0x678, None): covers (0x678..)
    /// ```
    ///
    /// As long as the path which is passed in lies within that range we can continue to use the
    /// current target. Once the path goes beyond that range (ie path >= next target) then we can be
    /// sure that no further paths will be in the range, and we can iterate forward.
    ///
    /// ```text
    /// * Given:
    ///   - path: 0x04
    ///   - targets.current() returns (0x012, Some(0x045))
    ///
    /// * 0x04 comes _after_ 0x045 in depth-first order, so (0x012..0x045) does not contain 0x04.
    ///
    /// * targets.next() is called.
    ///
    /// * targets.current() now returns (0x045, Some(0x678)). This does contain 0x04.
    ///
    /// * 0x04 is a prefix of 0x045, and so is retained.
    /// ```
    fn should_retain<'a>(
        &self,
        targets: &mut TargetsCursor<'a>,
        path: &Nibbles,
        check_min_len: bool,
    ) -> bool {
        let (mut lower, mut upper) = targets.current();

        trace!(target: TRACE_TARGET, ?path, target = ?lower, "should_retain: called");
        debug_assert!(
            self.retained_proofs.last().is_none_or(
                |ProofTrieNode { path: last_retained_path, .. }| {
                    depth_first::cmp(path, last_retained_path) == Ordering::Greater
                }
            ),
            "should_retain called with path {path:?} which is not after previously retained node {:?} in depth-first order",
            self.retained_proofs.last().map(|n| n.path),
        );

        loop {
            // If the node in question is a prefix of the target then we do not iterate targets
            // further.
            //
            // Even if the node is a prefix of the target's key, if the target has a non-zero
            // `min_len` it indicates that the node should only be retained if it is
            // longer than that value.
            //
            // _However_ even if the node doesn't match the target due to the target's `min_len`, it
            // may match other targets whose keys match this node. So we search forwards and
            // backwards for all targets which might match this node, and check against the
            // `min_len` of each.
            //
            // For example, given a branch 0xabc, with children at 0, 1, and 2, and targets:
            // - key: 0xabc0, min_len: 2
            // - key: 0xabc1, min_len: 1
            // - key: 0xabc2, min_len: 4 <-- current
            // - key: 0xabc3, min_len: 3
            //
            // When the branch node at 0xabc is visited it will be after the targets has iterated
            // forward to 0xabc2 (because all children will have been visited already). At this
            // point the target for 0xabc2 will not match the branch due to its `min_len`, but any
            // of the other targets would, so we need to check those as well.
            if lower.key.starts_with(path) {
                return !check_min_len ||
                    (path.len() >= lower.min_len as usize ||
                        targets
                            .skip_iter()
                            .take_while(|target| target.key.starts_with(path))
                            .any(|target| path.len() >= target.min_len as usize) ||
                        targets
                            .rev_iter()
                            .take_while(|target| target.key.starts_with(path))
                            .any(|target| path.len() >= target.min_len as usize))
            }

            // If the path isn't in the current range then iterate forward until it is. If there is
            // no upper bound then the final, unbounded range has been reached and the path cannot
            // match; return false.
            if upper.is_some_and(|upper| depth_first::cmp(path, &upper.key) != Ordering::Less) {
                (lower, upper) = targets.next();
                trace!(target: TRACE_TARGET, target = ?lower, "upper target <= path, next target");
            } else {
                return false
            }
        }
    }

    /// Takes a child which has been removed from the `child_stack` and converts it to an
    /// [`RlpNode`].
    ///
    /// Calling this method indicates that the child will not undergo any further modifications, and
    /// therefore can be retained as a proof node if applicable.
    fn commit_child<'a>(
        &mut self,
        targets: &mut TargetsCursor<'a>,
        child_path: Nibbles,
        child: ProofTrieBranchChild<VE::DeferredEncoder>,
    ) -> Result<RlpNode, StateProofError> {
        // If the child is already an `RlpNode` then there is nothing to do.
        if let ProofTrieBranchChild::RlpNode(rlp_node) = child {
            return Ok(rlp_node)
        }

        // If we should retain the child then do so.
        if self.should_retain(targets, &child_path, true) {
            trace!(target: TRACE_TARGET, ?child_path, "Retaining child");

            // Convert to `ProofTrieNode`, which will be what is retained.
            //
            // If this node is a branch then its `rlp_nodes_buf` will be taken and not returned to
            // the `rlp_nodes_bufs` free-list.
            self.rlp_encode_buf.clear();
            let proof_node = child.into_proof_trie_node(child_path, &mut self.rlp_encode_buf)?;

            // Use the `ProofTrieNode` to encode the `RlpNode`, and then push it onto retained
            // nodes before returning.
            self.rlp_encode_buf.clear();
            proof_node.node.encode(&mut self.rlp_encode_buf);

            self.retained_proofs.push(proof_node);
            return Ok(RlpNode::from_rlp(&self.rlp_encode_buf));
        }

        // If the child path is not being retained then we convert directly to an `RlpNode`
        // using `into_rlp`. Since we are not retaining the node we can recover any `RlpNode`
        // buffers for the free-list here, hence why we do this as a separate logical branch.
        self.rlp_encode_buf.clear();
        let (child_rlp_node, freed_rlp_nodes_buf) = child.into_rlp(&mut self.rlp_encode_buf)?;

        // If there is an `RlpNode` buffer which can be re-used then push it onto the free-list.
        if let Some(buf) = freed_rlp_nodes_buf {
            self.rlp_nodes_bufs.push(buf);
        }

        Ok(child_rlp_node)
    }

    /// Returns the path of the child of the currently under-construction branch at the given
    /// nibble.
    #[inline]
    fn child_path_at(&self, nibble: u8) -> Nibbles {
        let mut child_path = self.branch_path;
        debug_assert!(child_path.len() < 64);
        child_path.push_unchecked(nibble);
        child_path
    }

    /// Returns index of the highest nibble which is set in the mask.
    ///
    /// # Panics
    ///
    /// Will panic in debug mode if the mask is empty.
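    ///
    /// For example, a mask of `0b0000_0100_0001_0000` has bits 4 and 10 set; its `leading_zeros`
    /// is 5, so `u16::BITS - 5 - 1 = 10` and this returns `10`.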
    #[inline]
    fn highest_set_nibble(mask: TrieMask) -> u8 {
        debug_assert!(!mask.is_empty());
        (u16::BITS - mask.leading_zeros() - 1) as u8
    }

    /// Returns the path of the child on top of the `child_stack`, or the root path if the stack is
    /// empty.
    fn last_child_path(&self) -> Nibbles {
        // If there is no branch under construction then the top child must be the root child.
        let Some(branch) = self.branch_stack.last() else {
            return Nibbles::new();
        };

        self.child_path_at(Self::highest_set_nibble(branch.state_mask))
    }

    /// Calls [`Self::commit_child`] on the last child of `child_stack`, replacing it with a
    /// [`ProofTrieBranchChild::RlpNode`].
    ///
    /// If `child_stack` is empty then this is a no-op.
    ///
    /// NOTE that this method call relies on the `state_mask` of the top branch of the
    /// `branch_stack` to determine the last child's path. When committing the last child prior to
    /// pushing a new child, it's important to set the new child's `state_mask` bit _after_ the call
    /// to this method.
    fn commit_last_child<'a>(
        &mut self,
        targets: &mut TargetsCursor<'a>,
    ) -> Result<(), StateProofError> {
        let Some(child) = self.child_stack.pop() else { return Ok(()) };

        // If the child is already an `RlpNode` then there is nothing to do, push it back on with no
        // changes.
        if let ProofTrieBranchChild::RlpNode(_) = child {
            self.child_stack.push(child);
            return Ok(())
        }

        let child_path = self.last_child_path();
        // TODO theoretically `commit_child` only needs to convert to an `RlpNode` if it's going to
        // retain the proof, otherwise we could leave the child as-is on the stack and convert it
        // when popping the branch, giving more time to the DeferredEncoder to do async work.
        let child_rlp_node = self.commit_child(targets, child_path, child)?;

        // Replace the child on the stack
        self.child_stack.push(ProofTrieBranchChild::RlpNode(child_rlp_node));
        Ok(())
    }

    /// Creates a new leaf node on a branch, setting its `state_mask` bit and pushing the leaf onto
    /// the `child_stack`.
    ///
    /// # Panics
    ///
    /// - If `branch_stack` is empty
    /// - If the leaf's nibble is already set in the branch's `state_mask`.
    fn push_new_leaf<'a>(
        &mut self,
        targets: &mut TargetsCursor<'a>,
        leaf_nibble: u8,
        leaf_short_key: Nibbles,
        leaf_val: VE::DeferredEncoder,
    ) -> Result<(), StateProofError> {
        // Before pushing the new leaf onto the `child_stack` we need to commit the previous last
        // child, so that only `child_stack`'s final child is a non-RlpNode.
        self.commit_last_child(targets)?;

        // Once the last child is committed we set the new child's bit on the top branch's
        // `state_mask` and push that new child.
        let branch = self.branch_stack.last_mut().expect("branch_stack cannot be empty");

        debug_assert!(!branch.state_mask.is_bit_set(leaf_nibble));
        branch.state_mask.set_bit(leaf_nibble);

        self.child_stack
            .push(ProofTrieBranchChild::Leaf { short_key: leaf_short_key, value: leaf_val });

        Ok(())
    }

    /// Pushes a new branch onto the `branch_stack` based on the path and short key of the last
    /// child on the `child_stack` and the path of the next child which will be pushed on to the
    /// stack after this call.
    ///
    /// Returns the nibble of the branch's `state_mask` which should be set for the new child, and
    /// the short key that the next child should use.
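    ///
    /// For example (hypothetical paths):
    ///
    /// ```text
    /// * branch_path: 0xab; top child: a leaf at nibble 0xc with short key 0x123
    ///   (full path 0xabc123)
    /// * new_child_path: 0xabc15f, so new_child_short_key: 0x15f
    /// * common prefix of 0x123 and 0x15f: 0x1 -> the new branch gets ext_len 1
    ///   and branch_path becomes 0xabc1
    /// * the first child keeps nibble 0x2 with short key 0x3; (0x5, 0xf) is
    ///   returned for the new child
    /// ```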
    fn push_new_branch(&mut self, new_child_path: Nibbles) -> (u8, Nibbles) {
        // First determine the new child's shortkey relative to the current branch. If there is no
        // current branch then the short key is the full path.
        let new_child_short_key = if self.branch_stack.is_empty() {
            new_child_path
        } else {
            // When there is a current branch then trim off its path as well as the nibble that it
            // has set for this leaf.
            trim_nibbles_prefix(&new_child_path, self.branch_path.len() + 1)
        };

        // Get the new branch's first child, which is the child on the top of the stack with which
        // the new child shares the same nibble on the current branch.
        let first_child = self
            .child_stack
            .last_mut()
            .expect("push_new_branch can't be called with empty child_stack");

        let first_child_short_key = first_child.short_key();
        debug_assert!(
            !first_child_short_key.is_empty(),
            "push_new_branch called when top child on stack is not a leaf or extension with a short key",
        );

        // Determine how many nibbles are shared between the new branch's first child and the new
        // child. This common prefix will be the extension of the new branch.
        let common_prefix_len = first_child_short_key.common_prefix_length(&new_child_short_key);

        // Trim off the common prefix from the first child's short key, plus one nibble which will
        // be stored by the new branch itself in its state mask.
        let first_child_nibble = first_child_short_key.get_unchecked(common_prefix_len);
        first_child.trim_short_key_prefix(common_prefix_len + 1);

        // Similarly, trim off the common prefix, plus one nibble for the new branch, from the new
        // child's short key.
        let new_child_nibble = new_child_short_key.get_unchecked(common_prefix_len);
        let new_child_short_key = trim_nibbles_prefix(&new_child_short_key, common_prefix_len + 1);

        // Update the branch path to reflect the new branch about to be pushed. Its path will be
        // the path of the previous branch, plus the nibble shared by each child, plus the parent
        // extension (denoted by a non-zero `ext_len`). Since the new branch's path is a prefix of
        // the original new_child_path we can just slice that.
        //
        // If the new branch is the first branch then we do not add the extra 1, as there is no
        // nibble in a parent branch to account for.
        let branch_path_len =
            self.branch_path.len() + common_prefix_len + self.maybe_parent_nibble();
        self.branch_path = new_child_path.slice_unchecked(0, branch_path_len);

        // Push the new branch onto the `branch_stack`. We do not yet set the `state_mask` bit of
        // the new child; whatever actually pushes the child onto the `child_stack` is expected to
        // do that.
        self.branch_stack.push(ProofTrieBranch {
            ext_len: common_prefix_len as u8,
            state_mask: TrieMask::new(1 << first_child_nibble),
            masks: TrieMasks::none(),
        });

        trace!(
            target: TRACE_TARGET,
            ?new_child_path,
            ?common_prefix_len,
            ?first_child_nibble,
            branch_path = ?self.branch_path,
            "Pushed new branch",
        );

        (new_child_nibble, new_child_short_key)
    }

    /// Pops the top branch off of the `branch_stack`, hashes its children on the `child_stack`, and
    /// replaces those children on the `child_stack`. The `branch_path` field will be updated
    /// accordingly.
    ///
    /// # Panics
    ///
    /// This method panics if `branch_stack` is empty.
    fn pop_branch<'a>(&mut self, targets: &mut TargetsCursor<'a>) -> Result<(), StateProofError> {
        trace!(
            target: TRACE_TARGET,
            branch = ?self.branch_stack.last(),
            branch_path = ?self.branch_path,
            child_stack_len = ?self.child_stack.len(),
            "pop_branch: called",
        );

        // Ensure the final child on the child stack has been committed, as this method expects all
        // children of the branch to have been committed.
        self.commit_last_child(targets)?;

        let mut rlp_nodes_buf = self.take_rlp_nodes_buf();
        let branch = self.branch_stack.pop().expect("branch_stack cannot be empty");

        // Take the branch's children off the stack, using the state mask to determine how many
        // there are.
        let num_children = branch.state_mask.count_ones() as usize;
        debug_assert!(num_children > 1, "A branch must have at least two children");
        debug_assert!(
            self.child_stack.len() >= num_children,
            "Stack is missing necessary children ({num_children:?})"
        );

        // Collect children into an `RlpNode` Vec by committing and pushing each of them.
        for (idx, child) in
            self.child_stack.drain(self.child_stack.len() - num_children..).enumerate()
        {
            let ProofTrieBranchChild::RlpNode(child_rlp_node) = child else {
                panic!(
                    "all branch children must have been committed, found {} at index {idx:?}",
                    std::any::type_name_of_val(&child)
                );
            };
            rlp_nodes_buf.push(child_rlp_node);
        }

        debug_assert_eq!(
            rlp_nodes_buf.len(),
            branch.state_mask.count_ones() as usize,
            "children length must match number of bits set in state_mask"
        );

        // Calculate the short key of the parent extension (if the branch has a parent extension).
        // It's important to calculate this short key prior to modifying the `branch_path`.
        let short_key = trim_nibbles_prefix(
            &self.branch_path,
            self.branch_path.len() - branch.ext_len as usize,
        );

        // Wrap the `BranchNode` so it can be pushed onto the child stack.
        let mut branch_as_child = ProofTrieBranchChild::Branch {
            node: BranchNode::new(rlp_nodes_buf, branch.state_mask),
            masks: branch.masks,
        };

        // If there is an extension then encode the branch as an `RlpNode` and use it to construct
        // the extension in its place.
        if !short_key.is_empty() {
            let branch_rlp_node = self.commit_child(targets, self.branch_path, branch_as_child)?;
            branch_as_child = ProofTrieBranchChild::Extension { short_key, child: branch_rlp_node };
        }

        self.child_stack.push(branch_as_child);

        // Update the branch_path. If this branch is the only branch then only its extension needs
        // to be trimmed, otherwise we also need to remove its nibble from its parent.
        let new_path_len =
            self.branch_path.len() - branch.ext_len as usize - self.maybe_parent_nibble();

        debug_assert!(self.branch_path.len() >= new_path_len);
        self.branch_path = self.branch_path.slice_unchecked(0, new_path_len);

        Ok(())
    }

    /// Adds a single leaf for a key to the stack, possibly collapsing an existing branch and/or
    /// creating a new one depending on the path of the key.
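    ///
    /// For example (hypothetical paths): if the current branch is at 0x1a and the new key is
    /// 0x1b..., the branch at 0x1a shares only 0x1 with the key, so it can have no further
    /// children and is popped; the loop then retries the key against the parent branch (or
    /// starts a new branch at 0x1 by splitting the collapsed child's short key).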
    fn push_leaf<'a>(
        &mut self,
        targets: &mut TargetsCursor<'a>,
        key: Nibbles,
        val: VE::DeferredEncoder,
    ) -> Result<(), StateProofError> {
        loop {
            trace!(
                target: TRACE_TARGET,
                ?key,
                branch_stack_len = ?self.branch_stack.len(),
                branch_path = ?self.branch_path,
                child_stack_len = ?self.child_stack.len(),
                "push_leaf: loop",
            );

            // Get the `state_mask` of the branch currently being built. If there are no branches
            // on the stack then it means either the trie is empty or only a single leaf has been
            // added previously.
            let curr_branch_state_mask = match self.branch_stack.last() {
                Some(curr_branch) => curr_branch.state_mask,
                None if self.child_stack.is_empty() => {
                    // If the child stack is empty then this is the first leaf, push it and be done
                    self.child_stack
                        .push(ProofTrieBranchChild::Leaf { short_key: key, value: val });
                    return Ok(())
                }
                None => {
                    // If the child stack is not empty then it must only have a single other child
                    // which is either a leaf or extension with a non-zero short key.
                    debug_assert_eq!(self.child_stack.len(), 1);
                    debug_assert!(!self
                        .child_stack
                        .last()
                        .expect("already checked for emptiness")
                        .short_key()
                        .is_empty());
                    let (nibble, short_key) = self.push_new_branch(key);
                    self.push_new_leaf(targets, nibble, short_key, val)?;
                    return Ok(())
                }
            };

            // Find the common prefix length, which is the number of nibbles shared between the
            // current branch and the key.
            let common_prefix_len = self.branch_path.common_prefix_length(&key);

            // If the current branch does not share all of its nibbles with the new key then it is
            // not the parent of the new key. In this case the current branch will have no more
            // children. We can pop it and loop back to the top to try again with its parent branch.
            if common_prefix_len < self.branch_path.len() {
                self.pop_branch(targets)?;
                continue
            }

            // If the current branch is a prefix of the new key then the leaf is a child of the
            // branch. If the branch doesn't have the leaf's nibble set then the leaf can be added
            // directly, otherwise a new branch must be created in-between this branch and that
            // existing child.
            let nibble = key.get_unchecked(common_prefix_len);
            if curr_branch_state_mask.is_bit_set(nibble) {
                // Push a new branch which splits the short key of the existing child at this
                // nibble.
                let (nibble, short_key) = self.push_new_branch(key);
                // Push the new leaf onto the new branch.
                self.push_new_leaf(targets, nibble, short_key, val)?;
            } else {
                let short_key = key.slice_unchecked(common_prefix_len + 1, key.len());
                self.push_new_leaf(targets, nibble, short_key, val)?;
            }

            return Ok(())
        }
    }

    /// Given the lower (inclusive) and upper (exclusive) bounds of a range of keys, iterates over
    /// the `hashed_cursor` and calculates all trie nodes possible based on those keys. If the
    /// upper bound is None then it is considered unbounded.
    ///
    /// It is expected that this method is "driven" by `next_uncached_key_range`, which decides
    /// which ranges of keys need to be calculated based on what cached trie data is available.
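    ///
    /// For example (hypothetical bounds): given `lower_bound = 0x31` and `upper_bound =
    /// Some(0x32)`, every hashed key with prefix 0x31 is passed to [`Self::push_leaf`], and the
    /// cursor is left at the first key at or after 0x32.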
    #[instrument(
        target = TRACE_TARGET,
        level = "trace",
        skip_all,
        fields(?lower_bound, ?upper_bound),
    )]
    fn calculate_key_range<'a>(
        &mut self,
        value_encoder: &VE,
        targets: &mut TargetsCursor<'a>,
        hashed_cursor_current: &mut Option<(Nibbles, VE::DeferredEncoder)>,
        lower_bound: Nibbles,
        upper_bound: Option<Nibbles>,
    ) -> Result<(), StateProofError> {
        // A helper closure for mapping entries returned from the `hashed_cursor`, converting the
        // key to Nibbles and immediately creating the DeferredValueEncoder so that encoding of the
        // leaf value can begin ASAP.
        let map_hashed_cursor_entry = |(key_b256, val): (B256, _)| {
            debug_assert_eq!(key_b256.len(), 32);
            // SAFETY: key is a B256 and so is exactly 32-bytes.
            let key = unsafe { Nibbles::unpack_unchecked(key_b256.as_slice()) };
            let val = value_encoder.deferred_encoder(key_b256, val);
            (key, val)
        };

        // If the cursor hasn't been used, or the last iterated key is prior to this range's
        // key range, then seek forward to at least the first key.
        if hashed_cursor_current.as_ref().is_none_or(|(key, _)| key < &lower_bound) {
            trace!(
                target: TRACE_TARGET,
                current=?hashed_cursor_current.as_ref().map(|(k, _)| k),
                "Seeking hashed cursor to meet lower bound",
            );

            let lower_key = B256::right_padding_from(&lower_bound.pack());
            *hashed_cursor_current =
                self.hashed_cursor.seek(lower_key)?.map(map_hashed_cursor_entry);
        }

        // Loop over all keys in the range, calling `push_leaf` on each.
        while let Some((key, _)) = hashed_cursor_current.as_ref() &&
            upper_bound.is_none_or(|upper_bound| key < &upper_bound)
        {
            let (key, val) =
                core::mem::take(hashed_cursor_current).expect("while-let checks for Some");
            self.push_leaf(targets, key, val)?;
            *hashed_cursor_current = self.hashed_cursor.next()?.map(map_hashed_cursor_entry);
        }

        trace!(target: TRACE_TARGET, "No further keys within range");
        Ok(())
    }

    /// Constructs and returns a new [`ProofTrieBranch`] based on an existing [`BranchNodeCompact`].
    #[inline]
    const fn new_from_cached_branch(
        cached_branch: &BranchNodeCompact,
        ext_len: u8,
    ) -> ProofTrieBranch {
        ProofTrieBranch {
            ext_len,
            state_mask: TrieMask::new(0),
            masks: TrieMasks {
                tree_mask: Some(cached_branch.tree_mask),
                hash_mask: Some(cached_branch.hash_mask),
            },
        }
    }

    /// Pushes a new branch onto the `branch_stack` which is based on a cached branch obtained via
    /// the trie cursor.
    ///
    /// If there is already a child at the top branch of `branch_stack` occupying this new branch's
    /// nibble then that child will have its short-key split with another new branch, and this
    /// cached branch will be a child of that splitting branch.
    fn push_cached_branch<'a>(
        &mut self,
        targets: &mut TargetsCursor<'a>,
        cached_path: Nibbles,
        cached_branch: &BranchNodeCompact,
    ) -> Result<(), StateProofError> {
        debug_assert!(
            cached_path.starts_with(&self.branch_path),
            "push_cached_branch called with path {cached_path:?} which is not a child of current branch {:?}",
            self.branch_path,
        );

        let parent_branch = self.branch_stack.last();

        // If both stacks are empty then there were no leaves before this cached branch, push it and
        // be done; the extension of the branch will be its full path.
        if self.child_stack.is_empty() && parent_branch.is_none() {
            self.branch_path = cached_path;
            self.branch_stack
                .push(Self::new_from_cached_branch(cached_branch, cached_path.len() as u8));
            return Ok(())
        }

        // Get the nibble which should be set in the parent branch's `state_mask` for this new
        // branch.
        let cached_branch_nibble = cached_path.get_unchecked(self.branch_path.len());

        // We calculate the `ext_len` of the new branch, and potentially update its nibble if a new
        // parent branch is inserted here, based on the state of the parent branch.
        let (cached_branch_nibble, ext_len) = if parent_branch
            .is_none_or(|parent_branch| parent_branch.state_mask.is_bit_set(cached_branch_nibble))
        {
            // If the `child_stack` is not empty but the `branch_stack` is then it implies that
            // there must be a leaf or extension at the root of the trie whose short-key will get
            // split by a new branch, which will become the parent of both that leaf/extension and
            // this new branch.
            //
            // Similarly, if there is a branch on the `branch_stack` but its `state_mask` bit for
            // this new branch is already set, then there must be a leaf/extension with a short-key
            // to be split.
            debug_assert!(!self
                .child_stack
                .last()
                .expect("already checked for emptiness")
                .short_key()
                .is_empty());

            // Split that leaf/extension's short key with a new branch.
            let (nibble, short_key) = self.push_new_branch(cached_path);
            (nibble, short_key.len())
        } else {
            // If there is a parent branch but its `state_mask` bit for this branch is not set
            // then we can simply calculate the `ext_len` based on the difference of each, minus
            // 1 to account for the nibble in the `state_mask`.
            (cached_branch_nibble, cached_path.len() - self.branch_path.len() - 1)
        };

        // `commit_last_child` relies on the last set bit of the parent branch's `state_mask` to
        // determine the path of the last child on the `child_stack`. Since we are about to
        // change that mask we need to commit that last child first.
        self.commit_last_child(targets)?;

        // When pushing a new branch we need to set its child nibble in the `state_mask` of
        // its parent, if there is one.
        if let Some(parent_branch) = self.branch_stack.last_mut() {
            parent_branch.state_mask.set_bit(cached_branch_nibble);
        }

        // Finally update the `branch_path` and push the new branch.
        self.branch_path = cached_path;
        self.branch_stack.push(Self::new_from_cached_branch(cached_branch, ext_len as u8));

        trace!(
            target: TRACE_TARGET,
            branch=?self.branch_stack.last(),
            branch_path=?self.branch_path,
            "Pushed cached branch",
        );

        Ok(())
    }

    /// Attempts to pop off the top branch of the `cached_branch_stack`, returning
    /// [`PopCachedBranchOutcome::Popped`] on success. Returns other variants to indicate that the
    /// stack is empty and what to do about it.
    ///
    /// This method only returns [`PopCachedBranchOutcome::CalculateLeaves`] if there is a cached
    /// branch on top of the stack.
    #[inline]
    fn try_pop_cached_branch(
        &mut self,
        trie_cursor_state: &mut TrieCursorState,
        sub_trie_prefix: &Nibbles,
        uncalculated_lower_bound: &Option<Nibbles>,
    ) -> Result<PopCachedBranchOutcome, StateProofError> {
        // If there is a branch on top of the stack we use that.
        if let Some(cached) = self.cached_branch_stack.pop() {
            return Ok(PopCachedBranchOutcome::Popped(cached));
        }

        // There is no cached branch on the stack. It's possible that another one exists
        // farther on in the trie, but we perform some checks first to prevent unnecessary
        // attempts to find it.

        // If the `uncalculated_lower_bound` is None it indicates that there can be no more
        // leaf data, so similarly there can be no more branches.
        let Some(uncalculated_lower_bound) = uncalculated_lower_bound else {
            return Ok(PopCachedBranchOutcome::Exhausted)
        };

        // If [`TrieCursorState::path`] returns None it means that the cursor has been
        // exhausted, so there can be no more cached data.
        let Some(mut trie_cursor_path) = trie_cursor_state.path() else {
            return Ok(PopCachedBranchOutcome::Exhausted)
        };

        // If the trie cursor is seeked to a branch whose leaves have already been processed
        // then we can't use it, instead we seek forward and try again.
        if trie_cursor_path < uncalculated_lower_bound {
            *trie_cursor_state =
                TrieCursorState::seeked(self.trie_cursor.seek(*uncalculated_lower_bound)?);

            // Having just seeked forward we need to check if the cursor is now exhausted,
            // extracting the new path at the same time.
            if let Some(new_trie_cursor_path) = trie_cursor_state.path() {
                trie_cursor_path = new_trie_cursor_path
            } else {
                return Ok(PopCachedBranchOutcome::Exhausted)
            };
        }

        // If the trie cursor has exceeded the sub-trie then we consider it to be exhausted.
        if !trie_cursor_path.starts_with(sub_trie_prefix) {
            return Ok(PopCachedBranchOutcome::Exhausted)
        }

        // At this point we can be sure that the cursor is in an `Available` state. We know for
        // sure it's not `Exhausted` because of the calls to `path` above, and we know it's not
        // `Taken` because we push all taken branches onto the `cached_branch_stack`, and the
        // stack is empty.
        //
        // We will use this `Available` cached branch as our next branch.
        let cached = trie_cursor_state.take();
        trace!(target: TRACE_TARGET, cached=?cached, "Pushed next trie node onto cached_branch_stack");

        // If the calculated range is not caught up to the next cached branch it means there
        // are portions of the trie prior to that branch which may need to be calculated;
        // return the uncalculated range up to that branch to make that happen.
        //
        // If the next cached branch's path is all zeros then we can skip this catch-up step,
        // because there cannot be any keys prior to that range.
        let cached_path = &cached.0;
        if uncalculated_lower_bound < cached_path && !PATH_ALL_ZEROS.starts_with(cached_path) {
            let range = (*uncalculated_lower_bound, Some(*cached_path));
            trace!(target: TRACE_TARGET, ?range, "Returning key range to calculate in order to catch up to cached branch");

            // Push the cached branch onto the stack so it's available once the leaf range is done
            // being calculated.
            self.cached_branch_stack.push(cached);

            return Ok(PopCachedBranchOutcome::CalculateLeaves(range));
        }

        Ok(PopCachedBranchOutcome::Popped(cached))
    }

    /// Accepts the current state of both hashed and trie cursors, and determines the next range of
    /// hashed keys which need to be processed using [`Self::push_leaf`].
    ///
    /// This method will use cached branch node data from the trie cursor to skip over all possible
    /// ranges of keys, to reduce computation as much as possible.
    ///
    /// # Returns
    ///
    /// - `None`: No more data to process, finish computation
    ///
    /// - `Some(lower, None)`: Indicates to call `push_leaf` on all keys starting at `lower`, with
    ///   no upper bound. This method won't be called again after this.
    ///
    /// - `Some(lower, Some(upper))`: Indicates to call `push_leaf` on all keys starting at `lower`,
    ///   up to but excluding `upper`, and then call this method once done.
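    ///
    /// For example (hypothetical state): if the top cached branch at 0x3 has a `hash_mask`
    /// covering child 0x0 but no cached data for child 0x1, the cached hash is pushed for 0x0 and
    /// this returns `Some((0x31, Some(0x32)))` so that only the sub-trie under 0x31 is rebuilt
    /// from leaves; later children of 0x3 are handled on subsequent calls.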
    #[instrument(target = TRACE_TARGET, level = "trace", skip_all)]
    fn next_uncached_key_range<'a>(
        &mut self,
        targets: &mut TargetsCursor<'a>,
        trie_cursor_state: &mut TrieCursorState,
        sub_trie_prefix: &Nibbles,
        sub_trie_upper_bound: Option<&Nibbles>,
        mut uncalculated_lower_bound: Option<Nibbles>,
    ) -> Result<Option<(Nibbles, Option<Nibbles>)>, StateProofError> {
        // Pop any under-construction branches that are now complete.
        // All trie data prior to the current cached branch, if any, has been computed. Any branches
        // which were under-construction previously, and which are not on the same path as this
        // cached branch, can be assumed to be completed; they will not have any further keys added
        // to them.
        if let Some(cached_path) = self.cached_branch_stack.last().map(|kv| kv.0) {
            while !cached_path.starts_with(&self.branch_path) {
                self.pop_branch(targets)?;
            }
        }

        loop {
            // Pop the currently cached branch node.
            //
            // NOTE we pop off the `cached_branch_stack` because cloning the `BranchNodeCompact`
            // means cloning an Arc, which incurs synchronization overhead. We have to be sure to
            // push the cached branch back onto the stack once done.
            let (cached_path, cached_branch) = match self.try_pop_cached_branch(
                trie_cursor_state,
                sub_trie_prefix,
                &uncalculated_lower_bound,
            )? {
                PopCachedBranchOutcome::Popped(cached) => cached,
                PopCachedBranchOutcome::Exhausted => {
                    // If cached branches are exhausted it's possible that there is still an
                    // unbounded range of leaves to be processed. `uncalculated_lower_bound` is
                    // used to return that range.
                    trace!(target: TRACE_TARGET, ?uncalculated_lower_bound, "Exhausted cached trie nodes");
                    return Ok(uncalculated_lower_bound
                        .map(|lower| (lower, sub_trie_upper_bound.copied())));
                }
                PopCachedBranchOutcome::CalculateLeaves(range) => {
                    return Ok(Some(range));
                }
            };

            trace!(
                target: TRACE_TARGET,
                branch_path = ?self.branch_path,
                branch_state_mask = ?self.branch_stack.last().map(|b| b.state_mask),
                ?cached_path,
                cached_branch_state_mask = ?cached_branch.state_mask,
                cached_branch_hash_mask = ?cached_branch.hash_mask,
                "loop",
            );

            // Since we've popped all branches which don't start with cached_path, branch_path at
            // this point must be equal to or shorter than cached_path.
            debug_assert!(
                self.branch_path.len() < cached_path.len() || self.branch_path == cached_path,
                "branch_path {:?} is different-or-longer-than cached_path {cached_path:?}",
                self.branch_path
            );

            // If the branch_path != cached_path it means the branch_stack is either empty, or the
            // top branch is the parent of this cached branch. Either way we push a branch
            // corresponding to the cached one onto the stack, so we can begin constructing it.
            if self.branch_path != cached_path {
                self.push_cached_branch(targets, cached_path, &cached_branch)?;
            }

            // At this point the top of the branch stack is the same branch which was found in the
            // cache.
            let curr_branch =
                self.branch_stack.last().expect("top of branch_stack corresponds to cached branch");

            let cached_state_mask = cached_branch.state_mask.get();
            let curr_state_mask = curr_branch.state_mask.get();

            // Determine all child nibbles which are set in the cached branch but not the
            // under-construction branch.
            let next_child_nibbles = curr_state_mask ^ cached_state_mask;
            debug_assert_eq!(
                cached_state_mask | next_child_nibbles, cached_state_mask,
                "curr_branch has state_mask bits set which aren't set on cached_branch. curr_branch:{:?}",
                curr_state_mask,
            );

            // If there are no further children to construct for this branch then pop it off both
            // stacks and loop using the parent branch.
            if next_child_nibbles == 0 {
                trace!(
                    target: TRACE_TARGET,
                    path=?cached_path,
                    ?curr_branch,
                    ?cached_branch,
                    "No further children, popping branch",
                );
                self.pop_branch(targets)?;

                // no need to pop from `cached_branch_stack`, the current cached branch is already
                // popped (see note at the top of the loop).

                // The just-popped branch is completely processed; we know there can be no more keys
                // with that prefix. Set the lower bound which can be returned from this method to
                // be the next possible prefix, if any.
                uncalculated_lower_bound = increment_and_strip_trailing_zeros(&cached_path);

                continue
            }

            // Determine the next nibble of the branch which has not yet been constructed, and
            // determine the child's full path.
            let child_nibble = next_child_nibbles.trailing_zeros() as u8;
            let child_path = self.child_path_at(child_nibble);

            // If the `hash_mask` bit is set for the next child it means the child's hash is cached
            // in the `cached_branch`. We can use that instead of re-calculating the hash of the
            // entire sub-trie.
            //
            // If the child needs to be retained for a proof then we should not use the cached
            // hash, and instead continue on to calculate its node manually.
            if cached_branch.hash_mask.is_bit_set(child_nibble) {
                // Commit the last child. We do this here for two reasons:
                // - `commit_last_child` will check if the last child needs to be retained. We need
                //   to check that before the subsequent `should_retain` call here to prevent
                //   `targets` from being moved beyond the last child before it is checked.
                // - If we do end up using the cached hash value, then we will need to commit the
                //   last child before pushing a new one onto the stack anyway.
                self.commit_last_child(targets)?;

                if !self.should_retain(targets, &child_path, false) {
                    // Pull this child's hash out of the cached branch node. To get the hash's index
                    // we first need to calculate the mask of which cached hashes have already been
                    // used by this branch (if any). The number of set bits in that mask will be the
                    // index of the next hash in the array to use.
                    let curr_hashed_used_mask = cached_branch.hash_mask.get() & curr_state_mask;
                    let hash_idx = curr_hashed_used_mask.count_ones() as usize;
                    let hash = cached_branch.hashes[hash_idx];

                    trace!(
                        target: TRACE_TARGET,
                        ?child_path,
                        ?hash_idx,
                        ?hash,
                        "Using cached hash for child",
                    );

                    self.child_stack.push(ProofTrieBranchChild::RlpNode(RlpNode::word_rlp(&hash)));
                    self.branch_stack
                        .last_mut()
                        .expect("already asserted there is a last branch")
                        .state_mask
                        .set_bit(child_nibble);

                    // Update the `uncalculated_lower_bound` to indicate that the child whose bit
                    // was just set is completely processed.
                    uncalculated_lower_bound = increment_and_strip_trailing_zeros(&child_path);

                    // Push the current cached branch back onto the stack before looping.
                    self.cached_branch_stack.push((cached_path, cached_branch));

                    continue
                }
            }

            // We now want to check if there is a cached branch node at this child. The cached
            // branch node may be the node at this child directly, or this child may be an
            // extension and the cached branch is the child of that extension.

            // All trie nodes prior to `child_path` will not be modified further, so we can seek the
            // trie cursor to the next cached node at-or-after `child_path`.
            if trie_cursor_state.path().is_some_and(|path| path < &child_path) {
                trace!(target: TRACE_TARGET, ?child_path, "Seeking trie cursor to child path");
                *trie_cursor_state = TrieCursorState::seeked(self.trie_cursor.seek(child_path)?);
            }

            // If the next cached branch node is a child of `child_path` then we can assume it is
            // the cached branch for this child. We push it onto the `cached_branch_stack` and loop
            // back to the top.
            if let TrieCursorState::Available(next_cached_path, next_cached_branch) =
                &trie_cursor_state &&
                next_cached_path.starts_with(&child_path)
            {
                // Push the current cached branch back on before pushing its child and then looping
                self.cached_branch_stack.push((cached_path, cached_branch));

                trace!(
                    target: TRACE_TARGET,
                    ?child_path,
                    ?next_cached_path,
                    ?next_cached_branch,
                    "Pushing cached branch for child",
                );
                self.cached_branch_stack.push(trie_cursor_state.take());
                continue;
            }

            // There is no cached data for the sub-trie at this child, we must recalculate the
            // sub-trie root (this child) using the leaves. Return the range of keys based on the
            // child path.
            let child_path_upper = increment_and_strip_trailing_zeros(&child_path);
            trace!(
                target: TRACE_TARGET,
                lower=?child_path,
                upper=?child_path_upper,
                "Returning sub-trie's key range to calculate",
            );

            // Push the current cached branch back onto the stack before returning.
            self.cached_branch_stack.push((cached_path, cached_branch));

            return Ok(Some((child_path, child_path_upper)));
        }
    }

    /// Calculates trie nodes and retains proofs for targeted nodes within a sub-trie. The
    /// sub-trie's bounds are denoted by the `lower_bound` and `upper_bound` arguments;
    /// `upper_bound` is exclusive, and None indicates unbounded.
    #[instrument(
        target = TRACE_TARGET,
        level = "trace",
        skip_all,
        fields(prefix=?sub_trie_targets.prefix),
    )]
    fn proof_subtrie<'a>(
        &mut self,
        value_encoder: &VE,
        trie_cursor_state: &mut TrieCursorState,
        hashed_cursor_current: &mut Option<(Nibbles, VE::DeferredEncoder)>,
        sub_trie_targets: SubTrieTargets<'a>,
    ) -> Result<(), StateProofError> {
        let sub_trie_upper_bound = sub_trie_targets.upper_bound();

        // Wrap targets into a `TargetsCursor`.
        let mut targets = TargetsCursor::new(sub_trie_targets.targets);

        // Ensure initial state is cleared. By the end of the method call these should be empty once
        // again.
        debug_assert!(self.cached_branch_stack.is_empty());
        debug_assert!(self.branch_stack.is_empty());
        debug_assert!(self.branch_path.is_empty());
        debug_assert!(self.child_stack.is_empty());

        // `next_uncached_key_range`, which will be called in the loop below, expects the trie
        // cursor to have already been seeked. If it's not yet seeked, or it's seeked to a node
        // after the prefix, then we seek it to the prefix (the first possible node) to
        // initialize it.
        if trie_cursor_state.before(&sub_trie_targets.prefix) {
            trace!(target: TRACE_TARGET, "Doing initial seek of trie cursor");
            *trie_cursor_state =
                TrieCursorState::seeked(self.trie_cursor.seek(sub_trie_targets.prefix)?);
        }

        // `uncalculated_lower_bound` tracks the lower bound of node paths which have yet to be
        // visited, either via the hashed key cursor (`calculate_key_range`) or trie cursor
        // (`next_uncached_key_range`). If/when this becomes None then there are no further nodes
        // which could exist.
        let mut uncalculated_lower_bound = Some(sub_trie_targets.prefix);

        trace!(target: TRACE_TARGET, "Starting loop");
        loop {
            // Determine the range of keys of the overall trie which need to be re-computed.
            let Some((calc_lower_bound, calc_upper_bound)) = self.next_uncached_key_range(
                &mut targets,
                trie_cursor_state,
                &sub_trie_targets.prefix,
                sub_trie_upper_bound.as_ref(),
                uncalculated_lower_bound,
            )?
            else {
                // If `next_uncached_key_range` determines that there can be no more keys then
                // complete the computation.
                break;
            };

            // Calculate the trie for that range of keys
            self.calculate_key_range(
                value_encoder,
                &mut targets,
                hashed_cursor_current,
                calc_lower_bound,
                calc_upper_bound,
            )?;

            // Once outside `calculate_key_range`, `hashed_cursor_current` will be at the first key
            // after the range.
            //
            // If the `hashed_cursor_current` is None (exhausted), or not within the range of the
            // sub-trie, then there are no more keys at all, meaning the trie couldn't possibly have
            // more data and we should complete computation.
            if hashed_cursor_current
                .as_ref()
                .is_none_or(|(key, _)| !key.starts_with(&sub_trie_targets.prefix))
            {
                break;
            }

            // The upper bound of the previous calculation becomes the lower bound of the
            // uncalculated range, for which we'll once again check for cached data.
            uncalculated_lower_bound = calc_upper_bound;
        }

        // Once there are no more leaves we can pop the remaining branches, if any.
        trace!(target: TRACE_TARGET, "Exited loop, popping remaining branches");
        while !self.branch_stack.is_empty() {
            self.pop_branch(&mut targets)?;
        }

        // At this point the branch stack should be empty. If the child stack is empty it means no
        // keys were ever iterated from the hashed cursor in the first place. Otherwise there should
        // only be a single node left: the root node.
        debug_assert!(self.branch_stack.is_empty());
        debug_assert!(self.branch_path.is_empty());
        debug_assert!(self.child_stack.len() < 2);

        // The `cached_branch_stack` may still have cached branches on it, as it's not affected by
        // `pop_branch`, but it is no longer needed and should be cleared.
        self.cached_branch_stack.clear();

        // We always pop the root node off of the `child_stack` in order to empty it, however we
        // might not want to retain the node unless the `SubTrieTargets` indicates it.
        trace!(
            target: TRACE_TARGET,
            retain_root = ?sub_trie_targets.retain_root,
            child_stack_empty = self.child_stack.is_empty(),
            "Maybe retaining root",
        );
        match (sub_trie_targets.retain_root, self.child_stack.pop()) {
            (false, _) => {
                // Whether the root node exists or not, we don't want it.
            }
            (true, None) => {
                // If `child_stack` is empty it means there were no keys at all, retain an empty
                // root node.
                self.retained_proofs.push(ProofTrieNode {
                    path: Nibbles::new(), // root path
                    node: TrieNode::EmptyRoot,
                    masks: TrieMasks::none(),
                });
            }
            (true, Some(root_node)) => {
                // Encode and retain the root node.
                self.rlp_encode_buf.clear();
                let root_node =
                    root_node.into_proof_trie_node(Nibbles::new(), &mut self.rlp_encode_buf)?;
                self.retained_proofs.push(root_node);
            }
        }

        Ok(())
    }

    /// Internal implementation of proof calculation. Assumes both cursors have already been reset.
    /// See docs on [`Self::proof`] for expected behavior.
    fn proof_inner(
        &mut self,
        value_encoder: &VE,
        targets: &mut [Target],
    ) -> Result<Vec<ProofTrieNode>, StateProofError> {
        // If there are no targets then nothing could be returned, return early.
        if targets.is_empty() {
            trace!(target: TRACE_TARGET, "Empty targets, returning");
            return Ok(Vec::new())
        }

        // Initialize the variables which track the state of the two cursors. Both indicate that
        // the cursors are unseeked.
        let mut trie_cursor_state = TrieCursorState::unseeked();
        let mut hashed_cursor_current: Option<(Nibbles, VE::DeferredEncoder)> = None;

        // Divide targets into chunks, each chunk corresponding to a different sub-trie within the
        // overall trie, and handle all proofs within that sub-trie.
        for sub_trie_targets in iter_sub_trie_targets(targets) {
            self.proof_subtrie(
                value_encoder,
                &mut trie_cursor_state,
                &mut hashed_cursor_current,
                sub_trie_targets,
            )?;
        }

        trace!(
            target: TRACE_TARGET,
            retained_proofs_len = ?self.retained_proofs.len(),
            "proof_inner: returning",
        );
        Ok(core::mem::take(&mut self.retained_proofs))
    }
}

impl<TC, HC, VE> ProofCalculator<TC, HC, VE>
where
    TC: TrieCursor,
    HC: HashedCursor,
    VE: LeafValueEncoder<Value = HC::Value>,
{
    /// Generate a proof for the given targets.
    ///
    /// Given a set of [`Target`]s, returns nodes whose paths are a prefix of any target. The
    /// returned nodes will be sorted depth-first by path.
    ///
    /// # Panics
    ///
    /// In debug builds, panics if the targets are not sorted lexicographically.
    #[instrument(target = TRACE_TARGET, level = "trace", skip_all)]
    pub fn proof(
        &mut self,
        value_encoder: &VE,
        targets: &mut [Target],
    ) -> Result<Vec<ProofTrieNode>, StateProofError> {
        self.trie_cursor.reset();
        self.hashed_cursor.reset();
        self.proof_inner(value_encoder, targets)
    }
}

/// A proof calculator for storage tries.
pub type StorageProofCalculator<TC, HC> = ProofCalculator<TC, HC, StorageValueEncoder>;

impl<TC, HC> StorageProofCalculator<TC, HC>
where
    TC: TrieStorageCursor,
    HC: HashedStorageCursor<Value = U256>,
{
    /// Create a new [`StorageProofCalculator`] instance.
    pub fn new_storage(trie_cursor: TC, hashed_cursor: HC) -> Self {
        Self::new(trie_cursor, hashed_cursor)
    }

    /// Generate a proof for a storage trie at the given hashed address.
    ///
    /// Given a set of [`Target`]s, returns nodes whose paths are a prefix of any target. The
    /// returned nodes will be sorted depth-first by path.
    ///
    /// # Panics
    ///
    /// In debug builds, panics if the targets are not sorted lexicographically.
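    ///
    /// A minimal sketch (hypothetical cursors and targets; not a compilable doctest):
    ///
    /// ```text
    /// let mut calc = StorageProofCalculator::new_storage(trie_cursor, hashed_cursor);
    /// // Proves slots in the storage trie of the account hashed to `hashed_address`.
    /// let nodes = calc.storage_proof(hashed_address, &mut targets)?;
    /// ```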
    #[instrument(target = TRACE_TARGET, level = "trace", skip(self, targets))]
    pub fn storage_proof(
        &mut self,
        hashed_address: B256,
        targets: &mut [Target],
    ) -> Result<Vec<ProofTrieNode>, StateProofError> {
        /// Static storage value encoder instance used by all storage proofs.
        static STORAGE_VALUE_ENCODER: StorageValueEncoder = StorageValueEncoder;

        self.hashed_cursor.set_hashed_address(hashed_address);

        // Shortcut: check if storage is empty
        if self.hashed_cursor.is_storage_empty()? {
            // Return a single EmptyRoot node at the root path
            return Ok(vec![ProofTrieNode {
                path: Nibbles::default(),
                node: TrieNode::EmptyRoot,
                masks: TrieMasks::none(),
            }])
        }

        // Don't call `set_hashed_address` on the trie cursor until after the previous shortcut has
        // been checked.
        self.trie_cursor.set_hashed_address(hashed_address);

        // Use the static StorageValueEncoder and pass it to proof_inner
        self.proof_inner(&STORAGE_VALUE_ENCODER, targets)
    }
}
1358
1359/// Helper type wrapping a slice of [`Target`]s, primarily used to iterate through targets in
1360/// [`ProofCalculator::should_retain`].
1361///
1362/// It is assumed that the underlying slice is never empty, and that the iterator is never
1363/// exhausted.
1364struct TargetsCursor<'a> {
1365 targets: &'a [Target],
1366 i: usize,
1367}
1368
1369impl<'a> TargetsCursor<'a> {
1370 /// Wraps a slice of [`Target`]s with the `TargetsCursor`.
1371 ///
1372 /// # Panics
1373 ///
1374 /// Will panic in debug mode if called with an empty slice.
1375 fn new(targets: &'a [Target]) -> Self {
1376 debug_assert!(!targets.is_empty());
1377 Self { targets, i: 0 }
1378 }
1379
    /// Returns the [`Target`] the cursor currently points at, along with the next one (if any).
1381 fn current(&self) -> (&'a Target, Option<&'a Target>) {
1382 (&self.targets[self.i], self.targets.get(self.i + 1))
1383 }
1384
    /// Advances the cursor and returns the new current and next [`Target`]s.
1386 ///
1387 /// # Panics
1388 ///
1389 /// Will panic if the cursor is exhausted.
1390 fn next(&mut self) -> (&'a Target, Option<&'a Target>) {
1391 self.i += 1;
1392 debug_assert!(self.i < self.targets.len());
1393 self.current()
1394 }
1395
    /// Iterates forwards over the slice, starting from the [`Target`] after the current one.
1397 fn skip_iter(&self) -> impl Iterator<Item = &'a Target> {
1398 self.targets[self.i + 1..].iter()
1399 }
1400
    /// Iterates backwards over the slice, starting from the [`Target`] before the current one.
1402 fn rev_iter(&self) -> impl Iterator<Item = &'a Target> {
1403 self.targets[..self.i].iter().rev()
1404 }
1405}
1406
1407/// Used to track the state of the trie cursor, allowing us to differentiate between a branch having
1408/// been taken (used as a cached branch) and the cursor having been exhausted.
1409#[derive(Debug)]
1410enum TrieCursorState {
1411 /// The initial state of the cursor, indicating it's never been seeked.
1412 Unseeked,
1413 /// Cursor is seeked to this path and the node has not been used yet.
1414 Available(Nibbles, BranchNodeCompact),
1415 /// Cursor is seeked to this path, but the node has been used.
1416 Taken(Nibbles),
1417 /// Cursor has been exhausted.
1418 Exhausted,
1419}
1420
1421impl TrieCursorState {
    /// Creates a [`Self::Unseeked`], indicating the cursor has never been seeked.
1423 const fn unseeked() -> Self {
1424 Self::Unseeked
1425 }
1426
1427 /// Creates a [`Self`] based on an entry returned from the cursor itself.
1428 fn seeked(entry: Option<(Nibbles, BranchNodeCompact)>) -> Self {
1429 entry.map_or(Self::Exhausted, |(path, node)| Self::Available(path, node))
1430 }
1431
1432 /// Returns the path the cursor is seeked to, or None if it's exhausted.
1433 ///
1434 /// # Panics
1435 ///
1436 /// Panics if the cursor is unseeked.
1437 const fn path(&self) -> Option<&Nibbles> {
1438 match self {
1439 Self::Unseeked => panic!("cursor is unseeked"),
1440 Self::Available(path, _) | Self::Taken(path) => Some(path),
1441 Self::Exhausted => None,
1442 }
1443 }
1444
1445 /// Returns true if the cursor is unseeked, or is seeked to a node prior to the given one.
1446 fn before(&self, path: &Nibbles) -> bool {
1447 match self {
1448 Self::Unseeked => true,
            Self::Available(seeked_to, _) | Self::Taken(seeked_to) => seeked_to < path,
1450 Self::Exhausted => false,
1451 }
1452 }
1453
1454 /// Takes the path and node from a [`Self::Available`]. Panics if not [`Self::Available`].
1455 fn take(&mut self) -> (Nibbles, BranchNodeCompact) {
1456 let Self::Available(path, _) = self else {
1457 panic!("take called on non-Available: {self:?}")
1458 };
1459
1460 let path = *path;
1461 let Self::Available(path, node) = core::mem::replace(self, Self::Taken(path)) else {
1462 unreachable!("already checked that self is Self::Available");
1463 };
1464
1465 (path, node)
1466 }
1467}
1468
1469/// Describes the state of the currently cached branch node (if any).
1470enum PopCachedBranchOutcome {
1471 /// Cached branch has been popped from the `cached_branch_stack` and is ready to be used.
1472 Popped((Nibbles, BranchNodeCompact)),
1473 /// All cached branches have been exhausted.
1474 Exhausted,
    /// Need to calculate leaves over this range (exclusive upper bound) before the cached branch
    /// can be used (the catch-up range). If the upper bound is `None` the range is unbounded.
    CalculateLeaves((Nibbles, Option<Nibbles>)),
1478}
1479
/// Increments the nibbles and strips any trailing zero nibbles from the result.
///
/// This function wraps `Nibbles::increment`; when the incremented value ends in zero nibbles,
/// they are stripped using bit manipulation on the underlying U256. For example, incrementing
/// `[0x0, 0xF, 0xF]` yields `[0x1, 0x0, 0x0]`, which is stripped to `[0x1]`.
1484fn increment_and_strip_trailing_zeros(nibbles: &Nibbles) -> Option<Nibbles> {
1485 let mut result = nibbles.increment()?;
1486
1487 // If result is empty, just return it
1488 if result.is_empty() {
1489 return Some(result);
1490 }
1491
1492 // Get access to the underlying U256 to detect trailing zeros
1493 let uint_val = *result.as_mut_uint_unchecked();
1494 let non_zero_prefix_len = 64 - (uint_val.trailing_zeros() / 4);
1495 result.truncate(non_zero_prefix_len);
1496
1497 Some(result)
1498}
1499
1500#[cfg(test)]
1501mod tests {
1502 use super::*;
1503 use crate::{
1504 hashed_cursor::{
1505 mock::MockHashedCursorFactory, HashedCursorFactory, HashedCursorMetricsCache,
1506 InstrumentedHashedCursor,
1507 },
1508 proof::Proof,
1509 trie_cursor::{
1510 depth_first, mock::MockTrieCursorFactory, InstrumentedTrieCursor, TrieCursorFactory,
1511 TrieCursorMetricsCache,
1512 },
1513 };
1514 use alloy_primitives::map::{B256Map, B256Set};
1515 use alloy_rlp::Decodable;
1516 use itertools::Itertools;
1517 use reth_primitives_traits::Account;
1518 use reth_trie_common::{
1519 updates::{StorageTrieUpdates, TrieUpdates},
1520 HashedPostState, MultiProofTargets, TrieNode,
1521 };
1522
1523 /// Target to use with the `tracing` crate.
1524 static TRACE_TARGET: &str = "trie::proof_v2::tests";
1525
1526 /// A test harness for comparing `ProofCalculator` and legacy `Proof` implementations.
1527 ///
1528 /// This harness creates mock cursor factories from a `HashedPostState` and provides
1529 /// a method to test that both proof implementations produce equivalent results.
1530 struct ProofTestHarness {
1531 /// Mock factory for trie cursors (empty by default for leaf-only tests)
1532 trie_cursor_factory: MockTrieCursorFactory,
1533 /// Mock factory for hashed cursors, populated from `HashedPostState`
1534 hashed_cursor_factory: MockHashedCursorFactory,
1535 }
1536
1537 impl ProofTestHarness {
1538 /// Creates a new test harness from a `HashedPostState`.
1539 ///
1540 /// The `HashedPostState` is used to populate the mock hashed cursor factory directly.
1541 /// The trie cursor factory is initialized from `TrieUpdates` generated by `StateRoot`.
1542 fn new(post_state: HashedPostState) -> Self {
1543 // Create empty trie cursor factory to serve as the initial state for StateRoot
1544 // Ensure that there's a storage trie dataset for every account, to make
1545 // `MockTrieCursorFactory` happy.
1546 let storage_tries: B256Map<_> = post_state
1547 .accounts
1548 .keys()
1549 .copied()
1550 .map(|addr| (addr, StorageTrieUpdates::default()))
1551 .collect();
1552
1553 let empty_trie_cursor_factory = MockTrieCursorFactory::from_trie_updates(TrieUpdates {
1554 storage_tries: storage_tries.clone(),
1555 ..Default::default()
1556 });
1557
1558 // Create mock hashed cursor factory from the post state
1559 let hashed_cursor_factory = MockHashedCursorFactory::from_hashed_post_state(post_state);
1560
1561 // Generate TrieUpdates using StateRoot
1562 let (_root, mut trie_updates) =
1563 crate::StateRoot::new(empty_trie_cursor_factory, hashed_cursor_factory.clone())
1564 .root_with_updates()
1565 .expect("StateRoot should succeed");
1566
1567 // Continue using empty storage tries for each account, to keep `MockTrieCursorFactory`
1568 // happy.
1569 trie_updates.storage_tries = storage_tries;
1570
1571 // Initialize trie cursor factory from the generated TrieUpdates
1572 let trie_cursor_factory = MockTrieCursorFactory::from_trie_updates(trie_updates);
1573
1574 Self { trie_cursor_factory, hashed_cursor_factory }
1575 }
1576
1577 /// Asserts that `ProofCalculator` and legacy `Proof` produce equivalent results for account
1578 /// proofs.
1579 ///
1580 /// This method calls both implementations with the given account targets and compares
1581 /// the results.
1582 fn assert_proof(
1583 &self,
1584 targets: impl IntoIterator<Item = Target>,
1585 ) -> Result<(), StateProofError> {
1586 let targets_vec = targets.into_iter().collect::<Vec<_>>();
1587
1588 // Convert Target keys to MultiProofTargets for legacy implementation
1589 // For account-only proofs, each account maps to an empty storage set
            // Legacy implementation only uses the keys, not the `min_len`
1591 let legacy_targets = targets_vec
1592 .iter()
1593 .map(|target| (B256::from_slice(&target.key.pack()), B256Set::default()))
1594 .collect::<MultiProofTargets>();
1595
1596 // Create ProofCalculator (proof_v2) with account cursors
1597 let trie_cursor = self.trie_cursor_factory.account_trie_cursor()?;
1598 let hashed_cursor = self.hashed_cursor_factory.hashed_account_cursor()?;
1599
1600 // Collect metrics for cursors
1601 let mut trie_cursor_metrics = TrieCursorMetricsCache::default();
1602 let trie_cursor = InstrumentedTrieCursor::new(trie_cursor, &mut trie_cursor_metrics);
1603 let mut hashed_cursor_metrics = HashedCursorMetricsCache::default();
1604 let hashed_cursor =
1605 InstrumentedHashedCursor::new(hashed_cursor, &mut hashed_cursor_metrics);
1606
1607 // Call ProofCalculator::proof with account targets
1608 let value_encoder = SyncAccountValueEncoder::new(
1609 self.trie_cursor_factory.clone(),
1610 self.hashed_cursor_factory.clone(),
1611 );
1612 let mut proof_calculator = ProofCalculator::new(trie_cursor, hashed_cursor);
1613 let proof_v2_result =
1614 proof_calculator.proof(&value_encoder, &mut targets_vec.clone())?;
1615
1616 // Output metrics
1617 trace!(target: TRACE_TARGET, ?trie_cursor_metrics, "V2 trie cursor metrics");
1618 trace!(target: TRACE_TARGET, ?hashed_cursor_metrics, "V2 hashed cursor metrics");
1619
1620 // Call Proof::multiproof (legacy implementation)
1621 let proof_legacy_result =
1622 Proof::new(self.trie_cursor_factory.clone(), self.hashed_cursor_factory.clone())
1623 .with_branch_node_masks(true)
1624 .multiproof(legacy_targets)?;
1625
1626 // Helper function to check if a node path matches at least one target
1627 let node_matches_target = |node_path: &Nibbles| -> bool {
1628 targets_vec.iter().any(|target| {
1629 // Node path must be a prefix of the target's key
1630 target.key.starts_with(node_path) &&
1631 // Node path must be at least `min_len` long
1632 node_path.len() >= target.min_len as usize
1633 })
1634 };
1635
1636 // Decode and sort legacy proof nodes, filtering to only those that match at least one
1637 // target
1638 let proof_legacy_nodes = proof_legacy_result
1639 .account_subtree
1640 .iter()
1641 .filter(|(path, _)| node_matches_target(path))
1642 .map(|(path, node_enc)| {
1643 let mut buf = node_enc.as_ref();
1644 let node = TrieNode::decode(&mut buf)
1645 .expect("legacy implementation should not produce malformed proof nodes");
1646
                // The legacy proof calculator will calculate masks for the root node, even
                // though we never store the root node, so the masks for it aren't really valid.
1649 let masks = if path.is_empty() {
1650 TrieMasks::none()
1651 } else if let Some(branch_masks) =
1652 proof_legacy_result.branch_node_masks.get(path)
1653 {
1654 TrieMasks {
1655 hash_mask: Some(branch_masks.hash_mask),
1656 tree_mask: Some(branch_masks.tree_mask),
1657 }
1658 } else {
1659 TrieMasks::none()
1660 };
1661
1662 ProofTrieNode { path: *path, node, masks }
1663 })
1664 .sorted_by(|a, b| depth_first::cmp(&a.path, &b.path))
1665 .collect::<Vec<_>>();
1666
1667 // Basic comparison: both should succeed and produce identical results
1668 pretty_assertions::assert_eq!(proof_legacy_nodes, proof_v2_result);
1669
1670 Ok(())
1671 }
1672 }
1673
1674 mod proptest_tests {
1675 use super::*;
1676 use alloy_primitives::{map::B256Map, U256};
1677 use proptest::prelude::*;
1678 use reth_trie_common::HashedPostState;
1679
1680 /// Generate a strategy for Account values
1681 fn account_strategy() -> impl Strategy<Value = Account> {
1682 (any::<u64>(), any::<u64>(), any::<[u8; 32]>()).prop_map(
1683 |(nonce, balance, code_hash)| Account {
1684 nonce,
1685 balance: U256::from(balance),
1686 bytecode_hash: Some(B256::from(code_hash)),
1687 },
1688 )
1689 }
1690
1691 /// Generate a strategy for `HashedPostState` with random accounts
1692 fn hashed_post_state_strategy() -> impl Strategy<Value = HashedPostState> {
1693 prop::collection::vec((any::<[u8; 32]>(), account_strategy()), 0..=100).prop_map(
1694 |accounts| {
1695 let account_map = accounts
1696 .into_iter()
1697 .map(|(addr_bytes, account)| (B256::from(addr_bytes), Some(account)))
1698 .collect::<B256Map<_>>();
1699
1700 HashedPostState { accounts: account_map, ..Default::default() }
1701 },
1702 )
1703 }
1704
1705 /// Generate a strategy for proof targets that are 80% from the `HashedPostState` accounts
1706 /// and 20% random keys. Each target has a random `min_len` of 0..16.
1707 fn proof_targets_strategy(account_keys: Vec<B256>) -> impl Strategy<Value = Vec<Target>> {
1708 let num_accounts = account_keys.len();
1709
1710 // Generate between 0 and (num_accounts + 5) targets
1711 let target_count = 0..=(num_accounts + 5);
1712
1713 target_count.prop_flat_map(move |count| {
1714 let account_keys = account_keys.clone();
1715 prop::collection::vec(
1716 (
1717 prop::bool::weighted(0.8).prop_flat_map(move |from_accounts| {
1718 if from_accounts && !account_keys.is_empty() {
1719 // 80% chance: pick from existing account keys
1720 prop::sample::select(account_keys.clone()).boxed()
1721 } else {
1722 // 20% chance: generate random B256
1723 any::<[u8; 32]>().prop_map(B256::from).boxed()
1724 }
1725 }),
1726 0u8..16u8, // Random min_len from 0 to 15
1727 )
1728 .prop_map(|(key, min_len)| Target::new(key).with_min_len(min_len)),
1729 count,
1730 )
1731 })
1732 }
1733
1734 proptest! {
1735 #![proptest_config(ProptestConfig::with_cases(8000))]
1736 #[test]
1737 /// Tests that ProofCalculator produces valid proofs for randomly generated
1738 /// HashedPostState with proof targets.
1739 ///
1740 /// This test:
1741 /// - Generates random accounts in a HashedPostState
1742 /// - Generates proof targets: 80% from existing account keys, 20% random
1743 /// - Creates a test harness with the generated state
1744 /// - Calls assert_proof with the generated targets
1745 /// - Verifies both ProofCalculator and legacy Proof produce equivalent results
1746 fn proptest_proof_with_targets(
1747 (post_state, targets) in hashed_post_state_strategy()
1748 .prop_flat_map(|post_state| {
1749 let mut account_keys: Vec<B256> = post_state.accounts.keys().copied().collect();
1750 // Sort to ensure deterministic order when using PROPTEST_RNG_SEED
1751 account_keys.sort_unstable();
1752 let targets_strategy = proof_targets_strategy(account_keys);
1753 (Just(post_state), targets_strategy)
1754 })
1755 ) {
1756 reth_tracing::init_test_tracing();
1757 let harness = ProofTestHarness::new(post_state);
1758
1759 // Pass generated targets to both implementations
1760 harness.assert_proof(targets).expect("Proof generation failed");
1761 }
1762 }
1763 }
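
    /// A minimal sketch exercising `TargetsCursor` navigation. The test name and keys are not
    /// from the original suite; the keys are arbitrary values chosen only so that the slice is
    /// already sorted lexicographically.
    #[test]
    fn test_targets_cursor_navigation() {
        let targets = vec![
            Target::new(B256::repeat_byte(0x00)),
            Target::new(B256::repeat_byte(0x11)),
            Target::new(B256::repeat_byte(0x22)),
        ];

        let mut cursor = TargetsCursor::new(&targets);

        // The cursor starts at the first target, with the second as "next".
        let (current, next) = cursor.current();
        assert_eq!(current.key, targets[0].key);
        assert_eq!(next.map(|t| t.key), Some(targets[1].key));

        // `skip_iter` yields the targets after the current one, `rev_iter` those before it.
        assert_eq!(cursor.skip_iter().count(), 2);
        assert_eq!(cursor.rev_iter().count(), 0);

        // Advancing moves "next" into "current".
        let (current, next) = cursor.next();
        assert_eq!(current.key, targets[1].key);
        assert_eq!(next.map(|t| t.key), Some(targets[2].key));
        assert_eq!(cursor.rev_iter().count(), 1);
    }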
1764
1765 #[test]
1766 fn test_big_trie() {
1767 use rand::prelude::*;
1768
1769 reth_tracing::init_test_tracing();
1770 let mut rng = rand::rngs::SmallRng::seed_from_u64(1);
1771
1772 let mut rand_b256 = || {
1773 let mut buf: [u8; 32] = [0; 32];
1774 rng.fill_bytes(&mut buf);
1775 B256::from_slice(&buf)
1776 };
1777
1778 // Generate random HashedPostState.
1779 let mut post_state = HashedPostState::default();
1780 for _ in 0..10240 {
1781 let hashed_addr = rand_b256();
1782 let account = Account { bytecode_hash: Some(hashed_addr), ..Default::default() };
1783 post_state.accounts.insert(hashed_addr, Some(account));
1784 }
1785
        // Collect targets: all of the real keys, plus some random keys which probably won't
        // exist.
        let mut targets = post_state.accounts.keys().copied().sorted().collect::<Vec<_>>();
1791 for _ in 0..post_state.accounts.len() / 5 {
1792 targets.push(rand_b256());
1793 }
1794 targets.sort();
1795
1796 // Create test harness
1797 let harness = ProofTestHarness::new(post_state);
1798
1799 // Assert the proof (convert B256 to Target with no min_len for this test)
1800 harness
1801 .assert_proof(targets.into_iter().map(Target::new))
1802 .expect("Proof generation failed");
1803 }
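
    /// A minimal sketch of `TrieCursorState::before` semantics, exercising only the variants
    /// that don't require constructing a branch node: an unseeked cursor is before every path,
    /// an exhausted one is before none, and a seeked cursor is before exactly the paths beyond
    /// its position.
    #[test]
    fn test_trie_cursor_state_before() {
        let path = Nibbles::from_nibbles([0x5]);

        assert!(TrieCursorState::unseeked().before(&path));
        assert!(!TrieCursorState::Exhausted.before(&path));

        let taken = TrieCursorState::Taken(path);
        assert!(!taken.before(&Nibbles::from_nibbles([0x4])));
        assert!(!taken.before(&path));
        assert!(taken.before(&Nibbles::from_nibbles([0x6])));
    }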
1804
1805 #[test]
1806 fn test_increment_and_strip_trailing_zeros() {
1807 let test_cases: Vec<(Nibbles, Option<Nibbles>)> = vec![
1808 // Basic increment without trailing zeros
1809 (Nibbles::from_nibbles([0x1, 0x2, 0x3]), Some(Nibbles::from_nibbles([0x1, 0x2, 0x4]))),
1810 // Increment with trailing zeros - should be stripped
1811 (Nibbles::from_nibbles([0x0, 0x0, 0xF]), Some(Nibbles::from_nibbles([0x0, 0x1]))),
1812 (Nibbles::from_nibbles([0x0, 0xF, 0xF]), Some(Nibbles::from_nibbles([0x1]))),
1813 // Overflow case
1814 (Nibbles::from_nibbles([0xF, 0xF, 0xF]), None),
1815 // Empty nibbles
1816 (Nibbles::new(), None),
1817 // Single nibble
1818 (Nibbles::from_nibbles([0x5]), Some(Nibbles::from_nibbles([0x6]))),
1819 // All Fs except last - results in trailing zeros after increment
1820 (Nibbles::from_nibbles([0xE, 0xF, 0xF]), Some(Nibbles::from_nibbles([0xF]))),
1821 ];
1822
1823 for (input, expected) in test_cases {
1824 let result = increment_and_strip_trailing_zeros(&input);
1825 assert_eq!(result, expected, "Failed for input: {:?}", input);
1826 }
1827 }
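
    /// A minimal sketch of the `TrieCursorState` lifecycle: `seeked` wraps a cursor entry,
    /// `take` consumes the node while remembering the path, and `path` keeps reporting the
    /// position afterwards. The branch node is an arbitrary placeholder with two children set
    /// and no cached hashes.
    #[test]
    fn test_trie_cursor_state_take() {
        let path = Nibbles::from_nibbles([0x1, 0x2]);
        let node = BranchNodeCompact::new(
            TrieMask::new(0b11),
            TrieMask::new(0),
            TrieMask::new(0),
            vec![],
            None,
        );

        let mut state = TrieCursorState::seeked(Some((path, node.clone())));
        assert_eq!(state.path(), Some(&path));

        let (taken_path, taken_node) = state.take();
        assert_eq!(taken_path, path);
        assert_eq!(taken_node, node);

        // After `take` the state still remembers its position, but the node is gone.
        assert_eq!(state.path(), Some(&path));

        // An exhausted cursor has no path.
        assert_eq!(TrieCursorState::seeked(None).path(), None);
    }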
1828
1829 #[test]
1830 fn test_failing_proptest_case_0() {
1831 use alloy_primitives::{hex, map::B256Map};
1832
1833 reth_tracing::init_test_tracing();
1834
1835 // Helper function to create B256 from hex string
1836 let b256 = |s: &str| B256::from_slice(&hex::decode(s).unwrap());
1837
1838 // Create the HashedPostState from test case input
1839 let mut accounts = B256Map::default();
1840
1841 // Define all account data from test case input
1842 let account_data = [
1843 (
1844 "9f3a475db85ff1f5b5e82d8614ee4afc670d27aefb9a43da0bd863a54acf1fe6",
1845 8396790837504194281u64,
1846 9224366602005816983u64,
1847 "103c5b0538f4e37944321a30f5cb1f7005d2ee70998106f34f36d7adb838c789",
1848 ),
1849 (
1850 "c736258fdfd23d73ec4c5e54b8c3b58e26726b361d438ef48670f028286b70ca",
1851 9193115115482903760u64,
1852 4515164289866465875u64,
1853 "9f24ef3ab0b4893b0ec38d0e9b00f239da072ccf093b0b24f1ea1f99547abe55",
1854 ),
1855 (
1856 "780a3476520090f97e847181aee17515c5ea30b7607775103df16d2b6611a87a",
1857 8404772182417755681u64,
1858 16639574952778823617u64,
1859 "214b12bee666ce8c64c6bbbcfafa0c3e55b4b05a8724ec4182b9a6caa774c56d",
1860 ),
1861 (
1862 "23ebfa849308a5d02c3048040217cd1f4b71fb01a9b54dafe541284ebec2bcce",
1863 17978809803974566048u64,
1864 11093542035392742776u64,
1865 "5384dfda8f1935d98e463c00a96960ff24e4d4893ec21e5ece0d272df33ac7e9",
1866 ),
1867 (
1868 "348e476c24fac841b11d358431b4526db09edc9f39906e0ac8809886a04f3c5a",
1869 9422945522568453583u64,
1870 9737072818780682487u64,
1871 "79f8f25b2cbb7485c5c7b627917c0f562f012d3d7ddd486212c90fbea0cf686e",
1872 ),
1873 (
1874 "830536ee6c8f780a1cd760457345b79fc09476018a59cf3e8fd427a793d99633",
1875 16497625187081138489u64,
1876 15143978245385012455u64,
1877 "00ede4000cc2a16fca7e930761aaf30d1fddcc3803f0009d6a0742b4ee519342",
1878 ),
1879 (
1880 "806c74b024b2fe81f077ea93d2936c489689f7fe024febc3a0fb71a8a9f22fbc",
1881 8103477314050566918u64,
1882 1383893458340561723u64,
1883 "690ed176136174c4f0cc442e6dcbcf6e7b577e30fc052430b6060f97af1f8e85",
1884 ),
1885 (
1886 "b903d962ffc520877f14e1e8328160e5b22f8086b0f7e9cba7a373a8376028a0",
1887 12972727566246296372u64,
1888 1130659127924527352u64,
1889 "cadf1f09d8e6a0d945a58ccd2ff36e2ae99f8146f02be96873e84bef0462d64a",
1890 ),
1891 (
1892 "d36a16afff0097e06b2c28bd795b889265e2ceff9a086173113fbeb6f7a9bc42",
1893 15682404502571860137u64,
1894 2025886798818635036u64,
1895 "c2cee70663e9ff1b521e2e1602e88723da52ccdc7a69e370cde9595af435e654",
1896 ),
1897 (
1898 "f3e8461cba0b84f5b81f8ca63d0456cb567e701ec1d6e77b1a03624c5018389b",
1899 5663749586038550112u64,
1900 7681243595728002238u64,
1901 "072c547c3ab9744bcd2ed9dbd813bd62866a673f4ca5d46939b65e9507be0e70",
1902 ),
1903 (
1904 "40b71840b6f43a493b32f4aa755e02d572012392fd582c81a513a169447e194c",
1905 518207789203399614u64,
1906 317311275468085815u64,
1907 "85541d48471bf639c2574600a9b637338c49729ba9e741f157cc6ebaae139da0",
1908 ),
1909 (
1910 "3f77cd91ceb7d335dd2527c29e79aaf94f14141438740051eb0163d86c35bcc9",
1911 16227517944662106096u64,
1912 12646193931088343779u64,
1913 "54999911d82dd63d526429275115fa98f6a560bc2d8e00be24962e91e38d7182",
1914 ),
1915 (
1916 "5cd903814ba84daa6956572411cd1bf4d48a8e230003d28cc3f942697bf8debb",
1917 5096288383163945009u64,
1918 17919982845103509853u64,
1919 "6a53c812e713f1bfe6bf21954f291140c60ec3f2ef353ecdae5dc7b263a37282",
1920 ),
1921 (
1922 "23f3602c95fd98d7fbe48a326ae1549030a2c7574099432cce5b458182f16bf2",
1923 11136020130962086191u64,
1924 12045219101880183180u64,
1925 "ce53fb9b108a3ee90db8469e44948ba3263ca8d8a0d92a076c9516f9a3d30bd1",
1926 ),
1927 (
1928 "be86489b3594a9da83e04a9ff81c8d68d528b8b9d31f3942d1c5856a4a8c5af7",
1929 16293506537092575994u64,
1930 536238712429663046u64,
1931 "a2af0607ade21241386ecfb3780aa90514f43595941daeff8dd599c203cde30a",
1932 ),
1933 (
1934 "97bcd85ee5d6033bdf86397e8b26f711912948a7298114be27ca5499ea99725f",
1935 3086656672041156193u64,
1936 8667446575959669532u64,
1937 "0474377538684a991ffc9b41f970b48e65eda9e07c292e60861258ef87d45272",
1938 ),
1939 (
1940 "40065932e6c70eb907e4f2a89ec772f5382ca90a49ef44c4ae21155b9decdcc0",
1941 17152529399128063686u64,
1942 3643450822628960860u64,
1943 "d5f6198c64c797f455f5b44062bb136734f508f9cdd02d8d69d24100ac8d6252",
1944 ),
1945 (
1946 "c136436c2db6b2ebd14985e2c883e73c6d8fd95ace54bfefae9eeca47b7da800",
1947 727585093455815585u64,
1948 521742371554431881u64,
1949 "3dfad04a6eb46d175b63e96943c7d636c56d61063277e25557aace95820432da",
1950 ),
1951 (
1952 "9ea50348595593788645394eb041ac4f75ee4d6a4840b9cf1ed304e895060791",
1953 8654829249939415079u64,
1954 15623358443672184321u64,
1955 "61bb0d6ffcd5b32d0ee34a3b7dfb1c495888059be02b255dd1fa3be02fa1ddbd",
1956 ),
1957 (
1958 "5abc714353ad6abda44a609f9b61f310f5b0a7df55ccf553dc2db3edda18ca17",
1959 5732104102609402825u64,
1960 15720007305337585794u64,
1961 "8b55b7e9c6f54057322c5e0610b33b3137f1fcd46f7d4af1aca797c7b5fff033",
1962 ),
1963 (
1964 "e270b59e6e56100f9e2813f263884ba5f74190a1770dd88cd9603266174e0a6b",
1965 4728642361690813205u64,
1966 6762867306120182099u64,
1967 "5e9aa1ff854504b4bfea4a7f0175866eba04e88e14e57ac08dddc63d6917bf47",
1968 ),
1969 (
1970 "78286294c6fb6823bb8b2b2ddb7a1e71ee64e05c9ba33b0eb8bb6654c64a8259",
1971 6032052879332640150u64,
1972 498315069638377858u64,
1973 "799ef578ffb51a5ec42484e788d6ada4f13f0ff73e1b7b3e6d14d58caae9319a",
1974 ),
1975 (
1976 "af1b85cf284b0cb59a4bfb0f699194bcd6ad4538f27057d9d93dc7a95c1ff32e",
1977 1647153930670480138u64,
1978 13109595411418593026u64,
1979 "429dcdf4748c0047b0dd94f3ad12b5e62bbadf8302525cc5d2aad9c9c746696f",
1980 ),
1981 (
1982 "0152b7a0626771a2518de84c01e52839e7821a655f9dcb9a174d8f52b64b7086",
1983 3915492299782594412u64,
1984 9550071871839879785u64,
1985 "4d5e6ce993dfc9597585ae2b4bacd6d055fefc56ae825666c83e0770e4aa0527",
1986 ),
1987 (
1988 "9ea9b8a4f6bce1dba63290b81f4d1b88dfeac3e244856904a5c9d4086a10271b",
1989 8824593031424861220u64,
1990 15831101445348312026u64,
1991 "a07602b4dd5cba679562061b7c5c0344b2edd6eba36aa97ca57a6fe01ed80a48",
1992 ),
1993 (
1994 "d7b26c2d8f85b74423a57a3da56c61829340f65967791bab849c90b5e1547e7a",
1995 12723258987146468813u64,
1996 10714399360315276559u64,
1997 "3705e57b27d931188c0d2017ab62577355b0cdda4173203478a8562a0cdcae0c",
1998 ),
1999 (
2000 "da354ceca117552482e628937931870a28e9d4416f47a58ee77176d0b760c75b",
2001 1580954430670112951u64,
2002 14920857341852745222u64,
2003 "a13d6b0123daa2e662699ac55a2d0ed1d2e73a02ed00ee5a4dd34db8dea2a37e",
2004 ),
2005 (
2006 "53140d0c8b90b4c3c49e0604879d0dc036e914c4c4f799f1ccae357fef2613e3",
2007 12521658365236780592u64,
2008 11630410585145916252u64,
2009 "46f06ce1435a7a0fd3476bbcffe4aac88c33a7fcf50080270b715d25c93d96d7",
2010 ),
2011 (
2012 "4b1c151815da6f18f27e98890eac1f7d43b80f3386c7c7d15ee0e43a7edfe0a6",
2013 9575643484508382933u64,
2014 3471795678079408573u64,
2015 "a9e6a8fac46c5fc61ae07bddc223e9f105f567ad039d2312a03431d1f24d8b2c",
2016 ),
2017 (
2018 "39436357a2bcd906e58fb88238be2ddb2e43c8a5590332e3aee1d1134a0d0ba4",
2019 10171391804125392783u64,
2020 2915644784933705108u64,
2021 "1d5db03f07137da9d3af85096ed51a4ff64bb476a79bf4294850438867fe3833",
2022 ),
2023 (
2024 "5fbe8d9d6a12b061a94a72436caec331ab1fd4e472c3bb4688215788c5e9bcd9",
2025 5663512925993713993u64,
2026 18170240962605758111u64,
2027 "bd5d601cbcb47bd84d410bafec72f2270fceb1ed2ed11499a1e218a9f89a9f7f",
2028 ),
2029 (
2030 "f2e29a909dd31b38e9b92b2b2d214e822ebddb26183cd077d4009773854ab099",
2031 7512894577556564068u64,
2032 15905517369556068583u64,
2033 "a36e66ce11eca7900248c518e12c6c08d659d609f4cbd98468292de7adf780f2",
2034 ),
2035 (
2036 "3eb82e6d6e964ca56b50cc54bdd55bb470c67a4932aba48d27d175d1be2542aa",
2037 12645567232869276853u64,
2038 8416544129280224452u64,
2039 "d177f246a45cc76d39a8ee06b32d8c076c986106b9a8e0455a0b41d00fe3cbde",
2040 ),
2041 (
2042 "c903731014f6a5b4b45174ef5f9d5a2895a19d1308292f25aa323fda88acc938",
2043 5989992708726918818u64,
2044 17462460601463602125u64,
2045 "01241c61ad1c8adc27e5a1096ab6c643af0fbb6e2818ef77272b70e5c3624abc",
2046 ),
2047 (
2048 "ef46410ab47113a78c27e100ed1b476f82a8789012bd95a047a4b23385596f53",
2049 11884362385049322305u64,
2050 619908411193297508u64,
2051 "e9b4c929e26077ac1fd5a771ea5badc7e9ddb58a20a2a797389c63b3dd3df00d",
2052 ),
2053 (
2054 "be336bc6722bb787d542f4ef8ecb6f46a449557ca7b69b8668b6fed19dfa73b7",
2055 11490216175357680195u64,
2056 13136528075688203375u64,
2057 "31bfd807f92e6d5dc5c534e9ad0cb29d00c6f0ae7d7b5f1e65f8e683de0bce59",
2058 ),
2059 (
2060 "39599e5828a8f102b8a6808103ae7df29b838fe739d8b73f72f8f0d282ca5a47",
2061 6957481657451522177u64,
2062 4196708540027060724u64,
2063 "968a12d79704b313471ece148cb4e26b8b11620db2a9ee6da0f5dc200801f555",
2064 ),
2065 (
2066 "acd99530bb14ca9a7fac3df8eebfd8cdd234b0f6f7c3893a20bc159a4fd54df5",
2067 9792913946138032169u64,
2068 9219321015500590384u64,
2069 "db45a98128770a329c82c904ceee21d3917f6072b8bd260e46218f65656c964c",
2070 ),
2071 (
2072 "453b80a0b11f237011c57630034ed46888ad96f4300a58aea24c0fe4a5472f68",
2073 14407140330317286994u64,
2074 5783848199433986576u64,
2075 "b8cded0b4efd6bf2282a4f8b3c353f74821714f84df9a6ab25131edc7fdad00f",
2076 ),
2077 (
2078 "23e464d1e9b413a4a6b378cee3a0405ec6ccbb4d418372d1b42d3fde558d48d1",
2079 1190974500816796805u64,
2080 1621159728666344828u64,
2081 "d677f41d273754da3ab8080b605ae07a7193c9f35f6318b809e42a1fdf594be3",
2082 ),
2083 (
2084 "d0e590648dec459aca50edf44251627bab5a36029a0c748b1ddf86b7b887425b",
2085 4807164391931567365u64,
2086 4256042233199858200u64,
2087 "a8677de59ab856516a03663730af54c55a79169346c3d958b564e5ee35d8622b",
2088 ),
2089 (
2090 "72387dbaaaf2c39175d8c067558b869ba7bdc6234bc63ee97a53fea1d988ff39",
2091 5046042574093452325u64,
2092 3088471405044806123u64,
2093 "83c226621506b07073936aec3c87a8e2ef34dd42e504adc2bbab39ede49aa77f",
2094 ),
2095 (
2096 "de6874ca2b9dd8b4347c25d32b882a2a7c127b127d6c5e00d073ab3853339d0e",
2097 6112730660331874479u64,
2098 10943246617310133253u64,
2099 "a0c96a69e5ab3e3fe1a1a2fd0e5e68035ff3c7b2985e4e6b8407d4c377600c6f",
2100 ),
2101 (
2102 "b0d8689e08b983e578d6a0c136b76952497087ee144369af653a0a1b231eeb28",
2103 15612408165265483596u64,
2104 13112504741499957010u64,
2105 "4fc49edeff215f1d54dfd2e60a14a3de2abecbe845db2148c7aee32c65f3c91c",
2106 ),
2107 (
2108 "29d7fb6b714cbdd1be95c4a268cef7f544329642ae05fab26dc251bbc773085e",
2109 17509162400681223655u64,
2110 5075629528173950353u64,
2111 "781ecb560ef8cf0bcfa96b8d12075f4cf87ad52d69dfb2c72801206eded135bd",
2112 ),
2113 (
2114 "85dbf7074c93a4e39b67cc504b35351ee16c1fab437a7fb9e5d9320be1d9c13c",
2115 17692199403267011109u64,
2116 7069378948726478427u64,
2117 "a3ff0d8dee5aa0214460f5b03a70bd76ef00ac8c07f07c0b3d82c9c57e4c72a9",
2118 ),
2119 (
2120 "7bd5a9f3126b4a681afac9a177c6ff7f3dd80d8d7fd5a821a705221c96975ded",
2121 17807965607151214145u64,
2122 5562549152802999850u64,
2123 "dbc3861943b7372e49698b1c5b0e4255b7c93e9fa2c13d6a4405172ab0db9a5b",
2124 ),
2125 (
2126 "496d13d45dbe7eb02fee23c914ac9fefdf86cf5c937c520719fc6a31b3fcf8d9",
2127 13446203348342334214u64,
2128 332407928246785326u64,
2129 "d2d73f15fcdc12adce25b911aa4551dcf900e225761e254eb6392cbd414e389c",
2130 ),
2131 (
2132 "b2f0a0127fc74a35dec5515b1c7eb8a3833ca99925049c47cd109ec94678e6c5",
2133 9683373807753869342u64,
2134 7570798132195583433u64,
2135 "e704110433e5ab17858c5fbe4f1b6d692942d5f5981cac68372d06066bee97fe",
2136 ),
2137 (
2138 "d5f65171b17d7720411905ef138e84b9d1f459e2b248521c449f1781aafd675e",
2139 10088287051097617949u64,
2140 185695341767856973u64,
2141 "8d784c4171e242af4187f30510cd298106b7e68cd3088444a055cb1f3893ba28",
2142 ),
2143 (
2144 "7dcbec5c20fbf1d69665d4b9cdc450fea2d0098e78084bce0a864fea4ba016b0",
2145 13908816056510478374u64,
2146 17793990636863600193u64,
2147 "18e9026372d91e116faf813ce3ba9d7fadef2bb3b779be6efeba8a4ecd9e1f38",
2148 ),
2149 (
2150 "d4f772f4bf1cfa4dad4b55962b50900da8657a4961dabbdf0664f3cd42d368f8",
2151 16438076732493217366u64,
2152 18419670900047275588u64,
2153 "b9fd16b16b3a8fab4d9c47f452d9ce4aad530edeb06ee6830589078db2f79382",
2154 ),
2155 (
2156 "2d009535f82b1813ce2ca7236ceae7864c1e4d3644a1acd02656919ef1aa55d0",
2157 10206924399607440433u64,
2158 3986996560633257271u64,
2159 "db49e225bd427768599a7c06d7aee432121fa3179505f9ee8c717f51c7fa8c54",
2160 ),
2161 (
2162 "b1d7a292df12e505e7433c7e850e9efc81a8931b65f3354a66402894b6d5ba76",
2163 8215550459234533539u64,
2164 10241096845089693964u64,
2165 "5567813b312cb811909a01d14ee8f7ec4d239198ea2d37243123e1de2317e1af",
2166 ),
2167 (
2168 "85120d6f43ea9258accf6a87e49cd5461d9b3735a4dc623f9fbcc669cbdd1ce6",
2169 17566770568845511328u64,
2170 8686605711223432099u64,
2171 "e163f4fcd17acf5714ee48278732808601e861cd4c4c24326cd24431aab1d0ce",
2172 ),
2173 (
2174 "48fe4c22080c6e702f7af0e97fb5354c1c14ff4616c6fc4ac8a4491d4b9b3473",
2175 14371024664575587429u64,
2176 15149464181957728462u64,
2177 "061dec7af4b41bdd056306a8b13b71d574a49a4595884b1a77674f5150d4509d",
2178 ),
2179 (
2180 "29d14b014fa3cabbb3b4808e751e81f571de6d0e727cae627318a5fd82fef517",
2181 9612395342616083334u64,
2182 3700617080099093094u64,
2183 "f7b33a2d2784441f77f0cc1c87930e79bea3332a921269b500e81d823108561c",
2184 ),
2185 ];
2186
2187 // Insert all accounts
2188 for (addr, nonce, balance, code_hash) in &account_data {
2189 accounts.insert(
2190 b256(addr),
2191 Some(Account {
2192 nonce: *nonce,
2193 balance: U256::from(*balance),
2194 bytecode_hash: Some(b256(code_hash)),
2195 }),
2196 );
2197 }
2198
2199 let post_state = HashedPostState { accounts, storages: Default::default() };
2200
2201 // Create test harness
2202 let harness = ProofTestHarness::new(post_state);
2203
        // Create targets from the test case input: hex-encoded 32-byte keys, each with a min_len
2205 let targets = vec![
2206 Target::new(b256("0153000000000000000000000000000000000000000000000000000000000000"))
2207 .with_min_len(2),
2208 Target::new(b256("0000000000000000000000000000000000000000000000000000000000000000"))
2209 .with_min_len(2),
2210 Target::new(b256("2300000000000000000000000000000000000000000000000000000000000000"))
2211 .with_min_len(2),
2212 ];
2213
2214 // Test proof generation
2215 harness.assert_proof(targets).expect("Proof generation failed");
2216 }
2217}