use alloy_consensus::BlockHeader;
use alloy_genesis::GenesisAccount;
use alloy_primitives::{keccak256, map::HashMap, Address, B256, U256};
use reth_chainspec::EthChainSpec;
use reth_codecs::Compact;
use reth_config::config::EtlConfig;
use reth_db_api::{tables, transaction::DbTxMut, DatabaseError};
use reth_etl::Collector;
use reth_execution_errors::StateRootError;
use reth_primitives_traits::{Account, Bytecode, GotExpected, NodePrimitives, StorageEntry};
use reth_provider::{
    errors::provider::ProviderResult, providers::StaticFileWriter, BlockHashReader, BlockNumReader,
    BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome,
    HashingWriter, HeaderProvider, HistoryWriter, OriginalValuesKnown, ProviderError, RevertsInit,
    StageCheckpointReader, StageCheckpointWriter, StateWriter, StaticFileProviderFactory,
    TrieWriter,
};
use reth_stages_types::{StageCheckpoint, StageId};
use reth_static_file_types::StaticFileSegment;
use reth_trie::{
    prefix_set::{TriePrefixSets, TriePrefixSetsMut},
    IntermediateStateRootState, Nibbles, StateRoot as StateRootComputer, StateRootProgress,
};
use reth_trie_db::DatabaseStateRoot;
use serde::{Deserialize, Serialize};
use std::io::BufRead;
use tracing::{debug, error, info, trace};

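/// Default soft limit for the byte length of an accounts chunk read from a state dump before it
/// is processed (1 GB).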
pub const DEFAULT_SOFT_LIMIT_BYTE_LEN_ACCOUNTS_CHUNK: usize = 1_000_000_000;

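/// Approximate number of accounts contained in one gigabyte of state dump data.
///
/// Used below to size account write batches and to pace progress logging while parsing and
/// writing a state dump.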
pub const AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP: usize = 285_228;

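/// Soft limit for the number of flushed trie updates after which progress is logged during state
/// root computation.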
const SOFT_LIMIT_COUNT_FLUSHED_UPDATES: usize = 1_000_000;

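/// Error type for storage initialization.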
#[derive(Debug, thiserror::Error, Clone)]
pub enum InitStorageError {
    /// The genesis header was found in static files, but the database itself is uninitialized.
    #[error(
        "static files found, but the database is uninitialized. If attempting to re-sync, delete both."
    )]
    UninitializedDatabase,
    /// The genesis hash already present in storage does not match the configured chainspec.
    #[error(
        "genesis hash in the storage does not match the specified chainspec: chainspec is {chainspec_hash}, database is {storage_hash}"
    )]
    GenesisHashMismatch {
        /// Genesis hash of the chainspec this node was configured with.
        chainspec_hash: B256,
        /// Genesis hash found in storage.
        storage_hash: B256,
    },
    /// Provider error.
    #[error(transparent)]
    Provider(#[from] ProviderError),
    /// State root computation error.
    #[error(transparent)]
    StateRootError(#[from] StateRootError),
    /// Computed state root does not match the expected one.
    #[error("state root mismatch: {_0}")]
    StateRootMismatch(GotExpected<B256>),
}

impl From<DatabaseError> for InitStorageError {
    fn from(error: DatabaseError) -> Self {
        Self::Provider(ProviderError::Database(error))
    }
}

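/// Writes the genesis block to storage if it is not already present and returns its hash.
///
/// If the genesis header and the `Headers` stage checkpoint already exist this is a no-op and the
/// known genesis hash is returned. If storage holds a different genesis hash than the chainspec,
/// `InitStorageError::GenesisHashMismatch` is returned; if the genesis header exists only in
/// static files while the database is empty, `InitStorageError::UninitializedDatabase` is
/// returned.
///
/// A minimal usage sketch, mirroring the tests at the bottom of this file
/// (`create_test_provider_factory_with_chain_spec` is a test-only helper from
/// `reth_provider::test_utils`):
///
/// ```ignore
/// let factory = create_test_provider_factory_with_chain_spec(MAINNET.clone());
/// let genesis_hash = init_genesis(&factory)?;
/// assert_eq!(genesis_hash, MAINNET_GENESIS_HASH);
/// ```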
pub fn init_genesis<PF>(factory: &PF) -> Result<B256, InitStorageError>
where
    PF: DatabaseProviderFactory
        + StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact>>
        + ChainSpecProvider
        + StageCheckpointReader
        + BlockHashReader,
    PF::ProviderRW: StaticFileProviderFactory<Primitives = PF::Primitives>
        + StageCheckpointWriter
        + HistoryWriter
        + HeaderProvider
        + HashingWriter
        + StateWriter
        + TrieWriter
        + AsRef<PF::ProviderRW>,
    PF::ChainSpec: EthChainSpec<Header = <PF::Primitives as NodePrimitives>::BlockHeader>,
{
    let chain = factory.chain_spec();

    let genesis = chain.genesis();
    let hash = chain.genesis_hash();

    // Check whether the genesis block has already been written.
    match factory.block_hash(0) {
        Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => {}
        Ok(Some(block_hash)) => {
            if block_hash == hash {
                // The genesis header exists in static files, but the database may still be
                // uninitialized (no `Headers` stage checkpoint).
                if factory.get_stage_checkpoint(StageId::Headers)?.is_none() {
                    error!(target: "reth::storage", "Genesis header found in static files, but the database is uninitialized.");
                    return Err(InitStorageError::UninitializedDatabase)
                }

                debug!("Genesis already written, skipping.");
                return Ok(hash)
            }

            return Err(InitStorageError::GenesisHashMismatch {
                chainspec_hash: hash,
                storage_hash: block_hash,
            })
        }
        Err(e) => {
            debug!(?e);
            return Err(e.into());
        }
    }

    debug!("Writing genesis block.");

    let alloc = &genesis.alloc;

    // Use a single read-write provider for all genesis writes, committed at the end.
    let provider_rw = factory.database_provider_rw()?;
    insert_genesis_hashes(&provider_rw, alloc.iter())?;
    insert_genesis_history(&provider_rw, alloc.iter())?;

    insert_genesis_header(&provider_rw, &chain)?;

    insert_genesis_state(&provider_rw, alloc.iter())?;

    // Compute and persist the genesis state trie.
    compute_state_root(&provider_rw, None)?;

    // Write the default (zeroed) checkpoint for every stage.
    for stage in StageId::ALL {
        provider_rw.save_stage_checkpoint(stage, Default::default())?;
    }

    // Register the (empty) genesis block in the receipts and transactions static file segments.
    let static_file_provider = provider_rw.static_file_provider();
    static_file_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(0)?;
    static_file_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(0)?;

    provider_rw.commit()?;

    Ok(hash)
}

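/// Inserts the genesis allocation into the database as the state of block 0.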
pub fn insert_genesis_state<'a, 'b, Provider>(
    provider: &Provider,
    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)>,
) -> ProviderResult<()>
where
    Provider: StaticFileProviderFactory
        + DBProvider<Tx: DbTxMut>
        + HeaderProvider
        + StateWriter
        + AsRef<Provider>,
{
    insert_state(provider, alloc, 0)
}

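/// Inserts the given allocation as the state at `block`, writing accounts, bytecodes, storage and
/// the corresponding reverts as a single `ExecutionOutcome`.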
pub fn insert_state<'a, 'b, Provider>(
    provider: &Provider,
    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)>,
    block: u64,
) -> ProviderResult<()>
where
    Provider: StaticFileProviderFactory
        + DBProvider<Tx: DbTxMut>
        + HeaderProvider
        + StateWriter
        + AsRef<Provider>,
{
    let capacity = alloc.size_hint().1.unwrap_or(0);
    let mut state_init: BundleStateInit =
        HashMap::with_capacity_and_hasher(capacity, Default::default());
    let mut reverts_init = HashMap::with_capacity_and_hasher(capacity, Default::default());
    let mut contracts: HashMap<B256, Bytecode> =
        HashMap::with_capacity_and_hasher(capacity, Default::default());

    for (address, account) in alloc {
        let bytecode_hash = if let Some(code) = &account.code {
            match Bytecode::new_raw_checked(code.clone()) {
                Ok(bytecode) => {
                    let hash = bytecode.hash_slow();
                    contracts.insert(hash, bytecode);
                    Some(hash)
                }
                Err(err) => {
                    error!(%address, %err, "Failed to decode genesis bytecode.");
                    return Err(DatabaseError::Other(err.to_string()).into());
                }
            }
        } else {
            None
        };

        let storage = account
            .storage
            .as_ref()
            .map(|m| {
                m.iter()
                    .map(|(key, value)| {
                        let value = U256::from_be_bytes(value.0);
                        (*key, (U256::ZERO, value))
                    })
                    .collect::<HashMap<_, _>>()
            })
            .unwrap_or_default();

        reverts_init.insert(
            *address,
            (Some(None), storage.keys().map(|k| StorageEntry::new(*k, U256::ZERO)).collect()),
        );

        state_init.insert(
            *address,
            (
                None,
                Some(Account {
                    nonce: account.nonce.unwrap_or_default(),
                    balance: account.balance,
                    bytecode_hash,
                }),
                storage,
            ),
        );
    }
    let all_reverts_init: RevertsInit = HashMap::from_iter([(block, reverts_init)]);

    let execution_outcome = ExecutionOutcome::new_init(
        state_init,
        all_reverts_init,
        contracts,
        Vec::default(),
        block,
        Vec::new(),
    );

    provider.write_state(&execution_outcome, OriginalValuesKnown::Yes)?;

    trace!(target: "reth::cli", "Inserted state");

    Ok(())
}

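/// Inserts hashed account and hashed storage entries for the given genesis allocation.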
pub fn insert_genesis_hashes<'a, 'b, Provider>(
    provider: &Provider,
    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)> + Clone,
) -> ProviderResult<()>
where
    Provider: DBProvider<Tx: DbTxMut> + HashingWriter,
{
    let alloc_accounts = alloc.clone().map(|(addr, account)| (*addr, Some(Account::from(account))));
    provider.insert_account_for_hashing(alloc_accounts)?;

    trace!(target: "reth::cli", "Inserted account hashes");

    let alloc_storage = alloc.filter_map(|(addr, account)| {
        account.storage.as_ref().map(|storage| {
            (*addr, storage.iter().map(|(&key, &value)| StorageEntry { key, value: value.into() }))
        })
    });
    provider.insert_storage_for_hashing(alloc_storage)?;

    trace!(target: "reth::cli", "Inserted storage hashes");

    Ok(())
}

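/// Inserts account and storage history indices for the genesis allocation at block 0.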
pub fn insert_genesis_history<'a, 'b, Provider>(
    provider: &Provider,
    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)> + Clone,
) -> ProviderResult<()>
where
    Provider: DBProvider<Tx: DbTxMut> + HistoryWriter,
{
    insert_history(provider, alloc, 0)
}

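/// Inserts account and storage history indices for the given allocation at `block`.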
pub fn insert_history<'a, 'b, Provider>(
    provider: &Provider,
    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)> + Clone,
    block: u64,
) -> ProviderResult<()>
where
    Provider: DBProvider<Tx: DbTxMut> + HistoryWriter,
{
    let account_transitions = alloc.clone().map(|(addr, _)| (*addr, [block]));
    provider.insert_account_history_index(account_transitions)?;

    trace!(target: "reth::cli", "Inserted account history");

    let storage_transitions = alloc
        .filter_map(|(addr, account)| account.storage.as_ref().map(|storage| (addr, storage)))
        .flat_map(|(addr, storage)| storage.keys().map(|key| ((*addr, *key), [block])));
    provider.insert_storage_history_index(storage_transitions)?;

    trace!(target: "reth::cli", "Inserted storage history");

    Ok(())
}

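/// Writes the genesis header to the `Headers` static file segment (unless it is already present)
/// and inserts the matching `HeaderNumbers` and `BlockBodyIndices` database entries.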
pub fn insert_genesis_header<Provider, Spec>(
    provider: &Provider,
    chain: &Spec,
) -> ProviderResult<()>
where
    Provider: StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact>>
        + DBProvider<Tx: DbTxMut>,
    Spec: EthChainSpec<Header = <Provider::Primitives as NodePrimitives>::BlockHeader>,
{
    let (header, block_hash) = (chain.genesis_header(), chain.genesis_hash());
    let static_file_provider = provider.static_file_provider();

    match static_file_provider.block_hash(0) {
        Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => {
            let (difficulty, hash) = (header.difficulty(), block_hash);
            let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?;
            writer.append_header(header, difficulty, &hash)?;
        }
        Ok(Some(_)) => {}
        Err(e) => return Err(e),
    }

    provider.tx_ref().put::<tables::HeaderNumbers>(block_hash, 0)?;
    provider.tx_ref().put::<tables::BlockBodyIndices>(0, Default::default())?;

    Ok(())
}

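/// Initializes the database with state loaded from a state dump, anchored at the highest block
/// already present in the database.
///
/// The reader is consumed line by line: the first line must be a JSON object carrying the state
/// root (deserialized into `StateRoot`, e.g. `{"root": "0x…"}`), and every following line must be
/// one flattened `GenesisAccountWithAddress` object, i.e. a JSON `GenesisAccount` with an
/// additional `address` field. The dumped state root is checked against the current header before
/// importing, and the recomputed state root is checked again afterwards; any mismatch returns
/// `InitStorageError::StateRootMismatch`.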
pub fn init_from_state_dump<Provider>(
    mut reader: impl BufRead,
    provider_rw: &Provider,
    etl_config: EtlConfig,
) -> eyre::Result<B256>
where
    Provider: StaticFileProviderFactory
        + DBProvider<Tx: DbTxMut>
        + BlockNumReader
        + BlockHashReader
        + ChainSpecProvider
        + StageCheckpointWriter
        + HistoryWriter
        + HeaderProvider
        + HashingWriter
        + TrieWriter
        + StateWriter
        + AsRef<Provider>,
{
    if etl_config.file_size == 0 {
        return Err(eyre::eyre!("ETL file size cannot be zero"))
    }

    let block = provider_rw.last_block_number()?;
    let hash = provider_rw
        .block_hash(block)?
        .ok_or_else(|| eyre::eyre!("Block hash not found for block {}", block))?;
    let expected_state_root = provider_rw
        .header_by_number(block)?
        .ok_or_else(|| ProviderError::HeaderNotFound(block.into()))?
        .state_root();

    // The first line of the state dump must be the state root.
    let dump_state_root = parse_state_root(&mut reader)?;
    if expected_state_root != dump_state_root {
        error!(target: "reth::cli",
            ?dump_state_root,
            ?expected_state_root,
            "State root from state dump does not match state root in current header."
        );
        return Err(InitStorageError::StateRootMismatch(GotExpected {
            got: dump_state_root,
            expected: expected_state_root,
        })
        .into())
    }

    debug!(target: "reth::cli",
        block,
        chain=%provider_rw.chain_spec().chain(),
        "Initializing state at block"
    );

    // Collect the accounts from the dump and write them to the database in batches.
    let collector = parse_accounts(&mut reader, etl_config)?;

    let mut prefix_sets = TriePrefixSetsMut::default();
    dump_state(collector, provider_rw, block, &mut prefix_sets)?;

    info!(target: "reth::cli", "All accounts written to database, starting state root computation (may take some time)");

    // Recompute the state root over the imported accounts and compare it against the header.
    let computed_state_root = compute_state_root(provider_rw, Some(prefix_sets.freeze()))?;
    if computed_state_root == expected_state_root {
        info!(target: "reth::cli",
            ?computed_state_root,
            "Computed state root matches state root in state dump"
        );
    } else {
        error!(target: "reth::cli",
            ?computed_state_root,
            ?expected_state_root,
            "Computed state root does not match state root in state dump"
        );

        return Err(InitStorageError::StateRootMismatch(GotExpected {
            got: computed_state_root,
            expected: expected_state_root,
        })
        .into())
    }

    // Mark all state-dependent stages as done up to this block.
    for stage in StageId::STATE_REQUIRED {
        provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(block))?;
    }

    Ok(hash)
}

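/// Parses and returns the state root from the first line of the state dump.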
fn parse_state_root(reader: &mut impl BufRead) -> eyre::Result<B256> {
    let mut line = String::new();
    reader.read_line(&mut line)?;

    let expected_state_root = serde_json::from_str::<StateRoot>(&line)?.root;
    trace!(target: "reth::cli",
        root=%expected_state_root,
        "Read state root from file"
    );
    Ok(expected_state_root)
}

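/// Parses the remaining lines of the state dump as `GenesisAccountWithAddress` entries and
/// buffers them in an ETL `Collector`, logging progress periodically.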
fn parse_accounts(
    mut reader: impl BufRead,
    etl_config: EtlConfig,
) -> Result<Collector<Address, GenesisAccount>, eyre::Error> {
    let mut line = String::new();
    let mut collector = Collector::new(etl_config.file_size, etl_config.dir);

    while let Ok(n) = reader.read_line(&mut line) {
        if n == 0 {
            break
        }

        let GenesisAccountWithAddress { genesis_account, address } = serde_json::from_str(&line)?;
        collector.insert(address, genesis_account)?;

        if !collector.is_empty() &&
            collector.len().is_multiple_of(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP)
        {
            info!(target: "reth::cli",
                parsed_new_accounts=collector.len(),
            );
        }

        line.clear();
    }

    Ok(collector)
}

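/// Drains the collector and writes the accounts to the database in batches of roughly
/// `AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP` entries, collecting the hashed account and storage
/// keys into `prefix_sets` for the later state root computation.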
fn dump_state<Provider>(
    mut collector: Collector<Address, GenesisAccount>,
    provider_rw: &Provider,
    block: u64,
    prefix_sets: &mut TriePrefixSetsMut,
) -> Result<(), eyre::Error>
where
    Provider: StaticFileProviderFactory
        + DBProvider<Tx: DbTxMut>
        + HeaderProvider
        + HashingWriter
        + HistoryWriter
        + StateWriter
        + AsRef<Provider>,
{
    let accounts_len = collector.len();
    let mut accounts = Vec::with_capacity(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP);
    let mut total_inserted_accounts = 0;

    for (index, entry) in collector.iter()?.enumerate() {
        let (address, account) = entry?;
        let (address, _) = Address::from_compact(address.as_slice(), address.len());
        let (account, _) = GenesisAccount::from_compact(account.as_slice(), account.len());

        // Record the hashed account key so the state root computation only visits touched keys.
        let hashed_address = keccak256(address);
        prefix_sets.account_prefix_set.insert(Nibbles::unpack(hashed_address));

        // Same for every storage slot of the account.
        if let Some(ref storage) = account.storage {
            for key in storage.keys() {
                let hashed_key = keccak256(key);
                prefix_sets
                    .storage_prefix_sets
                    .entry(hashed_address)
                    .or_default()
                    .insert(Nibbles::unpack(hashed_key));
            }
        }

        accounts.push((address, account));

        // Flush a full batch, or whatever is left on the last entry.
        if (index > 0 && index.is_multiple_of(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP)) ||
            index == accounts_len - 1
        {
            total_inserted_accounts += accounts.len();

            info!(target: "reth::cli",
                total_inserted_accounts,
                "Writing accounts to db"
            );

            // Write hashed account and storage entries.
            insert_genesis_hashes(
                provider_rw,
                accounts.iter().map(|(address, account)| (address, account)),
            )?;

            // Write history indices for the accounts and their storage.
            insert_history(
                provider_rw,
                accounts.iter().map(|(address, account)| (address, account)),
                block,
            )?;

            // Write the plain state and reverts.
            insert_state(
                provider_rw,
                accounts.iter().map(|(address, account)| (address, account)),
                block,
            )?;

            accounts.clear();
        }
    }
    Ok(())
}

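/// Computes the state root, optionally restricted to the given prefix sets, committing
/// intermediate trie updates to the database in batches until the computation completes. Returns
/// the computed root.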
fn compute_state_root<Provider>(
    provider: &Provider,
    prefix_sets: Option<TriePrefixSets>,
) -> Result<B256, InitStorageError>
where
    Provider: DBProvider<Tx: DbTxMut> + TrieWriter,
{
    trace!(target: "reth::cli", "Computing state root");

    let tx = provider.tx_ref();
    let mut intermediate_state: Option<IntermediateStateRootState> = None;
    let mut total_flushed_updates = 0;

    // Compute the root in chunks: each pass flushes its trie updates to the database and resumes
    // from the intermediate state returned by the previous pass.
    loop {
        let mut state_root =
            StateRootComputer::from_tx(tx).with_intermediate_state(intermediate_state);

        if let Some(sets) = prefix_sets.clone() {
            state_root = state_root.with_prefix_sets(sets);
        }

        match state_root.root_with_progress()? {
            StateRootProgress::Progress(state, _, updates) => {
                let updated_len = provider.write_trie_updates(&updates)?;
                total_flushed_updates += updated_len;

                trace!(target: "reth::cli",
                    last_account_key = %state.account_root_state.last_hashed_key,
                    updated_len,
                    total_flushed_updates,
                    "Flushing trie updates"
                );

                intermediate_state = Some(*state);

                if total_flushed_updates.is_multiple_of(SOFT_LIMIT_COUNT_FLUSHED_UPDATES) {
                    info!(target: "reth::cli",
                        total_flushed_updates,
                        "Flushing trie updates"
                    );
                }
            }
            StateRootProgress::Complete(root, _, updates) => {
                let updated_len = provider.write_trie_updates(&updates)?;
                total_flushed_updates += updated_len;

                trace!(target: "reth::cli",
                    %root,
                    updated_len,
                    total_flushed_updates,
                    "State root has been computed"
                );

                return Ok(root)
            }
        }
    }
}

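/// Helper type to deserialize the state root line of a state dump.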
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
struct StateRoot {
    root: B256,
}

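/// One account entry of a state dump: a `GenesisAccount` flattened together with the address it
/// belongs to.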
#[derive(Debug, Serialize, Deserialize)]
struct GenesisAccountWithAddress {
    /// The account state, flattened into the same JSON object.
    #[serde(flatten)]
    genesis_account: GenesisAccount,
    /// The address this account belongs to.
    address: Address,
}

#[cfg(test)]
mod tests {
    use super::*;
    use alloy_consensus::constants::{
        HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH,
    };
    use alloy_genesis::Genesis;
    use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET, SEPOLIA};
    use reth_db::DatabaseEnv;
    use reth_db_api::{
        cursor::DbCursorRO,
        models::{storage_sharded_key::StorageShardedKey, IntegerList, ShardedKey},
        table::{Table, TableRow},
        transaction::DbTx,
        Database,
    };
    use reth_provider::{
        test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB},
        ProviderFactory,
    };
    use std::{collections::BTreeMap, sync::Arc};

    fn collect_table_entries<DB, T>(
        tx: &<DB as Database>::TX,
    ) -> Result<Vec<TableRow<T>>, InitStorageError>
    where
        DB: Database,
        T: Table,
    {
        Ok(tx.cursor_read::<T>()?.walk_range(..)?.collect::<Result<Vec<_>, _>>()?)
    }

    #[test]
    fn success_init_genesis_mainnet() {
        let genesis_hash =
            init_genesis(&create_test_provider_factory_with_chain_spec(MAINNET.clone())).unwrap();

        assert_eq!(genesis_hash, MAINNET_GENESIS_HASH);
    }

    #[test]
    fn success_init_genesis_sepolia() {
        let genesis_hash =
            init_genesis(&create_test_provider_factory_with_chain_spec(SEPOLIA.clone())).unwrap();

        assert_eq!(genesis_hash, SEPOLIA_GENESIS_HASH);
    }

    #[test]
    fn success_init_genesis_holesky() {
        let genesis_hash =
            init_genesis(&create_test_provider_factory_with_chain_spec(HOLESKY.clone())).unwrap();

        assert_eq!(genesis_hash, HOLESKY_GENESIS_HASH);
    }

    #[test]
    fn fail_init_inconsistent_db() {
        let factory = create_test_provider_factory_with_chain_spec(SEPOLIA.clone());
        let static_file_provider = factory.static_file_provider();
        init_genesis(&factory).unwrap();

        let genesis_hash = init_genesis(&ProviderFactory::<MockNodeTypesWithDB>::new(
            factory.into_db(),
            MAINNET.clone(),
            static_file_provider,
        ));

        assert!(matches!(
            genesis_hash.unwrap_err(),
            InitStorageError::GenesisHashMismatch {
                chainspec_hash: MAINNET_GENESIS_HASH,
                storage_hash: SEPOLIA_GENESIS_HASH
            }
        ))
    }

    #[test]
    fn init_genesis_history() {
        let address_with_balance = Address::with_last_byte(1);
        let address_with_storage = Address::with_last_byte(2);
        let storage_key = B256::with_last_byte(1);
        let chain_spec = Arc::new(ChainSpec {
            chain: Chain::from_id(1),
            genesis: Genesis {
                alloc: BTreeMap::from([
                    (
                        address_with_balance,
                        GenesisAccount { balance: U256::from(1), ..Default::default() },
                    ),
                    (
                        address_with_storage,
                        GenesisAccount {
                            storage: Some(BTreeMap::from([(storage_key, B256::random())])),
                            ..Default::default()
                        },
                    ),
                ]),
                ..Default::default()
            },
            hardforks: Default::default(),
            paris_block_and_final_difficulty: None,
            deposit_contract: None,
            ..Default::default()
        });

        let factory = create_test_provider_factory_with_chain_spec(chain_spec);
        init_genesis(&factory).unwrap();

        let provider = factory.provider().unwrap();

        let tx = provider.tx_ref();

        assert_eq!(
            collect_table_entries::<Arc<DatabaseEnv>, tables::AccountsHistory>(tx)
                .expect("failed to collect"),
            vec![
                (ShardedKey::new(address_with_balance, u64::MAX), IntegerList::new([0]).unwrap()),
                (ShardedKey::new(address_with_storage, u64::MAX), IntegerList::new([0]).unwrap())
            ],
        );

        assert_eq!(
            collect_table_entries::<Arc<DatabaseEnv>, tables::StoragesHistory>(tx)
                .expect("failed to collect"),
            vec![(
                StorageShardedKey::new(address_with_storage, storage_key, u64::MAX),
                IntegerList::new([0]).unwrap()
            )],
        );
    }
}