Skip to main content

reth_db_common/
init.rs

1//! Reth genesis initialization utility functions.
2
3use alloy_consensus::BlockHeader;
4use alloy_genesis::GenesisAccount;
5use alloy_primitives::{
6    map::{AddressMap, B256Map, HashMap},
7    Address, B256, U256,
8};
9use reth_chainspec::EthChainSpec;
10use reth_codecs::Compact;
11use reth_config::config::EtlConfig;
12use reth_db_api::{tables, transaction::DbTxMut, DatabaseError};
13use reth_etl::Collector;
14use reth_execution_errors::StateRootError;
15use reth_primitives_traits::{
16    Account, Bytecode, GotExpected, NodePrimitives, SealedHeader, StorageEntry,
17};
18use reth_provider::{
19    errors::provider::ProviderResult, providers::StaticFileWriter, BlockHashReader, BlockNumReader,
20    BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome,
21    HashingWriter, HeaderProvider, HistoryWriter, MetadataProvider, MetadataWriter,
22    NodePrimitivesProvider, OriginalValuesKnown, ProviderError, RevertsInit,
23    RocksDBProviderFactory, StageCheckpointReader, StageCheckpointWriter, StateWriteConfig,
24    StateWriter, StaticFileProviderFactory, StorageSettings, StorageSettingsCache, TrieWriter,
25};
26use reth_stages_types::{StageCheckpoint, StageId};
27use reth_static_file_types::StaticFileSegment;
28use reth_trie::{
29    prefix_set::TriePrefixSets, IntermediateStateRootState, StateRoot as StateRootComputer,
30    StateRootProgress,
31};
32use reth_trie_db::DatabaseStateRoot;
33
/// Alias for a [`StateRootComputer`] backed by database cursors: trie cursors come from
/// `DatabaseTrieCursorFactory` (parameterized by the trie table adapter `A`) and hashed-state
/// cursors from `DatabaseHashedCursorFactory`, both borrowing the same transaction `TX`.
type DbStateRoot<'a, TX, A> = StateRootComputer<
    reth_trie_db::DatabaseTrieCursorFactory<&'a TX, A>,
    reth_trie_db::DatabaseHashedCursorFactory<&'a TX>,
>;
39use serde::{Deserialize, Serialize};
40use std::io::BufRead;
41use tracing::{debug, error, info, trace, warn};
42
43pub use reth_provider::init::{
44    insert_account_history, insert_genesis_account_history, insert_genesis_history,
45    insert_genesis_storage_history, insert_history, insert_storage_history,
46};
47
/// Default soft limit for number of bytes to read from state dump file, before inserting into
/// database.
///
/// Default is 1 GB. Used by [`dump_state`] to decide when to flush a batch of parsed accounts
/// to the database.
pub const DEFAULT_SOFT_LIMIT_BYTE_LEN_ACCOUNTS_CHUNK: usize = 1_000_000_000;

/// Soft limit for the number of flushed trie updates after which to log a progress summary
/// during state root computation.
const SOFT_LIMIT_COUNT_FLUSHED_UPDATES: usize = 1_000_000;
56
57/// Storage initialization error type.
58#[derive(Debug, thiserror::Error, Clone)]
59pub enum InitStorageError {
60    /// Genesis header found on static files but the database is empty.
61    #[error(
62        "static files found, but the database is uninitialized. If attempting to re-syncing, delete both."
63    )]
64    UninitializedDatabase,
65    /// An existing genesis block was found in the database, and its hash did not match the hash of
66    /// the chainspec.
67    #[error(
68        "genesis hash in the storage does not match the specified chainspec: chainspec is {chainspec_hash}, database is {storage_hash}"
69    )]
70    GenesisHashMismatch {
71        /// Expected genesis hash.
72        chainspec_hash: B256,
73        /// Actual genesis hash.
74        storage_hash: B256,
75    },
76    /// Provider error.
77    #[error(transparent)]
78    Provider(#[from] ProviderError),
79    /// State root error while computing the state root
80    #[error(transparent)]
81    StateRootError(#[from] StateRootError),
82    /// State root doesn't match the expected one.
83    #[error("state root mismatch: {_0}")]
84    StateRootMismatch(GotExpected<B256>),
85}
86
87impl From<DatabaseError> for InitStorageError {
88    fn from(error: DatabaseError) -> Self {
89        Self::Provider(ProviderError::Database(error))
90    }
91}
92
/// Write the genesis block if it has not already been written.
///
/// Thin wrapper over [`init_genesis_with_settings`] using the base storage settings
/// ([`StorageSettings::base`]). Returns the genesis hash, including when genesis was
/// already present in storage.
pub fn init_genesis<PF>(factory: &PF) -> Result<B256, InitStorageError>
where
    PF: DatabaseProviderFactory
        + StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact>>
        + ChainSpecProvider
        + StageCheckpointReader
        + BlockNumReader
        + MetadataProvider
        + StorageSettingsCache,
    PF::ProviderRW: StaticFileProviderFactory<Primitives = PF::Primitives>
        + StageCheckpointWriter
        + HistoryWriter
        + HeaderProvider
        + HashingWriter
        + StateWriter
        + TrieWriter
        + MetadataWriter
        + ChainSpecProvider
        + StorageSettingsCache
        + RocksDBProviderFactory
        + NodePrimitivesProvider
        + AsRef<PF::ProviderRW>,
    PF::ChainSpec: EthChainSpec<Header = <PF::Primitives as NodePrimitives>::BlockHeader>,
{
    init_genesis_with_settings(factory, StorageSettings::base())
}
120
121/// Write the genesis block if it has not already been written with [`StorageSettings`].
122pub fn init_genesis_with_settings<PF>(
123    factory: &PF,
124    genesis_storage_settings: StorageSettings,
125) -> Result<B256, InitStorageError>
126where
127    PF: DatabaseProviderFactory
128        + StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact>>
129        + ChainSpecProvider
130        + StageCheckpointReader
131        + BlockNumReader
132        + MetadataProvider
133        + StorageSettingsCache,
134    PF::ProviderRW: StaticFileProviderFactory<Primitives = PF::Primitives>
135        + StageCheckpointWriter
136        + HistoryWriter
137        + HeaderProvider
138        + HashingWriter
139        + StateWriter
140        + TrieWriter
141        + MetadataWriter
142        + ChainSpecProvider
143        + StorageSettingsCache
144        + RocksDBProviderFactory
145        + NodePrimitivesProvider
146        + AsRef<PF::ProviderRW>,
147    PF::ChainSpec: EthChainSpec<Header = <PF::Primitives as NodePrimitives>::BlockHeader>,
148{
149    let chain = factory.chain_spec();
150
151    let genesis = chain.genesis();
152    let hash = chain.genesis_hash();
153
154    // Get the genesis block number from the chain spec
155    let genesis_block_number = chain.genesis_header().number();
156
157    // Check if we already have the genesis header or if we have the wrong one.
158    match factory.block_hash(genesis_block_number) {
159        Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, _)) => {}
160        Ok(Some(block_hash)) => {
161            if block_hash == hash {
162                // Some users will at times attempt to re-sync from scratch by just deleting the
163                // database. Since `factory.block_hash` will only query the static files, we need to
164                // make sure that our database has been written to, and throw error if it's empty.
165                if factory.get_stage_checkpoint(StageId::Headers)?.is_none() {
166                    error!(target: "reth::storage", "Genesis header found on static files, but database is uninitialized.");
167                    return Err(InitStorageError::UninitializedDatabase)
168                }
169
170                let stored = factory.storage_settings()?.unwrap_or_else(StorageSettings::v1);
171                if stored != genesis_storage_settings {
172                    warn!(
173                        target: "reth::storage",
174                        ?stored,
175                        requested = ?genesis_storage_settings,
176                        "Storage settings mismatch detected. Using the stored settings from the existing database."
177                    );
178                }
179
180                debug!("Genesis already written, skipping.");
181                return Ok(hash)
182            }
183
184            return Err(InitStorageError::GenesisHashMismatch {
185                chainspec_hash: hash,
186                storage_hash: block_hash,
187            })
188        }
189        Err(e) => {
190            debug!(?e);
191            return Err(e.into());
192        }
193    }
194
195    debug!("Writing genesis block.");
196
197    // Make sure to set storage settings before anything writes
198    factory.set_storage_settings_cache(genesis_storage_settings);
199
200    let alloc = &genesis.alloc;
201
202    // use transaction to insert genesis header
203    let provider_rw = factory.database_provider_rw()?;
204
205    // Behaviour reserved only for new nodes should be set in the storage settings.
206    provider_rw.write_storage_settings(genesis_storage_settings)?;
207
208    // For non-zero genesis blocks, set expected_block_start BEFORE insert_genesis_state.
209    // When block_range is None, next_block_number() uses expected_block_start. By default,
210    // expected_block_start comes from find_fixed_range which returns the file range start (0),
211    // not the genesis block number. This would cause increment_block(N) to fail.
212    let static_file_provider = provider_rw.static_file_provider();
213    if genesis_block_number > 0 {
214        if genesis_storage_settings.storage_v2 {
215            static_file_provider
216                .get_writer(genesis_block_number, StaticFileSegment::AccountChangeSets)?
217                .user_header_mut()
218                .set_expected_block_start(genesis_block_number);
219        }
220        if genesis_storage_settings.storage_v2 {
221            static_file_provider
222                .get_writer(genesis_block_number, StaticFileSegment::StorageChangeSets)?
223                .user_header_mut()
224                .set_expected_block_start(genesis_block_number);
225        }
226    }
227
228    insert_genesis_hashes(&provider_rw, alloc.iter())?;
229    insert_genesis_history(&provider_rw, alloc.iter())?;
230
231    // Insert header
232    insert_genesis_header(&provider_rw, &chain)?;
233
234    insert_genesis_state(&provider_rw, alloc.iter())?;
235
236    // compute state root to populate trie tables
237    compute_state_root(&provider_rw, None)?;
238
239    // set stage checkpoint to genesis block number for all stages
240    let checkpoint = StageCheckpoint::new(genesis_block_number);
241    for stage in StageId::ALL {
242        provider_rw.save_stage_checkpoint(stage, checkpoint)?;
243    }
244
245    // Static file segments start empty, so we need to initialize the block range.
246    // For genesis blocks with non-zero block numbers, we use get_writer() instead of
247    // latest_writer() and set_block_range() to ensure static files start at the correct block.
248    let static_file_provider = provider_rw.static_file_provider();
249
250    static_file_provider
251        .get_writer(genesis_block_number, StaticFileSegment::Receipts)?
252        .user_header_mut()
253        .set_block_range(genesis_block_number, genesis_block_number);
254    static_file_provider
255        .get_writer(genesis_block_number, StaticFileSegment::Transactions)?
256        .user_header_mut()
257        .set_block_range(genesis_block_number, genesis_block_number);
258
259    if genesis_storage_settings.storage_v2 {
260        static_file_provider
261            .get_writer(genesis_block_number, StaticFileSegment::TransactionSenders)?
262            .user_header_mut()
263            .set_block_range(genesis_block_number, genesis_block_number);
264    }
265
266    // `commit_unwind`` will first commit the DB and then the static file provider, which is
267    // necessary on `init_genesis`.
268    provider_rw.commit()?;
269
270    Ok(hash)
271}
272
/// Inserts the genesis allocation into the database.
///
/// Thin wrapper over [`insert_state`] that reads the genesis block number from the
/// provider's chain spec; `alloc` is the (address, genesis account) allocation iterator.
pub fn insert_genesis_state<'a, 'b, Provider>(
    provider: &Provider,
    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)>,
) -> ProviderResult<()>
where
    Provider: StaticFileProviderFactory
        + DBProvider<Tx: DbTxMut>
        + HeaderProvider
        + StateWriter
        + ChainSpecProvider
        + AsRef<Provider>,
{
    let genesis_block_number = provider.chain_spec().genesis_header().number();
    insert_state(provider, alloc, genesis_block_number)
}
289
290/// Inserts state at given block into database.
291pub fn insert_state<'a, 'b, Provider>(
292    provider: &Provider,
293    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)>,
294    block: u64,
295) -> ProviderResult<()>
296where
297    Provider: StaticFileProviderFactory
298        + DBProvider<Tx: DbTxMut>
299        + HeaderProvider
300        + StateWriter
301        + AsRef<Provider>,
302{
303    let capacity = alloc.size_hint().1.unwrap_or(0);
304    let mut state_init: BundleStateInit =
305        AddressMap::with_capacity_and_hasher(capacity, Default::default());
306    let mut reverts_init: AddressMap<_> =
307        AddressMap::with_capacity_and_hasher(capacity, Default::default());
308    let mut contracts: B256Map<Bytecode> =
309        B256Map::with_capacity_and_hasher(capacity, Default::default());
310
311    for (address, account) in alloc {
312        let bytecode_hash = if let Some(code) = &account.code {
313            match Bytecode::new_raw_checked(code.clone()) {
314                Ok(bytecode) => {
315                    let hash = bytecode.hash_slow();
316                    contracts.insert(hash, bytecode);
317                    Some(hash)
318                }
319                Err(err) => {
320                    error!(%address, %err, "Failed to decode genesis bytecode.");
321                    return Err(DatabaseError::Other(err.to_string()).into());
322                }
323            }
324        } else {
325            None
326        };
327
328        // get state
329        let storage = account
330            .storage
331            .as_ref()
332            .map(|m| {
333                m.iter()
334                    .map(|(key, value)| {
335                        let value = U256::from_be_bytes(value.0);
336                        (*key, (U256::ZERO, value))
337                    })
338                    .collect::<B256Map<_>>()
339            })
340            .unwrap_or_default();
341
342        reverts_init.insert(
343            *address,
344            (Some(None), storage.keys().map(|k| StorageEntry::new(*k, U256::ZERO)).collect()),
345        );
346
347        state_init.insert(
348            *address,
349            (
350                None,
351                Some(Account {
352                    nonce: account.nonce.unwrap_or_default(),
353                    balance: account.balance,
354                    bytecode_hash,
355                }),
356                storage,
357            ),
358        );
359    }
360    let all_reverts_init: RevertsInit = HashMap::from_iter([(block, reverts_init)]);
361
362    let execution_outcome = ExecutionOutcome::new_init(
363        state_init,
364        all_reverts_init,
365        contracts,
366        Vec::default(),
367        block,
368        Vec::new(),
369    );
370
371    provider.write_state(
372        &execution_outcome,
373        OriginalValuesKnown::Yes,
374        StateWriteConfig::default(),
375    )?;
376
377    trace!(target: "reth::cli", "Inserted state");
378
379    Ok(())
380}
381
382/// Inserts hashes for the genesis state.
383pub fn insert_genesis_hashes<'a, 'b, Provider>(
384    provider: &Provider,
385    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)> + Clone,
386) -> ProviderResult<()>
387where
388    Provider: DBProvider<Tx: DbTxMut> + HashingWriter,
389{
390    // insert and hash accounts to hashing table
391    let alloc_accounts = alloc.clone().map(|(addr, account)| (*addr, Some(Account::from(account))));
392    provider.insert_account_for_hashing(alloc_accounts)?;
393
394    trace!(target: "reth::cli", "Inserted account hashes");
395
396    let alloc_storage = alloc.filter_map(|(addr, account)| {
397        // only return Some if there is storage
398        account.storage.as_ref().map(|storage| {
399            (*addr, storage.iter().map(|(&key, &value)| StorageEntry { key, value: value.into() }))
400        })
401    });
402    provider.insert_storage_for_hashing(alloc_storage)?;
403
404    trace!(target: "reth::cli", "Inserted storage hashes");
405
406    Ok(())
407}
408
/// Inserts header for the genesis state.
///
/// Writes the genesis header to the `Headers` static file segment (if not already present)
/// and records the hash→number and block-body-indices mappings in the database. Headers
/// for non-zero genesis blocks are appended via the direct path so the static file range
/// starts at the genesis number rather than 0.
pub fn insert_genesis_header<Provider, Spec>(
    provider: &Provider,
    chain: &Spec,
) -> ProviderResult<()>
where
    Provider: StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact>>
        + DBProvider<Tx: DbTxMut>,
    Spec: EthChainSpec<Header = <Provider::Primitives as NodePrimitives>::BlockHeader>,
{
    let (header, block_hash) = (chain.genesis_header(), chain.genesis_hash());
    let static_file_provider = provider.static_file_provider();

    // Get the actual genesis block number from the header
    let genesis_block_number = header.number();

    match static_file_provider.block_hash(genesis_block_number) {
        // Header not present yet (or the headers segment file is missing entirely): write it.
        Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, _)) => {
            let difficulty = header.difficulty();

            // For genesis blocks with non-zero block numbers, we need to ensure they are stored
            // in the correct static file range. We use get_writer() with the genesis block number
            // to ensure the genesis block is stored in the correct static file range.
            let mut writer = static_file_provider
                .get_writer(genesis_block_number, StaticFileSegment::Headers)?;

            // For non-zero genesis blocks, we need to set block range to genesis_block_number and
            // append header without increment block
            if genesis_block_number > 0 {
                writer
                    .user_header_mut()
                    .set_block_range(genesis_block_number, genesis_block_number);
                writer.append_header_direct(header, difficulty, &block_hash)?;
            } else {
                // For zero genesis blocks, use normal append_header
                writer.append_header(header, &block_hash)?;
            }
        }
        // Header already written to static files; nothing to do there.
        Ok(Some(_)) => {}
        Err(e) => return Err(e),
    }

    // Database-side mappings are written unconditionally (idempotent puts).
    provider.tx_ref().put::<tables::HeaderNumbers>(block_hash, genesis_block_number)?;
    provider.tx_ref().put::<tables::BlockBodyIndices>(genesis_block_number, Default::default())?;

    Ok(())
}
456
/// Reads account state from a [`BufRead`] reader and initializes it at the highest block that can
/// be found on database.
///
/// It's similar to [`init_genesis`] but supports importing state too big to fit in memory, and can
/// be set to the highest block present. One practical usecase is to import OP mainnet state at
/// bedrock transition block.
///
/// Dump format: the first line is a JSON object holding the expected state root, every
/// following line is one JSON-encoded account (see [`GenesisAccountWithAddress`]).
///
/// # Errors
/// Fails if the ETL file size is zero, if the dump's state root does not match the current
/// header's, or if the recomputed state root disagrees with the dump.
pub fn init_from_state_dump<Provider>(
    mut reader: impl BufRead,
    provider_rw: &Provider,
    etl_config: EtlConfig,
) -> eyre::Result<B256>
where
    Provider: StaticFileProviderFactory
        + DBProvider<Tx: DbTxMut>
        + BlockNumReader
        + BlockHashReader
        + ChainSpecProvider
        + StageCheckpointWriter
        + HistoryWriter
        + HeaderProvider
        + HashingWriter
        + TrieWriter
        + StateWriter
        + StorageSettingsCache
        + RocksDBProviderFactory
        + NodePrimitivesProvider
        + AsRef<Provider>,
{
    // A zero file size would make the ETL collector unusable.
    if etl_config.file_size == 0 {
        return Err(eyre::eyre!("ETL file size cannot be zero"))
    }

    // Anchor the import at the highest block already present in storage.
    let block = provider_rw.last_block_number()?;

    let hash = provider_rw
        .block_hash(block)?
        .ok_or_else(|| eyre::eyre!("Block hash not found for block {}", block))?;
    let header = provider_rw
        .header_by_number(block)?
        .map(|h| SealedHeader::new(h, hash))
        .ok_or_else(|| ProviderError::HeaderNotFound(block.into()))?;

    let expected_state_root = header.state_root();

    // first line can be state root
    let dump_state_root = parse_state_root(&mut reader)?;
    if expected_state_root != dump_state_root {
        error!(target: "reth::cli",
            ?dump_state_root,
            ?expected_state_root,
            header=?header.num_hash(),
            "State root from state dump does not match state root in current header."
        );
        return Err(InitStorageError::StateRootMismatch(GotExpected {
            got: dump_state_root,
            expected: expected_state_root,
        })
        .into())
    }

    debug!(target: "reth::cli",
        block,
        chain=%provider_rw.chain_spec().chain(),
        "Initializing state at block"
    );

    // remaining lines are accounts
    let collector = parse_accounts(&mut reader, etl_config)?;

    // write state to db
    dump_state(collector, provider_rw, block)?;

    info!(target: "reth::cli", "All accounts written to database, starting state root computation (may take some time)");

    // clear trie tables so state root is computed from scratch
    provider_rw.tx_ref().clear::<tables::AccountsTrie>()?;
    provider_rw.tx_ref().clear::<tables::StoragesTrie>()?;

    // compute and compare state root. this advances the stage checkpoints.
    let computed_state_root = compute_state_root(provider_rw, None)?;
    if computed_state_root == expected_state_root {
        info!(target: "reth::cli",
            ?computed_state_root,
            "Computed state root matches state root in state dump"
        );
    } else {
        error!(target: "reth::cli",
            ?computed_state_root,
            ?expected_state_root,
            "Computed state root does not match state root in state dump"
        );

        return Err(InitStorageError::StateRootMismatch(GotExpected {
            got: computed_state_root,
            expected: expected_state_root,
        })
        .into())
    }

    // insert sync stages for stages that require state
    for stage in StageId::STATE_REQUIRED {
        provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(block))?;
    }

    Ok(hash)
}
563
564/// Parses and returns expected state root.
565fn parse_state_root(reader: &mut impl BufRead) -> eyre::Result<B256> {
566    let mut line = String::new();
567    reader.read_line(&mut line)?;
568
569    let expected_state_root = serde_json::from_str::<StateRoot>(&line)?.root;
570    trace!(target: "reth::cli",
571        root=%expected_state_root,
572        "Read state root from file"
573    );
574    Ok(expected_state_root)
575}
576
577/// Parses accounts and pushes them to a [`Collector`].
578fn parse_accounts(
579    mut reader: impl BufRead,
580    etl_config: EtlConfig,
581) -> Result<Collector<Address, GenesisAccount>, eyre::Error> {
582    let mut line = String::new();
583    let mut collector = Collector::new(etl_config.file_size, etl_config.dir);
584
585    loop {
586        let n = reader.read_line(&mut line)?;
587        if n == 0 {
588            break
589        }
590
591        let GenesisAccountWithAddress { genesis_account, address } = serde_json::from_str(&line)?;
592        collector.insert(address, genesis_account)?;
593
594        line.clear();
595    }
596
597    Ok(collector)
598}
599
/// Takes a [`Collector`] and processes all accounts.
///
/// Accounts are decoded from their [`Compact`] representation and buffered until the byte
/// budget ([`DEFAULT_SOFT_LIMIT_BYTE_LEN_ACCOUNTS_CHUNK`]) is reached or the last entry is
/// seen; each chunk is then written to hashing tables, history and state at `block`.
fn dump_state<Provider>(
    mut collector: Collector<Address, GenesisAccount>,
    provider_rw: &Provider,
    block: u64,
) -> Result<(), eyre::Error>
where
    Provider: StaticFileProviderFactory
        + DBProvider<Tx: DbTxMut>
        + HeaderProvider
        + HashingWriter
        + HistoryWriter
        + StateWriter
        + StorageSettingsCache
        + RocksDBProviderFactory
        + NodePrimitivesProvider
        + AsRef<Provider>,
{
    let accounts_len = collector.len();
    // Current chunk of decoded accounts awaiting flush.
    let mut accounts = Vec::new();
    let mut total_inserted_accounts = 0;
    // Byte size of the *encoded* entries in the current chunk (soft flush trigger).
    let mut chunk_byte_size = 0;

    for (index, entry) in collector.iter()?.enumerate() {
        let (address, account) = entry?;
        chunk_byte_size += address.len() + account.len();
        let (address, _) = Address::from_compact(address.as_slice(), address.len());
        let (account, _) = GenesisAccount::from_compact(account.as_slice(), account.len());

        accounts.push((address, account));

        // Flush when the soft byte limit is hit or this is the final entry.
        if chunk_byte_size >= DEFAULT_SOFT_LIMIT_BYTE_LEN_ACCOUNTS_CHUNK ||
            index == accounts_len - 1
        {
            total_inserted_accounts += accounts.len();

            info!(target: "reth::cli",
                total_inserted_accounts,
                "Writing accounts to db"
            );

            // use transaction to insert genesis header
            insert_genesis_hashes(
                provider_rw,
                accounts.iter().map(|(address, account)| (address, account)),
            )?;

            insert_history(
                provider_rw,
                accounts.iter().map(|(address, account)| (address, account)),
                block,
            )?;

            // block is already written to static files
            insert_state(
                provider_rw,
                accounts.iter().map(|(address, account)| (address, account)),
                block,
            )?;

            accounts.clear();
            chunk_byte_size = 0;
        }
    }
    Ok(())
}
666
/// Computes the state root (from scratch) based on the accounts and storages present in the
/// database.
///
/// `prefix_sets` can restrict the recomputation to specific key prefixes; `None` walks the
/// full state. The `with_adapter!` macro selects the concrete trie table adapter for the
/// provider's storage settings and forwards it as the type parameter `A`.
fn compute_state_root<Provider>(
    provider: &Provider,
    prefix_sets: Option<TriePrefixSets>,
) -> Result<B256, InitStorageError>
where
    Provider: DBProvider<Tx: DbTxMut> + TrieWriter + StorageSettingsCache,
{
    reth_trie_db::with_adapter!(provider, |A| {
        compute_state_root_inner::<_, A>(provider, prefix_sets)
    })
}
680
/// Monomorphic worker for [`compute_state_root`], generic over the trie table adapter `A`.
///
/// Runs the state root computation incrementally: each `root_with_progress` call either
/// yields intermediate trie updates (flushed to the database, then resumed from the saved
/// intermediate state) or the final root. Loops until completion.
fn compute_state_root_inner<Provider, A>(
    provider: &Provider,
    prefix_sets: Option<TriePrefixSets>,
) -> Result<B256, InitStorageError>
where
    Provider: DBProvider<Tx: DbTxMut> + TrieWriter + StorageSettingsCache,
    A: reth_trie_db::TrieTableAdapter,
{
    trace!(target: "reth::cli", "Computing state root");

    let tx = provider.tx_ref();
    // Resume point carried across iterations; `None` on the first pass.
    let mut intermediate_state: Option<IntermediateStateRootState> = None;
    let mut total_flushed_updates = 0;

    loop {
        // Rebuild the computer each pass, resuming from where the previous pass stopped.
        let mut state_root =
            DbStateRoot::<_, A>::from_tx(tx).with_intermediate_state(intermediate_state);

        if let Some(sets) = prefix_sets.clone() {
            state_root = state_root.with_prefix_sets(sets);
        }

        match state_root.root_with_progress()? {
            // Partial progress: persist the trie updates produced so far and continue.
            StateRootProgress::Progress(state, _, updates) => {
                let updated_len = provider.write_trie_updates(updates)?;
                total_flushed_updates += updated_len;

                trace!(target: "reth::cli",
                    last_account_key = %state.account_root_state.last_hashed_key,
                    updated_len,
                    total_flushed_updates,
                    "Flushing trie updates"
                );

                intermediate_state = Some(*state);

                // Periodic progress log every SOFT_LIMIT_COUNT_FLUSHED_UPDATES updates.
                if total_flushed_updates.is_multiple_of(SOFT_LIMIT_COUNT_FLUSHED_UPDATES) {
                    info!(target: "reth::cli",
                        total_flushed_updates,
                        "Flushing trie updates"
                    );
                }
            }
            // Done: flush the final batch and return the computed root.
            StateRootProgress::Complete(root, _, updates) => {
                let updated_len = provider.write_trie_updates(updates)?;
                total_flushed_updates += updated_len;

                trace!(target: "reth::cli",
                    %root,
                    updated_len,
                    total_flushed_updates,
                    "State root has been computed"
                );

                return Ok(root)
            }
        }
    }
}
740
/// Type to deserialize state root from state dump file.
///
/// Matches the first line of the dump: a JSON object with a single `root` field.
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
struct StateRoot {
    // Expected state root of the imported state.
    root: B256,
}
746
/// An account as in the state dump file. This contains a [`GenesisAccount`] and the account's
/// address.
///
/// The `#[serde(flatten)]` makes each dump line a single flat JSON object containing both
/// the account fields and the `address` key.
#[derive(Debug, Serialize, Deserialize)]
struct GenesisAccountWithAddress {
    /// The account's balance, nonce, code, and storage.
    #[serde(flatten)]
    genesis_account: GenesisAccount,
    /// The account's address.
    address: Address,
}
757
758#[cfg(test)]
759mod tests {
760    use super::*;
761    use alloy_consensus::constants::{
762        HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH,
763    };
764    use alloy_genesis::Genesis;
765    use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET, SEPOLIA};
766    use reth_db::DatabaseEnv;
767    use reth_db_api::{
768        cursor::DbCursorRO,
769        models::{storage_sharded_key::StorageShardedKey, IntegerList, ShardedKey},
770        table::{Table, TableRow},
771        transaction::DbTx,
772        Database,
773    };
774    use reth_provider::{
775        test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB},
776        ProviderFactory, RocksDBProviderFactory,
777    };
778    use std::{collections::BTreeMap, sync::Arc};
779
    /// Test helper: walks the full range of table `T` via a read cursor and collects all
    /// key/value rows into a `Vec`, surfacing any database error as `InitStorageError`.
    fn collect_table_entries<DB, T>(
        tx: &<DB as Database>::TX,
    ) -> Result<Vec<TableRow<T>>, InitStorageError>
    where
        DB: Database,
        T: Table,
    {
        Ok(tx.cursor_read::<T>()?.walk_range(..)?.collect::<Result<Vec<_>, _>>()?)
    }
789
790    #[test]
791    fn success_init_genesis_mainnet() {
792        let genesis_hash =
793            init_genesis(&create_test_provider_factory_with_chain_spec(MAINNET.clone())).unwrap();
794
795        // actual, expected
796        assert_eq!(genesis_hash, MAINNET_GENESIS_HASH);
797    }
798
799    #[test]
800    fn success_init_genesis_sepolia() {
801        let genesis_hash =
802            init_genesis(&create_test_provider_factory_with_chain_spec(SEPOLIA.clone())).unwrap();
803
804        // actual, expected
805        assert_eq!(genesis_hash, SEPOLIA_GENESIS_HASH);
806    }
807
808    #[test]
809    fn success_init_genesis_holesky() {
810        let genesis_hash =
811            init_genesis(&create_test_provider_factory_with_chain_spec(HOLESKY.clone())).unwrap();
812
813        // actual, expected
814        assert_eq!(genesis_hash, HOLESKY_GENESIS_HASH);
815    }
816
    /// Initializes Sepolia genesis, then re-runs `init_genesis` with the Mainnet chainspec
    /// over the same database and expects a `GenesisHashMismatch` error.
    #[test]
    fn fail_init_inconsistent_db() {
        let factory = create_test_provider_factory_with_chain_spec(SEPOLIA.clone());
        let static_file_provider = factory.static_file_provider();
        let rocksdb_provider = factory.rocksdb_provider();
        init_genesis(&factory).unwrap();

        // Try to init db with a different genesis block
        let genesis_hash = init_genesis(
            &ProviderFactory::<MockNodeTypesWithDB>::new(
                factory.into_db(),
                MAINNET.clone(),
                static_file_provider,
                rocksdb_provider,
                reth_tasks::Runtime::test(),
            )
            .unwrap(),
        );

        // The stored (Sepolia) hash must be reported against the requested (Mainnet) one.
        assert!(matches!(
            genesis_hash.unwrap_err(),
            InitStorageError::GenesisHashMismatch {
                chainspec_hash: MAINNET_GENESIS_HASH,
                storage_hash: SEPOLIA_GENESIS_HASH
            }
        ))
    }
844
845    #[test]
846    fn init_genesis_history() {
847        let address_with_balance = Address::with_last_byte(1);
848        let address_with_storage = Address::with_last_byte(2);
849        let storage_key = B256::with_last_byte(1);
850        let chain_spec = Arc::new(ChainSpec {
851            chain: Chain::from_id(1),
852            genesis: Genesis {
853                alloc: BTreeMap::from([
854                    (
855                        address_with_balance,
856                        GenesisAccount { balance: U256::from(1), ..Default::default() },
857                    ),
858                    (
859                        address_with_storage,
860                        GenesisAccount {
861                            storage: Some(BTreeMap::from([(storage_key, B256::random())])),
862                            ..Default::default()
863                        },
864                    ),
865                ]),
866                ..Default::default()
867            },
868            hardforks: Default::default(),
869            paris_block_and_final_difficulty: None,
870            deposit_contract: None,
871            ..Default::default()
872        });
873
874        let factory = create_test_provider_factory_with_chain_spec(chain_spec);
875        init_genesis(&factory).unwrap();
876
877        let expected_accounts = vec![
878            (ShardedKey::new(address_with_balance, u64::MAX), IntegerList::new([0]).unwrap()),
879            (ShardedKey::new(address_with_storage, u64::MAX), IntegerList::new([0]).unwrap()),
880        ];
881        let expected_storages = vec![(
882            StorageShardedKey::new(address_with_storage, storage_key, u64::MAX),
883            IntegerList::new([0]).unwrap(),
884        )];
885
886        let collect_from_mdbx = |factory: &ProviderFactory<MockNodeTypesWithDB>| {
887            let provider = factory.provider().unwrap();
888            let tx = provider.tx_ref();
889            (
890                collect_table_entries::<DatabaseEnv, tables::AccountsHistory>(tx).unwrap(),
891                collect_table_entries::<DatabaseEnv, tables::StoragesHistory>(tx).unwrap(),
892            )
893        };
894
895        {
896            let settings = factory.cached_storage_settings();
897            let rocksdb = factory.rocksdb_provider();
898
899            let collect_rocksdb = |rocksdb: &reth_provider::providers::RocksDBProvider| {
900                (
901                    rocksdb
902                        .iter::<tables::AccountsHistory>()
903                        .unwrap()
904                        .collect::<Result<Vec<_>, _>>()
905                        .unwrap(),
906                    rocksdb
907                        .iter::<tables::StoragesHistory>()
908                        .unwrap()
909                        .collect::<Result<Vec<_>, _>>()
910                        .unwrap(),
911                )
912            };
913
914            let (accounts, storages) = if settings.storage_v2 {
915                collect_rocksdb(&rocksdb)
916            } else {
917                collect_from_mdbx(&factory)
918            };
919            assert_eq!(accounts, expected_accounts);
920            assert_eq!(storages, expected_storages);
921        }
922    }
923
924    #[test]
925    fn warn_storage_settings_mismatch() {
926        let factory = create_test_provider_factory_with_chain_spec(MAINNET.clone());
927        init_genesis_with_settings(&factory, StorageSettings::v1()).unwrap();
928
929        // Request different settings - should warn but succeed
930        let result = init_genesis_with_settings(&factory, StorageSettings::v2());
931
932        // Should succeed (warning is logged, not an error)
933        assert!(result.is_ok());
934    }
935
936    #[test]
937    fn allow_same_storage_settings() {
938        let factory = create_test_provider_factory_with_chain_spec(MAINNET.clone());
939        let settings = StorageSettings::v2();
940        init_genesis_with_settings(&factory, settings).unwrap();
941
942        let result = init_genesis_with_settings(&factory, settings);
943
944        assert!(result.is_ok());
945    }
946}