reth_db_common/
init.rs

1//! Reth genesis initialization utility functions.
2
3use alloy_consensus::BlockHeader;
4use alloy_genesis::GenesisAccount;
5use alloy_primitives::{keccak256, map::HashMap, Address, B256, U256};
6use reth_chainspec::EthChainSpec;
7use reth_codecs::Compact;
8use reth_config::config::EtlConfig;
9use reth_db_api::{tables, transaction::DbTxMut, DatabaseError};
10use reth_etl::Collector;
11use reth_execution_errors::StateRootError;
12use reth_primitives_traits::{
13    Account, Bytecode, GotExpected, NodePrimitives, SealedHeader, StorageEntry,
14};
15use reth_provider::{
16    errors::provider::ProviderResult, providers::StaticFileWriter, BlockHashReader, BlockNumReader,
17    BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome,
18    HashingWriter, HeaderProvider, HistoryWriter, MetadataWriter, OriginalValuesKnown,
19    ProviderError, RevertsInit, StageCheckpointReader, StageCheckpointWriter, StateWriter,
20    StaticFileProviderFactory, StorageSettings, StorageSettingsCache, TrieWriter,
21};
22use reth_stages_types::{StageCheckpoint, StageId};
23use reth_static_file_types::StaticFileSegment;
24use reth_trie::{
25    prefix_set::{TriePrefixSets, TriePrefixSetsMut},
26    IntermediateStateRootState, Nibbles, StateRoot as StateRootComputer, StateRootProgress,
27};
28use reth_trie_db::DatabaseStateRoot;
29use serde::{Deserialize, Serialize};
30use std::io::BufRead;
31use tracing::{debug, error, info, trace};
32
/// Default soft limit for number of bytes to read from state dump file, before inserting into
/// database.
///
/// Default is 1 GB.
pub const DEFAULT_SOFT_LIMIT_BYTE_LEN_ACCOUNTS_CHUNK: usize = 1_000_000_000;

/// Approximate number of accounts per 1 GB of state dump file. One account is approximately 3.5 KB
///
/// That works out to approximately 285 228 accounts per GB.
//
// (14.05 GB OP mainnet state dump at Bedrock block / 4 007 565 accounts in file > 3.5 KB per
// account)
pub const AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP: usize = 285_228;

/// Soft limit for the number of flushed updates after which to log progress summary.
const SOFT_LIMIT_COUNT_FLUSHED_UPDATES: usize = 1_000_000;
49
/// Storage initialization error type.
///
/// Covers mismatched/partial genesis state as well as errors bubbled up from the
/// provider and state-root computation layers.
#[derive(Debug, thiserror::Error, Clone)]
pub enum InitStorageError {
    /// Genesis header found on static files but the database is empty.
    #[error(
        "static files found, but the database is uninitialized. If attempting to re-syncing, delete both."
    )]
    UninitializedDatabase,
    /// An existing genesis block was found in the database, and its hash did not match the hash of
    /// the chainspec.
    #[error(
        "genesis hash in the storage does not match the specified chainspec: chainspec is {chainspec_hash}, database is {storage_hash}"
    )]
    GenesisHashMismatch {
        /// Expected genesis hash.
        chainspec_hash: B256,
        /// Actual genesis hash.
        storage_hash: B256,
    },
    /// Provider error.
    #[error(transparent)]
    Provider(#[from] ProviderError),
    /// State root error while computing the state root
    #[error(transparent)]
    StateRootError(#[from] StateRootError),
    /// State root doesn't match the expected one.
    #[error("state root mismatch: {_0}")]
    StateRootMismatch(GotExpected<B256>),
}
79
impl From<DatabaseError> for InitStorageError {
    /// Wraps a low-level database error into the [`ProviderError`]-carrying variant.
    fn from(error: DatabaseError) -> Self {
        Self::Provider(ProviderError::Database(error))
    }
}
85
/// Write the genesis block if it has not already been written
///
/// Convenience wrapper that initializes genesis with [`StorageSettings::legacy`];
/// returns the genesis hash on success.
pub fn init_genesis<PF>(factory: &PF) -> Result<B256, InitStorageError>
where
    PF: DatabaseProviderFactory
        + StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact>>
        + ChainSpecProvider
        + StageCheckpointReader
        + BlockHashReader
        + StorageSettingsCache,
    PF::ProviderRW: StaticFileProviderFactory<Primitives = PF::Primitives>
        + StageCheckpointWriter
        + HistoryWriter
        + HeaderProvider
        + HashingWriter
        + StateWriter
        + TrieWriter
        + MetadataWriter
        + ChainSpecProvider
        + AsRef<PF::ProviderRW>,
    PF::ChainSpec: EthChainSpec<Header = <PF::Primitives as NodePrimitives>::BlockHeader>,
{
    // Delegate to the settings-aware variant using the legacy storage settings.
    init_genesis_with_settings(factory, StorageSettings::legacy())
}
109
/// Write the genesis block if it has not already been written with [`StorageSettings`].
///
/// Returns the genesis hash. Errors if the storage already contains a different genesis
/// block, or if static files exist while the database is uninitialized.
pub fn init_genesis_with_settings<PF>(
    factory: &PF,
    storage_settings: StorageSettings,
) -> Result<B256, InitStorageError>
where
    PF: DatabaseProviderFactory
        + StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact>>
        + ChainSpecProvider
        + StageCheckpointReader
        + BlockHashReader
        + StorageSettingsCache,
    PF::ProviderRW: StaticFileProviderFactory<Primitives = PF::Primitives>
        + StageCheckpointWriter
        + HistoryWriter
        + HeaderProvider
        + HashingWriter
        + StateWriter
        + TrieWriter
        + MetadataWriter
        + ChainSpecProvider
        + AsRef<PF::ProviderRW>,
    PF::ChainSpec: EthChainSpec<Header = <PF::Primitives as NodePrimitives>::BlockHeader>,
{
    let chain = factory.chain_spec();

    let genesis = chain.genesis();
    let hash = chain.genesis_hash();

    // Get the genesis block number from the chain spec
    let genesis_block_number = chain.genesis_header().number();

    // Check if we already have the genesis header or if we have the wrong one.
    match factory.block_hash(genesis_block_number) {
        // Not written yet (or headers static-file segment missing): fall through and initialize.
        Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, _)) => {}
        Ok(Some(block_hash)) => {
            if block_hash == hash {
                // Some users will at times attempt to re-sync from scratch by just deleting the
                // database. Since `factory.block_hash` will only query the static files, we need to
                // make sure that our database has been written to, and throw error if it's empty.
                if factory.get_stage_checkpoint(StageId::Headers)?.is_none() {
                    error!(target: "reth::storage", "Genesis header found on static files, but database is uninitialized.");
                    return Err(InitStorageError::UninitializedDatabase)
                }

                debug!("Genesis already written, skipping.");
                return Ok(hash)
            }

            // A genesis block exists but belongs to a different chainspec.
            return Err(InitStorageError::GenesisHashMismatch {
                chainspec_hash: hash,
                storage_hash: block_hash,
            })
        }
        Err(e) => {
            debug!(?e);
            return Err(e.into());
        }
    }

    debug!("Writing genesis block.");

    let alloc = &genesis.alloc;

    // use transaction to insert genesis header
    let provider_rw = factory.database_provider_rw()?;
    insert_genesis_hashes(&provider_rw, alloc.iter())?;
    insert_genesis_history(&provider_rw, alloc.iter())?;

    // Insert header
    insert_genesis_header(&provider_rw, &chain)?;

    insert_genesis_state(&provider_rw, alloc.iter())?;

    // compute state root to populate trie tables
    compute_state_root(&provider_rw, None)?;

    // set stage checkpoint to genesis block number for all stages
    let checkpoint = StageCheckpoint::new(genesis_block_number);
    for stage in StageId::ALL {
        provider_rw.save_stage_checkpoint(stage, checkpoint)?;
    }

    let static_file_provider = provider_rw.static_file_provider();

    // Static file segments start empty, so we need to initialize the genesis block.
    // For genesis blocks with non-zero block numbers, we need to use get_writer() instead of
    // latest_writer() to ensure the genesis block is stored in the correct static file range.
    static_file_provider
        .get_writer(genesis_block_number, StaticFileSegment::Receipts)?
        .user_header_mut()
        .set_block_range(genesis_block_number, genesis_block_number);
    static_file_provider
        .get_writer(genesis_block_number, StaticFileSegment::Transactions)?
        .user_header_mut()
        .set_block_range(genesis_block_number, genesis_block_number);

    // Behaviour reserved only for new nodes should be set here.
    provider_rw.write_storage_settings(storage_settings)?;

    // `commit_unwind` will first commit the DB and then the static file provider, which is
    // necessary on `init_genesis`.
    provider_rw.commit()?;
    factory.set_storage_settings_cache(storage_settings);

    Ok(hash)
}
218
219/// Inserts the genesis state into the database.
220pub fn insert_genesis_state<'a, 'b, Provider>(
221    provider: &Provider,
222    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)>,
223) -> ProviderResult<()>
224where
225    Provider: StaticFileProviderFactory
226        + DBProvider<Tx: DbTxMut>
227        + HeaderProvider
228        + StateWriter
229        + ChainSpecProvider
230        + AsRef<Provider>,
231{
232    let genesis_block_number = provider.chain_spec().genesis_header().number();
233    insert_state(provider, alloc, genesis_block_number)
234}
235
236/// Inserts state at given block into database.
237pub fn insert_state<'a, 'b, Provider>(
238    provider: &Provider,
239    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)>,
240    block: u64,
241) -> ProviderResult<()>
242where
243    Provider: StaticFileProviderFactory
244        + DBProvider<Tx: DbTxMut>
245        + HeaderProvider
246        + StateWriter
247        + AsRef<Provider>,
248{
249    let capacity = alloc.size_hint().1.unwrap_or(0);
250    let mut state_init: BundleStateInit =
251        HashMap::with_capacity_and_hasher(capacity, Default::default());
252    let mut reverts_init = HashMap::with_capacity_and_hasher(capacity, Default::default());
253    let mut contracts: HashMap<B256, Bytecode> =
254        HashMap::with_capacity_and_hasher(capacity, Default::default());
255
256    for (address, account) in alloc {
257        let bytecode_hash = if let Some(code) = &account.code {
258            match Bytecode::new_raw_checked(code.clone()) {
259                Ok(bytecode) => {
260                    let hash = bytecode.hash_slow();
261                    contracts.insert(hash, bytecode);
262                    Some(hash)
263                }
264                Err(err) => {
265                    error!(%address, %err, "Failed to decode genesis bytecode.");
266                    return Err(DatabaseError::Other(err.to_string()).into());
267                }
268            }
269        } else {
270            None
271        };
272
273        // get state
274        let storage = account
275            .storage
276            .as_ref()
277            .map(|m| {
278                m.iter()
279                    .map(|(key, value)| {
280                        let value = U256::from_be_bytes(value.0);
281                        (*key, (U256::ZERO, value))
282                    })
283                    .collect::<HashMap<_, _>>()
284            })
285            .unwrap_or_default();
286
287        reverts_init.insert(
288            *address,
289            (Some(None), storage.keys().map(|k| StorageEntry::new(*k, U256::ZERO)).collect()),
290        );
291
292        state_init.insert(
293            *address,
294            (
295                None,
296                Some(Account {
297                    nonce: account.nonce.unwrap_or_default(),
298                    balance: account.balance,
299                    bytecode_hash,
300                }),
301                storage,
302            ),
303        );
304    }
305    let all_reverts_init: RevertsInit = HashMap::from_iter([(block, reverts_init)]);
306
307    let execution_outcome = ExecutionOutcome::new_init(
308        state_init,
309        all_reverts_init,
310        contracts,
311        Vec::default(),
312        block,
313        Vec::new(),
314    );
315
316    provider.write_state(&execution_outcome, OriginalValuesKnown::Yes)?;
317
318    trace!(target: "reth::cli", "Inserted state");
319
320    Ok(())
321}
322
323/// Inserts hashes for the genesis state.
324pub fn insert_genesis_hashes<'a, 'b, Provider>(
325    provider: &Provider,
326    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)> + Clone,
327) -> ProviderResult<()>
328where
329    Provider: DBProvider<Tx: DbTxMut> + HashingWriter,
330{
331    // insert and hash accounts to hashing table
332    let alloc_accounts = alloc.clone().map(|(addr, account)| (*addr, Some(Account::from(account))));
333    provider.insert_account_for_hashing(alloc_accounts)?;
334
335    trace!(target: "reth::cli", "Inserted account hashes");
336
337    let alloc_storage = alloc.filter_map(|(addr, account)| {
338        // only return Some if there is storage
339        account.storage.as_ref().map(|storage| {
340            (*addr, storage.iter().map(|(&key, &value)| StorageEntry { key, value: value.into() }))
341        })
342    });
343    provider.insert_storage_for_hashing(alloc_storage)?;
344
345    trace!(target: "reth::cli", "Inserted storage hashes");
346
347    Ok(())
348}
349
350/// Inserts history indices for genesis accounts and storage.
351pub fn insert_genesis_history<'a, 'b, Provider>(
352    provider: &Provider,
353    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)> + Clone,
354) -> ProviderResult<()>
355where
356    Provider: DBProvider<Tx: DbTxMut> + HistoryWriter + ChainSpecProvider,
357{
358    let genesis_block_number = provider.chain_spec().genesis_header().number();
359    insert_history(provider, alloc, genesis_block_number)
360}
361
362/// Inserts history indices for genesis accounts and storage.
363pub fn insert_history<'a, 'b, Provider>(
364    provider: &Provider,
365    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)> + Clone,
366    block: u64,
367) -> ProviderResult<()>
368where
369    Provider: DBProvider<Tx: DbTxMut> + HistoryWriter,
370{
371    let account_transitions = alloc.clone().map(|(addr, _)| (*addr, [block]));
372    provider.insert_account_history_index(account_transitions)?;
373
374    trace!(target: "reth::cli", "Inserted account history");
375
376    let storage_transitions = alloc
377        .filter_map(|(addr, account)| account.storage.as_ref().map(|storage| (addr, storage)))
378        .flat_map(|(addr, storage)| storage.keys().map(|key| ((*addr, *key), [block])));
379    provider.insert_storage_history_index(storage_transitions)?;
380
381    trace!(target: "reth::cli", "Inserted storage history");
382
383    Ok(())
384}
385
/// Inserts header for the genesis state.
///
/// Appends the genesis header to the headers static-file segment (if absent) and
/// writes the hash→number index plus an empty block-body entry to the database.
pub fn insert_genesis_header<Provider, Spec>(
    provider: &Provider,
    chain: &Spec,
) -> ProviderResult<()>
where
    Provider: StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact>>
        + DBProvider<Tx: DbTxMut>,
    Spec: EthChainSpec<Header = <Provider::Primitives as NodePrimitives>::BlockHeader>,
{
    let (header, block_hash) = (chain.genesis_header(), chain.genesis_hash());
    let static_file_provider = provider.static_file_provider();

    // Get the actual genesis block number from the header
    let genesis_block_number = header.number();

    match static_file_provider.block_hash(genesis_block_number) {
        // Header not yet present in the static files: append it below.
        Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, _)) => {
            let difficulty = header.difficulty();

            // For genesis blocks with non-zero block numbers, we need to ensure they are stored
            // in the correct static file range. We use get_writer() with the genesis block number
            // to ensure the genesis block is stored in the correct static file range.
            let mut writer = static_file_provider
                .get_writer(genesis_block_number, StaticFileSegment::Headers)?;

            // For non-zero genesis blocks, we need to set block range to genesis_block_number and
            // append header without increment block
            if genesis_block_number > 0 {
                writer
                    .user_header_mut()
                    .set_block_range(genesis_block_number, genesis_block_number);
                writer.append_header_direct(header, difficulty, &block_hash)?;
            } else {
                // For zero genesis blocks, use normal append_header
                writer.append_header(header, &block_hash)?;
            }
        }
        // Header already present: nothing to append.
        Ok(Some(_)) => {}
        Err(e) => return Err(e),
    }

    // Index the hash and initialize an empty body entry for the genesis block.
    provider.tx_ref().put::<tables::HeaderNumbers>(block_hash, genesis_block_number)?;
    provider.tx_ref().put::<tables::BlockBodyIndices>(genesis_block_number, Default::default())?;

    Ok(())
}
433
/// Reads account state from a [`BufRead`] reader and initializes it at the highest block that can
/// be found on database.
///
/// It's similar to [`init_genesis`] but supports importing state too big to fit in memory, and can
/// be set to the highest block present. One practical usecase is to import OP mainnet state at
/// bedrock transition block.
///
/// The dump's first line must carry the expected state root; each following line is one
/// JSON-encoded account. Returns the hash of the block the state was initialized at.
pub fn init_from_state_dump<Provider>(
    mut reader: impl BufRead,
    provider_rw: &Provider,
    etl_config: EtlConfig,
) -> eyre::Result<B256>
where
    Provider: StaticFileProviderFactory
        + DBProvider<Tx: DbTxMut>
        + BlockNumReader
        + BlockHashReader
        + ChainSpecProvider
        + StageCheckpointWriter
        + HistoryWriter
        + HeaderProvider
        + HashingWriter
        + TrieWriter
        + StateWriter
        + AsRef<Provider>,
{
    // A zero ETL file size would make the collector unusable.
    if etl_config.file_size == 0 {
        return Err(eyre::eyre!("ETL file size cannot be zero"))
    }

    // Anchor the imported state at the highest block already present in storage.
    let block = provider_rw.last_block_number()?;

    let hash = provider_rw
        .block_hash(block)?
        .ok_or_else(|| eyre::eyre!("Block hash not found for block {}", block))?;
    let header = provider_rw
        .header_by_number(block)?
        .map(SealedHeader::seal_slow)
        .ok_or_else(|| ProviderError::HeaderNotFound(block.into()))?;

    let expected_state_root = header.state_root();

    // first line can be state root
    let dump_state_root = parse_state_root(&mut reader)?;
    if expected_state_root != dump_state_root {
        error!(target: "reth::cli",
            ?dump_state_root,
            ?expected_state_root,
            header=?header.num_hash(),
            "State root from state dump does not match state root in current header."
        );
        return Err(InitStorageError::StateRootMismatch(GotExpected {
            got: dump_state_root,
            expected: expected_state_root,
        })
        .into())
    }

    debug!(target: "reth::cli",
        block,
        chain=%provider_rw.chain_spec().chain(),
        "Initializing state at block"
    );

    // remaining lines are accounts
    let collector = parse_accounts(&mut reader, etl_config)?;

    // write state to db and collect prefix sets
    let mut prefix_sets = TriePrefixSetsMut::default();
    dump_state(collector, provider_rw, block, &mut prefix_sets)?;

    info!(target: "reth::cli", "All accounts written to database, starting state root computation (may take some time)");

    // compute and compare state root. this advances the stage checkpoints.
    let computed_state_root = compute_state_root(provider_rw, Some(prefix_sets.freeze()))?;
    if computed_state_root == expected_state_root {
        info!(target: "reth::cli",
            ?computed_state_root,
            "Computed state root matches state root in state dump"
        );
    } else {
        error!(target: "reth::cli",
            ?computed_state_root,
            ?expected_state_root,
            "Computed state root does not match state root in state dump"
        );

        return Err(InitStorageError::StateRootMismatch(GotExpected {
            got: computed_state_root,
            expected: expected_state_root,
        })
        .into())
    }

    // insert sync stages for stages that require state
    for stage in StageId::STATE_REQUIRED {
        provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(block))?;
    }

    Ok(hash)
}
534
535/// Parses and returns expected state root.
536fn parse_state_root(reader: &mut impl BufRead) -> eyre::Result<B256> {
537    let mut line = String::new();
538    reader.read_line(&mut line)?;
539
540    let expected_state_root = serde_json::from_str::<StateRoot>(&line)?.root;
541    trace!(target: "reth::cli",
542        root=%expected_state_root,
543        "Read state root from file"
544    );
545    Ok(expected_state_root)
546}
547
548/// Parses accounts and pushes them to a [`Collector`].
549fn parse_accounts(
550    mut reader: impl BufRead,
551    etl_config: EtlConfig,
552) -> Result<Collector<Address, GenesisAccount>, eyre::Error> {
553    let mut line = String::new();
554    let mut collector = Collector::new(etl_config.file_size, etl_config.dir);
555
556    loop {
557        let n = reader.read_line(&mut line)?;
558        if n == 0 {
559            break
560        }
561
562        let GenesisAccountWithAddress { genesis_account, address } = serde_json::from_str(&line)?;
563        collector.insert(address, genesis_account)?;
564
565        if !collector.is_empty() &&
566            collector.len().is_multiple_of(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP)
567        {
568            info!(target: "reth::cli",
569                parsed_new_accounts=collector.len(),
570            );
571        }
572
573        line.clear();
574    }
575
576    Ok(collector)
577}
578
/// Takes a [`Collector`] and processes all accounts.
///
/// Decodes each collected (address, account) pair, records its hashed address and storage
/// keys into `prefix_sets` for the subsequent state-root computation, and writes the
/// accounts to the database in chunks of roughly [`AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP`].
fn dump_state<Provider>(
    mut collector: Collector<Address, GenesisAccount>,
    provider_rw: &Provider,
    block: u64,
    prefix_sets: &mut TriePrefixSetsMut,
) -> Result<(), eyre::Error>
where
    Provider: StaticFileProviderFactory
        + DBProvider<Tx: DbTxMut>
        + HeaderProvider
        + HashingWriter
        + HistoryWriter
        + StateWriter
        + AsRef<Provider>,
{
    let accounts_len = collector.len();
    let mut accounts = Vec::with_capacity(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP);
    let mut total_inserted_accounts = 0;

    for (index, entry) in collector.iter()?.enumerate() {
        let (address, account) = entry?;
        // Collector entries are Compact-encoded; decode back into concrete types.
        let (address, _) = Address::from_compact(address.as_slice(), address.len());
        let (account, _) = GenesisAccount::from_compact(account.as_slice(), account.len());

        // Add to prefix sets
        let hashed_address = keccak256(address);
        prefix_sets.account_prefix_set.insert(Nibbles::unpack(hashed_address));

        // Add storage keys to prefix sets if storage exists
        if let Some(ref storage) = account.storage {
            for key in storage.keys() {
                let hashed_key = keccak256(key);
                prefix_sets
                    .storage_prefix_sets
                    .entry(hashed_address)
                    .or_default()
                    .insert(Nibbles::unpack(hashed_key));
            }
        }

        accounts.push((address, account));

        // Flush a chunk every AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP accounts, and always
        // on the final entry so the last partial chunk is written too.
        if (index > 0 && index.is_multiple_of(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP)) ||
            index == accounts_len - 1
        {
            total_inserted_accounts += accounts.len();

            info!(target: "reth::cli",
                total_inserted_accounts,
                "Writing accounts to db"
            );

            // use transaction to insert genesis header
            insert_genesis_hashes(
                provider_rw,
                accounts.iter().map(|(address, account)| (address, account)),
            )?;

            insert_history(
                provider_rw,
                accounts.iter().map(|(address, account)| (address, account)),
                block,
            )?;

            // block is already written to static files
            insert_state(
                provider_rw,
                accounts.iter().map(|(address, account)| (address, account)),
                block,
            )?;

            accounts.clear();
        }
    }
    Ok(())
}
656
/// Computes the state root (from scratch) based on the accounts and storages present in the
/// database.
///
/// Runs the state-root computation incrementally, flushing intermediate trie updates to the
/// database between iterations, and returns the final root. When `prefix_sets` is provided,
/// only the touched key ranges are recomputed.
fn compute_state_root<Provider>(
    provider: &Provider,
    prefix_sets: Option<TriePrefixSets>,
) -> Result<B256, InitStorageError>
where
    Provider: DBProvider<Tx: DbTxMut> + TrieWriter,
{
    trace!(target: "reth::cli", "Computing state root");

    let tx = provider.tx_ref();
    let mut intermediate_state: Option<IntermediateStateRootState> = None;
    let mut total_flushed_updates = 0;

    loop {
        // Each pass resumes from the previously persisted intermediate state (if any).
        let mut state_root =
            StateRootComputer::from_tx(tx).with_intermediate_state(intermediate_state);

        if let Some(sets) = prefix_sets.clone() {
            state_root = state_root.with_prefix_sets(sets);
        }

        match state_root.root_with_progress()? {
            // Partial progress: persist the trie updates and loop again from the checkpoint.
            StateRootProgress::Progress(state, _, updates) => {
                let updated_len = provider.write_trie_updates(updates)?;
                total_flushed_updates += updated_len;

                trace!(target: "reth::cli",
                    last_account_key = %state.account_root_state.last_hashed_key,
                    updated_len,
                    total_flushed_updates,
                    "Flushing trie updates"
                );

                intermediate_state = Some(*state);

                // Periodic progress summary for long-running computations.
                if total_flushed_updates.is_multiple_of(SOFT_LIMIT_COUNT_FLUSHED_UPDATES) {
                    info!(target: "reth::cli",
                        total_flushed_updates,
                        "Flushing trie updates"
                    );
                }
            }
            // Done: persist the final batch of updates and return the computed root.
            StateRootProgress::Complete(root, _, updates) => {
                let updated_len = provider.write_trie_updates(updates)?;
                total_flushed_updates += updated_len;

                trace!(target: "reth::cli",
                    %root,
                    updated_len,
                    total_flushed_updates,
                    "State root has been computed"
                );

                return Ok(root)
            }
        }
    }
}
717
/// Type to deserialize state root from state dump file.
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
struct StateRoot {
    /// The expected state root, as found on the dump's first line.
    root: B256,
}
723
/// An account as in the state dump file. This contains a [`GenesisAccount`] and the account's
/// address.
#[derive(Debug, Serialize, Deserialize)]
struct GenesisAccountWithAddress {
    /// The account's balance, nonce, code, and storage.
    // `flatten` merges the account's fields into the same JSON object as `address`.
    #[serde(flatten)]
    genesis_account: GenesisAccount,
    /// The account's address.
    address: Address,
}
734
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_consensus::constants::{
        HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH,
    };
    use alloy_genesis::Genesis;
    use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET, SEPOLIA};
    use reth_db::DatabaseEnv;
    use reth_db_api::{
        cursor::DbCursorRO,
        models::{storage_sharded_key::StorageShardedKey, IntegerList, ShardedKey},
        table::{Table, TableRow},
        transaction::DbTx,
        Database,
    };
    use reth_provider::{
        test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB},
        ProviderFactory, RocksDBProviderFactory,
    };
    use std::{collections::BTreeMap, sync::Arc};

    /// Collects every row of table `T` into a vector for assertions.
    fn collect_table_entries<DB, T>(
        tx: &<DB as Database>::TX,
    ) -> Result<Vec<TableRow<T>>, InitStorageError>
    where
        DB: Database,
        T: Table,
    {
        Ok(tx.cursor_read::<T>()?.walk_range(..)?.collect::<Result<Vec<_>, _>>()?)
    }

    // Initializing genesis on well-known chains must yield their canonical genesis hashes.
    #[test]
    fn success_init_genesis_mainnet() {
        let genesis_hash =
            init_genesis(&create_test_provider_factory_with_chain_spec(MAINNET.clone())).unwrap();

        // actual, expected
        assert_eq!(genesis_hash, MAINNET_GENESIS_HASH);
    }

    #[test]
    fn success_init_genesis_sepolia() {
        let genesis_hash =
            init_genesis(&create_test_provider_factory_with_chain_spec(SEPOLIA.clone())).unwrap();

        // actual, expected
        assert_eq!(genesis_hash, SEPOLIA_GENESIS_HASH);
    }

    #[test]
    fn success_init_genesis_holesky() {
        let genesis_hash =
            init_genesis(&create_test_provider_factory_with_chain_spec(HOLESKY.clone())).unwrap();

        // actual, expected
        assert_eq!(genesis_hash, HOLESKY_GENESIS_HASH);
    }

    // Re-initializing with a different chainspec over existing storage must fail with a
    // genesis-hash mismatch.
    #[test]
    fn fail_init_inconsistent_db() {
        let factory = create_test_provider_factory_with_chain_spec(SEPOLIA.clone());
        let static_file_provider = factory.static_file_provider();
        let rocksdb_provider = factory.rocksdb_provider();
        init_genesis(&factory).unwrap();

        // Try to init db with a different genesis block
        let genesis_hash = init_genesis(
            &ProviderFactory::<MockNodeTypesWithDB>::new(
                factory.into_db(),
                MAINNET.clone(),
                static_file_provider,
                rocksdb_provider,
            )
            .unwrap(),
        );

        assert!(matches!(
            genesis_hash.unwrap_err(),
            InitStorageError::GenesisHashMismatch {
                chainspec_hash: MAINNET_GENESIS_HASH,
                storage_hash: SEPOLIA_GENESIS_HASH
            }
        ))
    }

    // Genesis init must index both allocated accounts (and any storage slots) in the
    // account/storage history tables at block 0.
    #[test]
    fn init_genesis_history() {
        let address_with_balance = Address::with_last_byte(1);
        let address_with_storage = Address::with_last_byte(2);
        let storage_key = B256::with_last_byte(1);
        let chain_spec = Arc::new(ChainSpec {
            chain: Chain::from_id(1),
            genesis: Genesis {
                alloc: BTreeMap::from([
                    (
                        address_with_balance,
                        GenesisAccount { balance: U256::from(1), ..Default::default() },
                    ),
                    (
                        address_with_storage,
                        GenesisAccount {
                            storage: Some(BTreeMap::from([(storage_key, B256::random())])),
                            ..Default::default()
                        },
                    ),
                ]),
                ..Default::default()
            },
            hardforks: Default::default(),
            paris_block_and_final_difficulty: None,
            deposit_contract: None,
            ..Default::default()
        });

        let factory = create_test_provider_factory_with_chain_spec(chain_spec);
        init_genesis(&factory).unwrap();

        let provider = factory.provider().unwrap();

        let tx = provider.tx_ref();

        assert_eq!(
            collect_table_entries::<Arc<DatabaseEnv>, tables::AccountsHistory>(tx)
                .expect("failed to collect"),
            vec![
                (ShardedKey::new(address_with_balance, u64::MAX), IntegerList::new([0]).unwrap()),
                (ShardedKey::new(address_with_storage, u64::MAX), IntegerList::new([0]).unwrap())
            ],
        );

        assert_eq!(
            collect_table_entries::<Arc<DatabaseEnv>, tables::StoragesHistory>(tx)
                .expect("failed to collect"),
            vec![(
                StorageShardedKey::new(address_with_storage, storage_key, u64::MAX),
                IntegerList::new([0]).unwrap()
            )],
        );
    }
}