reth_db_common/
init.rs

1//! Reth genesis initialization utility functions.
2
3use alloy_consensus::BlockHeader;
4use alloy_genesis::GenesisAccount;
5use alloy_primitives::{keccak256, map::HashMap, Address, B256, U256};
6use reth_chainspec::EthChainSpec;
7use reth_codecs::Compact;
8use reth_config::config::EtlConfig;
9use reth_db_api::{tables, transaction::DbTxMut, DatabaseError};
10use reth_etl::Collector;
11use reth_execution_errors::StateRootError;
12use reth_primitives_traits::{
13    Account, Bytecode, GotExpected, NodePrimitives, SealedHeader, StorageEntry,
14};
15use reth_provider::{
16    errors::provider::ProviderResult, providers::StaticFileWriter, BlockHashReader, BlockNumReader,
17    BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome,
18    HashingWriter, HeaderProvider, HistoryWriter, MetadataWriter, OriginalValuesKnown,
19    ProviderError, RevertsInit, StageCheckpointReader, StageCheckpointWriter, StateWriter,
20    StaticFileProviderFactory, StorageSettings, StorageSettingsCache, TrieWriter,
21};
22use reth_stages_types::{StageCheckpoint, StageId};
23use reth_static_file_types::StaticFileSegment;
24use reth_trie::{
25    prefix_set::{TriePrefixSets, TriePrefixSetsMut},
26    IntermediateStateRootState, Nibbles, StateRoot as StateRootComputer, StateRootProgress,
27};
28use reth_trie_db::DatabaseStateRoot;
29use serde::{Deserialize, Serialize};
30use std::io::BufRead;
31use tracing::{debug, error, info, trace};
32
/// Default soft limit for number of bytes to read from state dump file, before inserting into
/// database.
///
/// Default is 1 GB.
pub const DEFAULT_SOFT_LIMIT_BYTE_LEN_ACCOUNTS_CHUNK: usize = 1_000_000_000;

/// Approximate number of accounts per 1 GB of state dump file. One account is approximately 3.5 KB
///
/// Approximately 285 228 accounts.
//
// (14.05 GB OP mainnet state dump at Bedrock block / 4 007 565 accounts in file > 3.5 KB per
// account)
pub const AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP: usize = 285_228;

/// Soft limit for the number of flushed updates after which to log progress summary.
const SOFT_LIMIT_COUNT_FLUSHED_UPDATES: usize = 1_000_000;
49
50/// Storage initialization error type.
51#[derive(Debug, thiserror::Error, Clone)]
52pub enum InitStorageError {
53    /// Genesis header found on static files but the database is empty.
54    #[error(
55        "static files found, but the database is uninitialized. If attempting to re-syncing, delete both."
56    )]
57    UninitializedDatabase,
58    /// An existing genesis block was found in the database, and its hash did not match the hash of
59    /// the chainspec.
60    #[error(
61        "genesis hash in the storage does not match the specified chainspec: chainspec is {chainspec_hash}, database is {storage_hash}"
62    )]
63    GenesisHashMismatch {
64        /// Expected genesis hash.
65        chainspec_hash: B256,
66        /// Actual genesis hash.
67        storage_hash: B256,
68    },
69    /// Provider error.
70    #[error(transparent)]
71    Provider(#[from] ProviderError),
72    /// State root error while computing the state root
73    #[error(transparent)]
74    StateRootError(#[from] StateRootError),
75    /// State root doesn't match the expected one.
76    #[error("state root mismatch: {_0}")]
77    StateRootMismatch(GotExpected<B256>),
78}
79
80impl From<DatabaseError> for InitStorageError {
81    fn from(error: DatabaseError) -> Self {
82        Self::Provider(ProviderError::Database(error))
83    }
84}
85
/// Write the genesis block if it has not already been written
///
/// Convenience wrapper around [`init_genesis_with_settings`] using the legacy
/// [`StorageSettings`].
///
/// Returns the genesis hash on success.
pub fn init_genesis<PF>(factory: &PF) -> Result<B256, InitStorageError>
where
    PF: DatabaseProviderFactory
        + StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact>>
        + ChainSpecProvider
        + StageCheckpointReader
        + BlockHashReader
        + StorageSettingsCache,
    PF::ProviderRW: StaticFileProviderFactory<Primitives = PF::Primitives>
        + StageCheckpointWriter
        + HistoryWriter
        + HeaderProvider
        + HashingWriter
        + StateWriter
        + TrieWriter
        + MetadataWriter
        + AsRef<PF::ProviderRW>,
    PF::ChainSpec: EthChainSpec<Header = <PF::Primitives as NodePrimitives>::BlockHeader>,
{
    init_genesis_with_settings(factory, StorageSettings::legacy())
}
108
/// Write the genesis block if it has not already been written with [`StorageSettings`].
///
/// Behavior:
/// - If the genesis header is already present with the expected hash and the database has stage
///   checkpoints, this is a no-op and returns the genesis hash.
/// - If the genesis header is present only in static files (database empty), returns
///   [`InitStorageError::UninitializedDatabase`].
/// - If a genesis header with a different hash is found, returns
///   [`InitStorageError::GenesisHashMismatch`].
/// - Otherwise writes the genesis state, hashing and history tables, header, trie tables, stage
///   checkpoints, initializes the static file segments, and persists `storage_settings`.
///
/// Returns the genesis hash on success.
pub fn init_genesis_with_settings<PF>(
    factory: &PF,
    storage_settings: StorageSettings,
) -> Result<B256, InitStorageError>
where
    PF: DatabaseProviderFactory
        + StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact>>
        + ChainSpecProvider
        + StageCheckpointReader
        + BlockHashReader
        + StorageSettingsCache,
    PF::ProviderRW: StaticFileProviderFactory<Primitives = PF::Primitives>
        + StageCheckpointWriter
        + HistoryWriter
        + HeaderProvider
        + HashingWriter
        + StateWriter
        + TrieWriter
        + MetadataWriter
        + AsRef<PF::ProviderRW>,
    PF::ChainSpec: EthChainSpec<Header = <PF::Primitives as NodePrimitives>::BlockHeader>,
{
    let chain = factory.chain_spec();

    let genesis = chain.genesis();
    let hash = chain.genesis_hash();

    // Check if we already have the genesis header or if we have the wrong one.
    match factory.block_hash(0) {
        Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => {}
        Ok(Some(block_hash)) => {
            if block_hash == hash {
                // Some users will at times attempt to re-sync from scratch by just deleting the
                // database. Since `factory.block_hash` will only query the static files, we need to
                // make sure that our database has been written to, and throw error if it's empty.
                if factory.get_stage_checkpoint(StageId::Headers)?.is_none() {
                    error!(target: "reth::storage", "Genesis header found on static files, but database is uninitialized.");
                    return Err(InitStorageError::UninitializedDatabase)
                }

                debug!("Genesis already written, skipping.");
                return Ok(hash)
            }

            // A genesis block exists but belongs to a different chainspec.
            return Err(InitStorageError::GenesisHashMismatch {
                chainspec_hash: hash,
                storage_hash: block_hash,
            })
        }
        Err(e) => {
            debug!(?e);
            return Err(e.into());
        }
    }

    debug!("Writing genesis block.");

    let alloc = &genesis.alloc;

    // use transaction to insert genesis header
    let provider_rw = factory.database_provider_rw()?;
    insert_genesis_hashes(&provider_rw, alloc.iter())?;
    insert_genesis_history(&provider_rw, alloc.iter())?;

    // Insert header
    insert_genesis_header(&provider_rw, &chain)?;

    insert_genesis_state(&provider_rw, alloc.iter())?;

    // compute state root to populate trie tables
    compute_state_root(&provider_rw, None)?;

    // insert sync stage
    for stage in StageId::ALL {
        provider_rw.save_stage_checkpoint(stage, Default::default())?;
    }

    // Static file segments start empty, so we need to initialize the genesis block.
    let static_file_provider = provider_rw.static_file_provider();
    static_file_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(0)?;
    static_file_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(0)?;

    // Behaviour reserved only for new nodes should be set here.
    provider_rw.write_storage_settings(storage_settings)?;

    // `commit_unwind` will first commit the DB and then the static file provider, which is
    // necessary on `init_genesis`.
    provider_rw.commit()?;
    factory.set_storage_settings_cache(storage_settings);

    Ok(hash)
}
202
/// Inserts the genesis state into the database.
///
/// Thin wrapper around [`insert_state`] with the genesis block number `0`.
pub fn insert_genesis_state<'a, 'b, Provider>(
    provider: &Provider,
    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)>,
) -> ProviderResult<()>
where
    Provider: StaticFileProviderFactory
        + DBProvider<Tx: DbTxMut>
        + HeaderProvider
        + StateWriter
        + AsRef<Provider>,
{
    insert_state(provider, alloc, 0)
}
217
218/// Inserts state at given block into database.
219pub fn insert_state<'a, 'b, Provider>(
220    provider: &Provider,
221    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)>,
222    block: u64,
223) -> ProviderResult<()>
224where
225    Provider: StaticFileProviderFactory
226        + DBProvider<Tx: DbTxMut>
227        + HeaderProvider
228        + StateWriter
229        + AsRef<Provider>,
230{
231    let capacity = alloc.size_hint().1.unwrap_or(0);
232    let mut state_init: BundleStateInit =
233        HashMap::with_capacity_and_hasher(capacity, Default::default());
234    let mut reverts_init = HashMap::with_capacity_and_hasher(capacity, Default::default());
235    let mut contracts: HashMap<B256, Bytecode> =
236        HashMap::with_capacity_and_hasher(capacity, Default::default());
237
238    for (address, account) in alloc {
239        let bytecode_hash = if let Some(code) = &account.code {
240            match Bytecode::new_raw_checked(code.clone()) {
241                Ok(bytecode) => {
242                    let hash = bytecode.hash_slow();
243                    contracts.insert(hash, bytecode);
244                    Some(hash)
245                }
246                Err(err) => {
247                    error!(%address, %err, "Failed to decode genesis bytecode.");
248                    return Err(DatabaseError::Other(err.to_string()).into());
249                }
250            }
251        } else {
252            None
253        };
254
255        // get state
256        let storage = account
257            .storage
258            .as_ref()
259            .map(|m| {
260                m.iter()
261                    .map(|(key, value)| {
262                        let value = U256::from_be_bytes(value.0);
263                        (*key, (U256::ZERO, value))
264                    })
265                    .collect::<HashMap<_, _>>()
266            })
267            .unwrap_or_default();
268
269        reverts_init.insert(
270            *address,
271            (Some(None), storage.keys().map(|k| StorageEntry::new(*k, U256::ZERO)).collect()),
272        );
273
274        state_init.insert(
275            *address,
276            (
277                None,
278                Some(Account {
279                    nonce: account.nonce.unwrap_or_default(),
280                    balance: account.balance,
281                    bytecode_hash,
282                }),
283                storage,
284            ),
285        );
286    }
287    let all_reverts_init: RevertsInit = HashMap::from_iter([(block, reverts_init)]);
288
289    let execution_outcome = ExecutionOutcome::new_init(
290        state_init,
291        all_reverts_init,
292        contracts,
293        Vec::default(),
294        block,
295        Vec::new(),
296    );
297
298    provider.write_state(&execution_outcome, OriginalValuesKnown::Yes)?;
299
300    trace!(target: "reth::cli", "Inserted state");
301
302    Ok(())
303}
304
305/// Inserts hashes for the genesis state.
306pub fn insert_genesis_hashes<'a, 'b, Provider>(
307    provider: &Provider,
308    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)> + Clone,
309) -> ProviderResult<()>
310where
311    Provider: DBProvider<Tx: DbTxMut> + HashingWriter,
312{
313    // insert and hash accounts to hashing table
314    let alloc_accounts = alloc.clone().map(|(addr, account)| (*addr, Some(Account::from(account))));
315    provider.insert_account_for_hashing(alloc_accounts)?;
316
317    trace!(target: "reth::cli", "Inserted account hashes");
318
319    let alloc_storage = alloc.filter_map(|(addr, account)| {
320        // only return Some if there is storage
321        account.storage.as_ref().map(|storage| {
322            (*addr, storage.iter().map(|(&key, &value)| StorageEntry { key, value: value.into() }))
323        })
324    });
325    provider.insert_storage_for_hashing(alloc_storage)?;
326
327    trace!(target: "reth::cli", "Inserted storage hashes");
328
329    Ok(())
330}
331
/// Inserts history indices for genesis accounts and storage.
///
/// Thin wrapper around [`insert_history`] with the genesis block number `0`.
pub fn insert_genesis_history<'a, 'b, Provider>(
    provider: &Provider,
    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)> + Clone,
) -> ProviderResult<()>
where
    Provider: DBProvider<Tx: DbTxMut> + HistoryWriter,
{
    insert_history(provider, alloc, 0)
}
342
343/// Inserts history indices for genesis accounts and storage.
344pub fn insert_history<'a, 'b, Provider>(
345    provider: &Provider,
346    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)> + Clone,
347    block: u64,
348) -> ProviderResult<()>
349where
350    Provider: DBProvider<Tx: DbTxMut> + HistoryWriter,
351{
352    let account_transitions = alloc.clone().map(|(addr, _)| (*addr, [block]));
353    provider.insert_account_history_index(account_transitions)?;
354
355    trace!(target: "reth::cli", "Inserted account history");
356
357    let storage_transitions = alloc
358        .filter_map(|(addr, account)| account.storage.as_ref().map(|storage| (addr, storage)))
359        .flat_map(|(addr, storage)| storage.keys().map(|key| ((*addr, *key), [block])));
360    provider.insert_storage_history_index(storage_transitions)?;
361
362    trace!(target: "reth::cli", "Inserted storage history");
363
364    Ok(())
365}
366
/// Inserts header for the genesis state.
///
/// Appends the genesis header to the headers static file segment unless one is already present,
/// then writes the corresponding [`tables::HeaderNumbers`] and [`tables::BlockBodyIndices`]
/// entries to the database.
pub fn insert_genesis_header<Provider, Spec>(
    provider: &Provider,
    chain: &Spec,
) -> ProviderResult<()>
where
    Provider: StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact>>
        + DBProvider<Tx: DbTxMut>,
    Spec: EthChainSpec<Header = <Provider::Primitives as NodePrimitives>::BlockHeader>,
{
    let (header, block_hash) = (chain.genesis_header(), chain.genesis_hash());
    let static_file_provider = provider.static_file_provider();

    // Only append the header if block 0 is missing from the static files.
    match static_file_provider.block_hash(0) {
        Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => {
            let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?;
            writer.append_header(header, &block_hash)?;
        }
        Ok(Some(_)) => {}
        Err(e) => return Err(e),
    }

    // Database-side lookup entries for the genesis block.
    provider.tx_ref().put::<tables::HeaderNumbers>(block_hash, 0)?;
    provider.tx_ref().put::<tables::BlockBodyIndices>(0, Default::default())?;

    Ok(())
}
394
/// Reads account state from a [`BufRead`] reader and initializes it at the highest block that can
/// be found on database.
///
/// It's similar to [`init_genesis`] but supports importing state too big to fit in memory, and can
/// be set to the highest block present. One practical usecase is to import OP mainnet state at
/// bedrock transition block.
///
/// Expected dump format: the first line is a JSON object holding the state root (see
/// [`StateRoot`]), each following line is one JSON-encoded account (see
/// [`GenesisAccountWithAddress`]).
///
/// Returns the hash of the block the state was initialized at, or an error if the dump's state
/// root does not match the current header or the recomputed state root.
pub fn init_from_state_dump<Provider>(
    mut reader: impl BufRead,
    provider_rw: &Provider,
    etl_config: EtlConfig,
) -> eyre::Result<B256>
where
    Provider: StaticFileProviderFactory
        + DBProvider<Tx: DbTxMut>
        + BlockNumReader
        + BlockHashReader
        + ChainSpecProvider
        + StageCheckpointWriter
        + HistoryWriter
        + HeaderProvider
        + HashingWriter
        + TrieWriter
        + StateWriter
        + AsRef<Provider>,
{
    // A zero ETL file size would make the collector useless.
    if etl_config.file_size == 0 {
        return Err(eyre::eyre!("ETL file size cannot be zero"))
    }

    // Anchor the import at the highest block already present in storage.
    let block = provider_rw.last_block_number()?;

    let hash = provider_rw
        .block_hash(block)?
        .ok_or_else(|| eyre::eyre!("Block hash not found for block {}", block))?;
    let header = provider_rw
        .header_by_number(block)?
        .map(SealedHeader::seal_slow)
        .ok_or_else(|| ProviderError::HeaderNotFound(block.into()))?;

    let expected_state_root = header.state_root();

    // first line can be state root
    let dump_state_root = parse_state_root(&mut reader)?;
    if expected_state_root != dump_state_root {
        error!(target: "reth::cli",
            ?dump_state_root,
            ?expected_state_root,
            header=?header.num_hash(),
            "State root from state dump does not match state root in current header."
        );
        return Err(InitStorageError::StateRootMismatch(GotExpected {
            got: dump_state_root,
            expected: expected_state_root,
        })
        .into())
    }

    debug!(target: "reth::cli",
        block,
        chain=%provider_rw.chain_spec().chain(),
        "Initializing state at block"
    );

    // remaining lines are accounts
    let collector = parse_accounts(&mut reader, etl_config)?;

    // write state to db and collect prefix sets
    let mut prefix_sets = TriePrefixSetsMut::default();
    dump_state(collector, provider_rw, block, &mut prefix_sets)?;

    info!(target: "reth::cli", "All accounts written to database, starting state root computation (may take some time)");

    // compute and compare state root. this advances the stage checkpoints.
    let computed_state_root = compute_state_root(provider_rw, Some(prefix_sets.freeze()))?;
    if computed_state_root == expected_state_root {
        info!(target: "reth::cli",
            ?computed_state_root,
            "Computed state root matches state root in state dump"
        );
    } else {
        error!(target: "reth::cli",
            ?computed_state_root,
            ?expected_state_root,
            "Computed state root does not match state root in state dump"
        );

        return Err(InitStorageError::StateRootMismatch(GotExpected {
            got: computed_state_root,
            expected: expected_state_root,
        })
        .into())
    }

    // insert sync stages for stages that require state
    for stage in StageId::STATE_REQUIRED {
        provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(block))?;
    }

    Ok(hash)
}
495
496/// Parses and returns expected state root.
497fn parse_state_root(reader: &mut impl BufRead) -> eyre::Result<B256> {
498    let mut line = String::new();
499    reader.read_line(&mut line)?;
500
501    let expected_state_root = serde_json::from_str::<StateRoot>(&line)?.root;
502    trace!(target: "reth::cli",
503        root=%expected_state_root,
504        "Read state root from file"
505    );
506    Ok(expected_state_root)
507}
508
509/// Parses accounts and pushes them to a [`Collector`].
510fn parse_accounts(
511    mut reader: impl BufRead,
512    etl_config: EtlConfig,
513) -> Result<Collector<Address, GenesisAccount>, eyre::Error> {
514    let mut line = String::new();
515    let mut collector = Collector::new(etl_config.file_size, etl_config.dir);
516
517    loop {
518        let n = reader.read_line(&mut line)?;
519        if n == 0 {
520            break
521        }
522
523        let GenesisAccountWithAddress { genesis_account, address } = serde_json::from_str(&line)?;
524        collector.insert(address, genesis_account)?;
525
526        if !collector.is_empty() &&
527            collector.len().is_multiple_of(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP)
528        {
529            info!(target: "reth::cli",
530                parsed_new_accounts=collector.len(),
531            );
532        }
533
534        line.clear();
535    }
536
537    Ok(collector)
538}
539
/// Takes a [`Collector`] and processes all accounts.
///
/// Decodes each collected `(address, account)` entry, records the hashed account and storage keys
/// in `prefix_sets` for the subsequent state root computation, and flushes accumulated accounts to
/// the database in chunks of roughly [`AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP`] entries to bound
/// memory usage.
fn dump_state<Provider>(
    mut collector: Collector<Address, GenesisAccount>,
    provider_rw: &Provider,
    block: u64,
    prefix_sets: &mut TriePrefixSetsMut,
) -> Result<(), eyre::Error>
where
    Provider: StaticFileProviderFactory
        + DBProvider<Tx: DbTxMut>
        + HeaderProvider
        + HashingWriter
        + HistoryWriter
        + StateWriter
        + AsRef<Provider>,
{
    let accounts_len = collector.len();
    let mut accounts = Vec::with_capacity(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP);
    let mut total_inserted_accounts = 0;

    for (index, entry) in collector.iter()?.enumerate() {
        let (address, account) = entry?;
        // Entries are stored in their Compact encoding; decode them back.
        let (address, _) = Address::from_compact(address.as_slice(), address.len());
        let (account, _) = GenesisAccount::from_compact(account.as_slice(), account.len());

        // Add to prefix sets
        let hashed_address = keccak256(address);
        prefix_sets.account_prefix_set.insert(Nibbles::unpack(hashed_address));

        // Add storage keys to prefix sets if storage exists
        if let Some(ref storage) = account.storage {
            for key in storage.keys() {
                let hashed_key = keccak256(key);
                prefix_sets
                    .storage_prefix_sets
                    .entry(hashed_address)
                    .or_default()
                    .insert(Nibbles::unpack(hashed_key));
            }
        }

        accounts.push((address, account));

        // Flush either when a full chunk has accumulated or on the very last entry.
        if (index > 0 && index.is_multiple_of(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP)) ||
            index == accounts_len - 1
        {
            total_inserted_accounts += accounts.len();

            info!(target: "reth::cli",
                total_inserted_accounts,
                "Writing accounts to db"
            );

            // use transaction to insert genesis header
            insert_genesis_hashes(
                provider_rw,
                accounts.iter().map(|(address, account)| (address, account)),
            )?;

            insert_history(
                provider_rw,
                accounts.iter().map(|(address, account)| (address, account)),
                block,
            )?;

            // block is already written to static files
            insert_state(
                provider_rw,
                accounts.iter().map(|(address, account)| (address, account)),
                block,
            )?;

            // Reuse the chunk buffer for the next batch.
            accounts.clear();
        }
    }
    Ok(())
}
617
/// Computes the state root (from scratch) based on the accounts and storages present in the
/// database.
///
/// The computation runs in chunks via [`StateRootProgress`]: after each chunk the accumulated trie
/// updates are flushed to the database and the intermediate state is carried into the next
/// iteration, which bounds memory usage for large states. If `prefix_sets` is provided, it is
/// passed to the computer via `with_prefix_sets`.
fn compute_state_root<Provider>(
    provider: &Provider,
    prefix_sets: Option<TriePrefixSets>,
) -> Result<B256, InitStorageError>
where
    Provider: DBProvider<Tx: DbTxMut> + TrieWriter,
{
    trace!(target: "reth::cli", "Computing state root");

    let tx = provider.tx_ref();
    let mut intermediate_state: Option<IntermediateStateRootState> = None;
    let mut total_flushed_updates = 0;

    loop {
        // Resume from the previous chunk's intermediate state, if any.
        let mut state_root =
            StateRootComputer::from_tx(tx).with_intermediate_state(intermediate_state);

        if let Some(sets) = prefix_sets.clone() {
            state_root = state_root.with_prefix_sets(sets);
        }

        match state_root.root_with_progress()? {
            StateRootProgress::Progress(state, _, updates) => {
                // Persist this chunk's trie updates before continuing.
                let updated_len = provider.write_trie_updates(updates)?;
                total_flushed_updates += updated_len;

                trace!(target: "reth::cli",
                    last_account_key = %state.account_root_state.last_hashed_key,
                    updated_len,
                    total_flushed_updates,
                    "Flushing trie updates"
                );

                intermediate_state = Some(*state);

                // Periodic progress summary.
                if total_flushed_updates.is_multiple_of(SOFT_LIMIT_COUNT_FLUSHED_UPDATES) {
                    info!(target: "reth::cli",
                        total_flushed_updates,
                        "Flushing trie updates"
                    );
                }
            }
            StateRootProgress::Complete(root, _, updates) => {
                // Final chunk: flush the remaining updates and return the root.
                let updated_len = provider.write_trie_updates(updates)?;
                total_flushed_updates += updated_len;

                trace!(target: "reth::cli",
                    %root,
                    updated_len,
                    total_flushed_updates,
                    "State root has been computed"
                );

                return Ok(root)
            }
        }
    }
}
678
/// Type to deserialize state root from state dump file.
///
/// Expected as the first line of the dump file, e.g. `{"root":"0x..."}`.
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
struct StateRoot {
    /// The expected state root.
    root: B256,
}
684
/// An account as in the state dump file. This contains a [`GenesisAccount`] and the account's
/// address.
///
/// One such JSON object is expected per line after the [`StateRoot`] header line.
#[derive(Debug, Serialize, Deserialize)]
struct GenesisAccountWithAddress {
    /// The account's balance, nonce, code, and storage.
    #[serde(flatten)]
    genesis_account: GenesisAccount,
    /// The account's address.
    address: Address,
}
695
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_consensus::constants::{
        HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH,
    };
    use alloy_genesis::Genesis;
    use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET, SEPOLIA};
    use reth_db::DatabaseEnv;
    use reth_db_api::{
        cursor::DbCursorRO,
        models::{storage_sharded_key::StorageShardedKey, IntegerList, ShardedKey},
        table::{Table, TableRow},
        transaction::DbTx,
        Database,
    };
    use reth_provider::{
        test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB},
        ProviderFactory,
    };
    use std::{collections::BTreeMap, sync::Arc};

    /// Collects every row of table `T` into a vector, in key order.
    fn collect_table_entries<DB, T>(
        tx: &<DB as Database>::TX,
    ) -> Result<Vec<TableRow<T>>, InitStorageError>
    where
        DB: Database,
        T: Table,
    {
        Ok(tx.cursor_read::<T>()?.walk_range(..)?.collect::<Result<Vec<_>, _>>()?)
    }

    // Initializing genesis on a fresh database yields the canonical mainnet genesis hash.
    #[test]
    fn success_init_genesis_mainnet() {
        let genesis_hash =
            init_genesis(&create_test_provider_factory_with_chain_spec(MAINNET.clone())).unwrap();

        // actual, expected
        assert_eq!(genesis_hash, MAINNET_GENESIS_HASH);
    }

    // Same as above for the Sepolia testnet.
    #[test]
    fn success_init_genesis_sepolia() {
        let genesis_hash =
            init_genesis(&create_test_provider_factory_with_chain_spec(SEPOLIA.clone())).unwrap();

        // actual, expected
        assert_eq!(genesis_hash, SEPOLIA_GENESIS_HASH);
    }

    // Same as above for the Holesky testnet.
    #[test]
    fn success_init_genesis_holesky() {
        let genesis_hash =
            init_genesis(&create_test_provider_factory_with_chain_spec(HOLESKY.clone())).unwrap();

        // actual, expected
        assert_eq!(genesis_hash, HOLESKY_GENESIS_HASH);
    }

    // Re-initializing with a different chainspec must fail with a hash mismatch.
    #[test]
    fn fail_init_inconsistent_db() {
        let factory = create_test_provider_factory_with_chain_spec(SEPOLIA.clone());
        let static_file_provider = factory.static_file_provider();
        init_genesis(&factory).unwrap();

        // Try to init db with a different genesis block
        let genesis_hash = init_genesis(
            &ProviderFactory::<MockNodeTypesWithDB>::new(
                factory.into_db(),
                MAINNET.clone(),
                static_file_provider,
            )
            .unwrap(),
        );

        assert!(matches!(
            genesis_hash.unwrap_err(),
            InitStorageError::GenesisHashMismatch {
                chainspec_hash: MAINNET_GENESIS_HASH,
                storage_hash: SEPOLIA_GENESIS_HASH
            }
        ))
    }

    // Genesis init must populate account and storage history indices with shard key u64::MAX
    // and a single transition at block 0.
    #[test]
    fn init_genesis_history() {
        let address_with_balance = Address::with_last_byte(1);
        let address_with_storage = Address::with_last_byte(2);
        let storage_key = B256::with_last_byte(1);
        let chain_spec = Arc::new(ChainSpec {
            chain: Chain::from_id(1),
            genesis: Genesis {
                alloc: BTreeMap::from([
                    (
                        address_with_balance,
                        GenesisAccount { balance: U256::from(1), ..Default::default() },
                    ),
                    (
                        address_with_storage,
                        GenesisAccount {
                            storage: Some(BTreeMap::from([(storage_key, B256::random())])),
                            ..Default::default()
                        },
                    ),
                ]),
                ..Default::default()
            },
            hardforks: Default::default(),
            paris_block_and_final_difficulty: None,
            deposit_contract: None,
            ..Default::default()
        });

        let factory = create_test_provider_factory_with_chain_spec(chain_spec);
        init_genesis(&factory).unwrap();

        let provider = factory.provider().unwrap();

        let tx = provider.tx_ref();

        assert_eq!(
            collect_table_entries::<Arc<DatabaseEnv>, tables::AccountsHistory>(tx)
                .expect("failed to collect"),
            vec![
                (ShardedKey::new(address_with_balance, u64::MAX), IntegerList::new([0]).unwrap()),
                (ShardedKey::new(address_with_storage, u64::MAX), IntegerList::new([0]).unwrap())
            ],
        );

        assert_eq!(
            collect_table_entries::<Arc<DatabaseEnv>, tables::StoragesHistory>(tx)
                .expect("failed to collect"),
            vec![(
                StorageShardedKey::new(address_with_storage, storage_key, u64::MAX),
                IntegerList::new([0]).unwrap()
            )],
        );
    }
}