reth_db_common/
init.rs

1//! Reth genesis initialization utility functions.
2
3use alloy_consensus::BlockHeader;
4use alloy_genesis::GenesisAccount;
5use alloy_primitives::{keccak256, map::HashMap, Address, B256, U256};
6use reth_chainspec::EthChainSpec;
7use reth_codecs::Compact;
8use reth_config::config::EtlConfig;
9use reth_db_api::{tables, transaction::DbTxMut, DatabaseError};
10use reth_etl::Collector;
11use reth_execution_errors::StateRootError;
12use reth_primitives_traits::{
13    Account, Bytecode, GotExpected, NodePrimitives, SealedHeader, StorageEntry,
14};
15use reth_provider::{
16    errors::provider::ProviderResult, providers::StaticFileWriter, BlockHashReader, BlockNumReader,
17    BundleStateInit, ChainSpecProvider, DBProvider, DatabaseProviderFactory, ExecutionOutcome,
18    HashingWriter, HeaderProvider, HistoryWriter, OriginalValuesKnown, ProviderError, RevertsInit,
19    StageCheckpointReader, StageCheckpointWriter, StateWriter, StaticFileProviderFactory,
20    TrieWriter,
21};
22use reth_stages_types::{StageCheckpoint, StageId};
23use reth_static_file_types::StaticFileSegment;
24use reth_trie::{
25    prefix_set::{TriePrefixSets, TriePrefixSetsMut},
26    IntermediateStateRootState, Nibbles, StateRoot as StateRootComputer, StateRootProgress,
27};
28use reth_trie_db::DatabaseStateRoot;
29use serde::{Deserialize, Serialize};
30use std::io::BufRead;
31use tracing::{debug, error, info, trace};
32
/// Default soft limit for number of bytes to read from state dump file, before inserting into
/// database.
///
/// Default is 1 GB.
pub const DEFAULT_SOFT_LIMIT_BYTE_LEN_ACCOUNTS_CHUNK: usize = 1_000_000_000;

/// Approximate number of accounts per 1 GB of state dump file. One account is approximately 3.5 KB
///
/// Approximately 285 228 accounts per GB.
//
// (14.05 GB OP mainnet state dump at Bedrock block / 4 007 565 accounts in file > 3.5 KB per
// account)
pub const AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP: usize = 285_228;

/// Soft limit for the number of flushed updates after which to log progress summary.
const SOFT_LIMIT_COUNT_FLUSHED_UPDATES: usize = 1_000_000;
49
50/// Storage initialization error type.
51#[derive(Debug, thiserror::Error, Clone)]
52pub enum InitStorageError {
53    /// Genesis header found on static files but the database is empty.
54    #[error(
55        "static files found, but the database is uninitialized. If attempting to re-syncing, delete both."
56    )]
57    UninitializedDatabase,
58    /// An existing genesis block was found in the database, and its hash did not match the hash of
59    /// the chainspec.
60    #[error(
61        "genesis hash in the storage does not match the specified chainspec: chainspec is {chainspec_hash}, database is {storage_hash}"
62    )]
63    GenesisHashMismatch {
64        /// Expected genesis hash.
65        chainspec_hash: B256,
66        /// Actual genesis hash.
67        storage_hash: B256,
68    },
69    /// Provider error.
70    #[error(transparent)]
71    Provider(#[from] ProviderError),
72    /// State root error while computing the state root
73    #[error(transparent)]
74    StateRootError(#[from] StateRootError),
75    /// State root doesn't match the expected one.
76    #[error("state root mismatch: {_0}")]
77    StateRootMismatch(GotExpected<B256>),
78}
79
80impl From<DatabaseError> for InitStorageError {
81    fn from(error: DatabaseError) -> Self {
82        Self::Provider(ProviderError::Database(error))
83    }
84}
85
/// Write the genesis block if it has not already been written
///
/// Checks both the static files (for the genesis header) and the database (for stage
/// checkpoints) to decide whether initialization is needed, and errors out when the two
/// disagree with each other or with the configured chainspec.
///
/// Returns the genesis hash on success.
pub fn init_genesis<PF>(factory: &PF) -> Result<B256, InitStorageError>
where
    PF: DatabaseProviderFactory
        + StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact>>
        + ChainSpecProvider
        + StageCheckpointReader
        + BlockHashReader,
    PF::ProviderRW: StaticFileProviderFactory<Primitives = PF::Primitives>
        + StageCheckpointWriter
        + HistoryWriter
        + HeaderProvider
        + HashingWriter
        + StateWriter
        + TrieWriter
        + AsRef<PF::ProviderRW>,
    PF::ChainSpec: EthChainSpec<Header = <PF::Primitives as NodePrimitives>::BlockHeader>,
{
    let chain = factory.chain_spec();

    let genesis = chain.genesis();
    let hash = chain.genesis_hash();

    // Check if we already have the genesis header or if we have the wrong one.
    match factory.block_hash(0) {
        // Neither the header nor its static file segment exists yet: fresh storage, proceed.
        Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => {}
        Ok(Some(block_hash)) => {
            if block_hash == hash {
                // Some users will at times attempt to re-sync from scratch by just deleting the
                // database. Since `factory.block_hash` will only query the static files, we need to
                // make sure that our database has been written to, and throw error if it's empty.
                if factory.get_stage_checkpoint(StageId::Headers)?.is_none() {
                    error!(target: "reth::storage", "Genesis header found on static files, but database is uninitialized.");
                    return Err(InitStorageError::UninitializedDatabase)
                }

                debug!("Genesis already written, skipping.");
                return Ok(hash)
            }

            // A genesis header exists but belongs to a different chain.
            return Err(InitStorageError::GenesisHashMismatch {
                chainspec_hash: hash,
                storage_hash: block_hash,
            })
        }
        Err(e) => {
            debug!(?e);
            return Err(e.into());
        }
    }

    debug!("Writing genesis block.");

    let alloc = &genesis.alloc;

    // use transaction to insert genesis header
    let provider_rw = factory.database_provider_rw()?;
    insert_genesis_hashes(&provider_rw, alloc.iter())?;
    insert_genesis_history(&provider_rw, alloc.iter())?;

    // Insert header
    insert_genesis_header(&provider_rw, &chain)?;

    insert_genesis_state(&provider_rw, alloc.iter())?;

    // compute state root to populate trie tables
    compute_state_root(&provider_rw, None)?;

    // insert sync stage: all stages start at a default (zero) checkpoint
    for stage in StageId::ALL {
        provider_rw.save_stage_checkpoint(stage, Default::default())?;
    }

    // Static file segments start empty, so we need to initialize the genesis block.
    let static_file_provider = provider_rw.static_file_provider();
    static_file_provider.latest_writer(StaticFileSegment::Receipts)?.increment_block(0)?;
    static_file_provider.latest_writer(StaticFileSegment::Transactions)?.increment_block(0)?;

    // `commit_unwind` will first commit the DB and then the static file provider, which is
    // necessary on `init_genesis`.
    provider_rw.commit()?;

    Ok(hash)
}
170
/// Inserts the genesis state into the database.
///
/// Thin wrapper around [`insert_state`] with the block number fixed to `0` (genesis).
pub fn insert_genesis_state<'a, 'b, Provider>(
    provider: &Provider,
    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)>,
) -> ProviderResult<()>
where
    Provider: StaticFileProviderFactory
        + DBProvider<Tx: DbTxMut>
        + HeaderProvider
        + StateWriter
        + AsRef<Provider>,
{
    insert_state(provider, alloc, 0)
}
185
186/// Inserts state at given block into database.
187pub fn insert_state<'a, 'b, Provider>(
188    provider: &Provider,
189    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)>,
190    block: u64,
191) -> ProviderResult<()>
192where
193    Provider: StaticFileProviderFactory
194        + DBProvider<Tx: DbTxMut>
195        + HeaderProvider
196        + StateWriter
197        + AsRef<Provider>,
198{
199    let capacity = alloc.size_hint().1.unwrap_or(0);
200    let mut state_init: BundleStateInit =
201        HashMap::with_capacity_and_hasher(capacity, Default::default());
202    let mut reverts_init = HashMap::with_capacity_and_hasher(capacity, Default::default());
203    let mut contracts: HashMap<B256, Bytecode> =
204        HashMap::with_capacity_and_hasher(capacity, Default::default());
205
206    for (address, account) in alloc {
207        let bytecode_hash = if let Some(code) = &account.code {
208            match Bytecode::new_raw_checked(code.clone()) {
209                Ok(bytecode) => {
210                    let hash = bytecode.hash_slow();
211                    contracts.insert(hash, bytecode);
212                    Some(hash)
213                }
214                Err(err) => {
215                    error!(%address, %err, "Failed to decode genesis bytecode.");
216                    return Err(DatabaseError::Other(err.to_string()).into());
217                }
218            }
219        } else {
220            None
221        };
222
223        // get state
224        let storage = account
225            .storage
226            .as_ref()
227            .map(|m| {
228                m.iter()
229                    .map(|(key, value)| {
230                        let value = U256::from_be_bytes(value.0);
231                        (*key, (U256::ZERO, value))
232                    })
233                    .collect::<HashMap<_, _>>()
234            })
235            .unwrap_or_default();
236
237        reverts_init.insert(
238            *address,
239            (Some(None), storage.keys().map(|k| StorageEntry::new(*k, U256::ZERO)).collect()),
240        );
241
242        state_init.insert(
243            *address,
244            (
245                None,
246                Some(Account {
247                    nonce: account.nonce.unwrap_or_default(),
248                    balance: account.balance,
249                    bytecode_hash,
250                }),
251                storage,
252            ),
253        );
254    }
255    let all_reverts_init: RevertsInit = HashMap::from_iter([(block, reverts_init)]);
256
257    let execution_outcome = ExecutionOutcome::new_init(
258        state_init,
259        all_reverts_init,
260        contracts,
261        Vec::default(),
262        block,
263        Vec::new(),
264    );
265
266    provider.write_state(&execution_outcome, OriginalValuesKnown::Yes)?;
267
268    trace!(target: "reth::cli", "Inserted state");
269
270    Ok(())
271}
272
273/// Inserts hashes for the genesis state.
274pub fn insert_genesis_hashes<'a, 'b, Provider>(
275    provider: &Provider,
276    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)> + Clone,
277) -> ProviderResult<()>
278where
279    Provider: DBProvider<Tx: DbTxMut> + HashingWriter,
280{
281    // insert and hash accounts to hashing table
282    let alloc_accounts = alloc.clone().map(|(addr, account)| (*addr, Some(Account::from(account))));
283    provider.insert_account_for_hashing(alloc_accounts)?;
284
285    trace!(target: "reth::cli", "Inserted account hashes");
286
287    let alloc_storage = alloc.filter_map(|(addr, account)| {
288        // only return Some if there is storage
289        account.storage.as_ref().map(|storage| {
290            (*addr, storage.iter().map(|(&key, &value)| StorageEntry { key, value: value.into() }))
291        })
292    });
293    provider.insert_storage_for_hashing(alloc_storage)?;
294
295    trace!(target: "reth::cli", "Inserted storage hashes");
296
297    Ok(())
298}
299
/// Inserts history indices for genesis accounts and storage.
///
/// Thin wrapper around [`insert_history`] with the block number fixed to `0` (genesis).
pub fn insert_genesis_history<'a, 'b, Provider>(
    provider: &Provider,
    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)> + Clone,
) -> ProviderResult<()>
where
    Provider: DBProvider<Tx: DbTxMut> + HistoryWriter,
{
    insert_history(provider, alloc, 0)
}
310
311/// Inserts history indices for genesis accounts and storage.
312pub fn insert_history<'a, 'b, Provider>(
313    provider: &Provider,
314    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)> + Clone,
315    block: u64,
316) -> ProviderResult<()>
317where
318    Provider: DBProvider<Tx: DbTxMut> + HistoryWriter,
319{
320    let account_transitions = alloc.clone().map(|(addr, _)| (*addr, [block]));
321    provider.insert_account_history_index(account_transitions)?;
322
323    trace!(target: "reth::cli", "Inserted account history");
324
325    let storage_transitions = alloc
326        .filter_map(|(addr, account)| account.storage.as_ref().map(|storage| (addr, storage)))
327        .flat_map(|(addr, storage)| storage.keys().map(|key| ((*addr, *key), [block])));
328    provider.insert_storage_history_index(storage_transitions)?;
329
330    trace!(target: "reth::cli", "Inserted storage history");
331
332    Ok(())
333}
334
/// Inserts header for the genesis state.
///
/// Appends the genesis header to the headers static file segment (unless one is already
/// present) and records the hash-to-number mapping plus empty body indices in the database.
pub fn insert_genesis_header<Provider, Spec>(
    provider: &Provider,
    chain: &Spec,
) -> ProviderResult<()>
where
    Provider: StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact>>
        + DBProvider<Tx: DbTxMut>,
    Spec: EthChainSpec<Header = <Provider::Primitives as NodePrimitives>::BlockHeader>,
{
    let (header, block_hash) = (chain.genesis_header(), chain.genesis_hash());
    let static_file_provider = provider.static_file_provider();

    match static_file_provider.block_hash(0) {
        // No genesis header in the static files yet: append it.
        Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => {
            let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?;
            writer.append_header(header, &block_hash)?;
        }
        // Header already present in static files; nothing to append.
        Ok(Some(_)) => {}
        Err(e) => return Err(e),
    }

    // Map the genesis hash to block number 0 and write empty body indices for block 0.
    provider.tx_ref().put::<tables::HeaderNumbers>(block_hash, 0)?;
    provider.tx_ref().put::<tables::BlockBodyIndices>(0, Default::default())?;

    Ok(())
}
362
/// Reads account state from a [`BufRead`] reader and initializes it at the highest block that can
/// be found on database.
///
/// It's similar to [`init_genesis`] but supports importing state too big to fit in memory, and can
/// be set to the highest block present. One practical usecase is to import OP mainnet state at
/// bedrock transition block.
///
/// The dump's first line must carry the expected state root; the remaining lines are JSON
/// accounts. The state root is verified twice: once against the current header before any
/// writes, and once after import by recomputing it from the database.
pub fn init_from_state_dump<Provider>(
    mut reader: impl BufRead,
    provider_rw: &Provider,
    etl_config: EtlConfig,
) -> eyre::Result<B256>
where
    Provider: StaticFileProviderFactory
        + DBProvider<Tx: DbTxMut>
        + BlockNumReader
        + BlockHashReader
        + ChainSpecProvider
        + StageCheckpointWriter
        + HistoryWriter
        + HeaderProvider
        + HashingWriter
        + TrieWriter
        + StateWriter
        + AsRef<Provider>,
{
    // A zero ETL file size would make the collector unusable.
    if etl_config.file_size == 0 {
        return Err(eyre::eyre!("ETL file size cannot be zero"))
    }

    // Import targets the highest block already present in the database.
    let block = provider_rw.last_block_number()?;

    let hash = provider_rw
        .block_hash(block)?
        .ok_or_else(|| eyre::eyre!("Block hash not found for block {}", block))?;
    let header = provider_rw
        .header_by_number(block)?
        .map(SealedHeader::seal_slow)
        .ok_or_else(|| ProviderError::HeaderNotFound(block.into()))?;

    let expected_state_root = header.state_root();

    // first line can be state root; bail before writing anything if it disagrees with the
    // header's state root.
    let dump_state_root = parse_state_root(&mut reader)?;
    if expected_state_root != dump_state_root {
        error!(target: "reth::cli",
            ?dump_state_root,
            ?expected_state_root,
            header=?header.num_hash(),
            "State root from state dump does not match state root in current header."
        );
        return Err(InitStorageError::StateRootMismatch(GotExpected {
            got: dump_state_root,
            expected: expected_state_root,
        })
        .into())
    }

    debug!(target: "reth::cli",
        block,
        chain=%provider_rw.chain_spec().chain(),
        "Initializing state at block"
    );

    // remaining lines are accounts
    let collector = parse_accounts(&mut reader, etl_config)?;

    // write state to db and collect prefix sets for the incremental state root computation
    let mut prefix_sets = TriePrefixSetsMut::default();
    dump_state(collector, provider_rw, block, &mut prefix_sets)?;

    info!(target: "reth::cli", "All accounts written to database, starting state root computation (may take some time)");

    // compute and compare state root. this advances the stage checkpoints.
    let computed_state_root = compute_state_root(provider_rw, Some(prefix_sets.freeze()))?;
    if computed_state_root == expected_state_root {
        info!(target: "reth::cli",
            ?computed_state_root,
            "Computed state root matches state root in state dump"
        );
    } else {
        error!(target: "reth::cli",
            ?computed_state_root,
            ?expected_state_root,
            "Computed state root does not match state root in state dump"
        );

        return Err(InitStorageError::StateRootMismatch(GotExpected {
            got: computed_state_root,
            expected: expected_state_root,
        })
        .into())
    }

    // insert sync stages for stages that require state
    for stage in StageId::STATE_REQUIRED {
        provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(block))?;
    }

    Ok(hash)
}
463
464/// Parses and returns expected state root.
465fn parse_state_root(reader: &mut impl BufRead) -> eyre::Result<B256> {
466    let mut line = String::new();
467    reader.read_line(&mut line)?;
468
469    let expected_state_root = serde_json::from_str::<StateRoot>(&line)?.root;
470    trace!(target: "reth::cli",
471        root=%expected_state_root,
472        "Read state root from file"
473    );
474    Ok(expected_state_root)
475}
476
477/// Parses accounts and pushes them to a [`Collector`].
478fn parse_accounts(
479    mut reader: impl BufRead,
480    etl_config: EtlConfig,
481) -> Result<Collector<Address, GenesisAccount>, eyre::Error> {
482    let mut line = String::new();
483    let mut collector = Collector::new(etl_config.file_size, etl_config.dir);
484
485    while let Ok(n) = reader.read_line(&mut line) {
486        if n == 0 {
487            break
488        }
489
490        let GenesisAccountWithAddress { genesis_account, address } = serde_json::from_str(&line)?;
491        collector.insert(address, genesis_account)?;
492
493        if !collector.is_empty() &&
494            collector.len().is_multiple_of(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP)
495        {
496            info!(target: "reth::cli",
497                parsed_new_accounts=collector.len(),
498            );
499        }
500
501        line.clear();
502    }
503
504    Ok(collector)
505}
506
/// Takes a [`Collector`] and processes all accounts.
///
/// Decodes each collected `(address, account)` pair from its [`Compact`] encoding, records the
/// hashed address/slot keys in `prefix_sets` for the later state root computation, and writes
/// accounts to the database in chunks of roughly [`AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP`].
fn dump_state<Provider>(
    mut collector: Collector<Address, GenesisAccount>,
    provider_rw: &Provider,
    block: u64,
    prefix_sets: &mut TriePrefixSetsMut,
) -> Result<(), eyre::Error>
where
    Provider: StaticFileProviderFactory
        + DBProvider<Tx: DbTxMut>
        + HeaderProvider
        + HashingWriter
        + HistoryWriter
        + StateWriter
        + AsRef<Provider>,
{
    let accounts_len = collector.len();
    let mut accounts = Vec::with_capacity(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP);
    let mut total_inserted_accounts = 0;

    for (index, entry) in collector.iter()?.enumerate() {
        let (address, account) = entry?;
        // Collector entries are raw bytes; decode them back through the Compact codec.
        let (address, _) = Address::from_compact(address.as_slice(), address.len());
        let (account, _) = GenesisAccount::from_compact(account.as_slice(), account.len());

        // Add to prefix sets
        let hashed_address = keccak256(address);
        prefix_sets.account_prefix_set.insert(Nibbles::unpack(hashed_address));

        // Add storage keys to prefix sets if storage exists
        if let Some(ref storage) = account.storage {
            for key in storage.keys() {
                let hashed_key = keccak256(key);
                prefix_sets
                    .storage_prefix_sets
                    .entry(hashed_address)
                    .or_default()
                    .insert(Nibbles::unpack(hashed_key));
            }
        }

        accounts.push((address, account));

        // Flush once per chunk, and always on the final entry so no accounts are left behind.
        // (The `accounts_len - 1` comparison cannot underflow: the loop body only runs when
        // the collector is non-empty.)
        if (index > 0 && index.is_multiple_of(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP)) ||
            index == accounts_len - 1
        {
            total_inserted_accounts += accounts.len();

            info!(target: "reth::cli",
                total_inserted_accounts,
                "Writing accounts to db"
            );

            // use transaction to insert genesis header
            insert_genesis_hashes(
                provider_rw,
                accounts.iter().map(|(address, account)| (address, account)),
            )?;

            insert_history(
                provider_rw,
                accounts.iter().map(|(address, account)| (address, account)),
                block,
            )?;

            // block is already written to static files
            insert_state(
                provider_rw,
                accounts.iter().map(|(address, account)| (address, account)),
                block,
            )?;

            // Reuse the chunk buffer for the next batch.
            accounts.clear();
        }
    }
    Ok(())
}
584
/// Computes the state root (from scratch) based on the accounts and storages present in the
/// database.
///
/// Runs the state root computation incrementally: each [`StateRootProgress::Progress`] step
/// flushes the accumulated trie updates to the database and resumes from the returned
/// intermediate state, so memory stays bounded for large state. Optional `prefix_sets`
/// restrict the computation to the touched key ranges.
fn compute_state_root<Provider>(
    provider: &Provider,
    prefix_sets: Option<TriePrefixSets>,
) -> Result<B256, InitStorageError>
where
    Provider: DBProvider<Tx: DbTxMut> + TrieWriter,
{
    trace!(target: "reth::cli", "Computing state root");

    let tx = provider.tx_ref();
    let mut intermediate_state: Option<IntermediateStateRootState> = None;
    let mut total_flushed_updates = 0;

    loop {
        // Re-create the computer each round, resuming from the previous intermediate state.
        let mut state_root =
            StateRootComputer::from_tx(tx).with_intermediate_state(intermediate_state);

        if let Some(sets) = prefix_sets.clone() {
            state_root = state_root.with_prefix_sets(sets);
        }

        match state_root.root_with_progress()? {
            StateRootProgress::Progress(state, _, updates) => {
                // Persist this round's trie updates before continuing.
                let updated_len = provider.write_trie_updates(updates)?;
                total_flushed_updates += updated_len;

                trace!(target: "reth::cli",
                    last_account_key = %state.account_root_state.last_hashed_key,
                    updated_len,
                    total_flushed_updates,
                    "Flushing trie updates"
                );

                intermediate_state = Some(*state);

                // Periodic progress log at info level.
                if total_flushed_updates.is_multiple_of(SOFT_LIMIT_COUNT_FLUSHED_UPDATES) {
                    info!(target: "reth::cli",
                        total_flushed_updates,
                        "Flushing trie updates"
                    );
                }
            }
            StateRootProgress::Complete(root, _, updates) => {
                // Final flush, then return the computed root.
                let updated_len = provider.write_trie_updates(updates)?;
                total_flushed_updates += updated_len;

                trace!(target: "reth::cli",
                    %root,
                    updated_len,
                    total_flushed_updates,
                    "State root has been computed"
                );

                return Ok(root)
            }
        }
    }
}
645
/// Type to deserialize state root from state dump file.
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
struct StateRoot {
    /// The state root recorded on the first line of the dump file.
    root: B256,
}
651
/// An account as in the state dump file. This contains a [`GenesisAccount`] and the account's
/// address.
#[derive(Debug, Serialize, Deserialize)]
struct GenesisAccountWithAddress {
    /// The account's balance, nonce, code, and storage.
    // Flattened so the account fields sit at the same JSON level as `address`.
    #[serde(flatten)]
    genesis_account: GenesisAccount,
    /// The account's address.
    address: Address,
}
662
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_consensus::constants::{
        HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH,
    };
    use alloy_genesis::Genesis;
    use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET, SEPOLIA};
    use reth_db::DatabaseEnv;
    use reth_db_api::{
        cursor::DbCursorRO,
        models::{storage_sharded_key::StorageShardedKey, IntegerList, ShardedKey},
        table::{Table, TableRow},
        transaction::DbTx,
        Database,
    };
    use reth_provider::{
        test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB},
        ProviderFactory,
    };
    use std::{collections::BTreeMap, sync::Arc};

    /// Walks an entire table and collects its rows, for asserting exact table contents.
    fn collect_table_entries<DB, T>(
        tx: &<DB as Database>::TX,
    ) -> Result<Vec<TableRow<T>>, InitStorageError>
    where
        DB: Database,
        T: Table,
    {
        Ok(tx.cursor_read::<T>()?.walk_range(..)?.collect::<Result<Vec<_>, _>>()?)
    }

    // Initializing a fresh test provider with a well-known chainspec yields its genesis hash.
    #[test]
    fn success_init_genesis_mainnet() {
        let genesis_hash =
            init_genesis(&create_test_provider_factory_with_chain_spec(MAINNET.clone())).unwrap();

        // actual, expected
        assert_eq!(genesis_hash, MAINNET_GENESIS_HASH);
    }

    #[test]
    fn success_init_genesis_sepolia() {
        let genesis_hash =
            init_genesis(&create_test_provider_factory_with_chain_spec(SEPOLIA.clone())).unwrap();

        // actual, expected
        assert_eq!(genesis_hash, SEPOLIA_GENESIS_HASH);
    }

    #[test]
    fn success_init_genesis_holesky() {
        let genesis_hash =
            init_genesis(&create_test_provider_factory_with_chain_spec(HOLESKY.clone())).unwrap();

        // actual, expected
        assert_eq!(genesis_hash, HOLESKY_GENESIS_HASH);
    }

    // Re-initializing storage already holding Sepolia genesis with a Mainnet chainspec must
    // fail with a hash mismatch rather than silently overwriting.
    #[test]
    fn fail_init_inconsistent_db() {
        let factory = create_test_provider_factory_with_chain_spec(SEPOLIA.clone());
        let static_file_provider = factory.static_file_provider();
        init_genesis(&factory).unwrap();

        // Try to init db with a different genesis block
        let genesis_hash = init_genesis(&ProviderFactory::<MockNodeTypesWithDB>::new(
            factory.into_db(),
            MAINNET.clone(),
            static_file_provider,
        ));

        assert!(matches!(
            genesis_hash.unwrap_err(),
            InitStorageError::GenesisHashMismatch {
                chainspec_hash: MAINNET_GENESIS_HASH,
                storage_hash: SEPOLIA_GENESIS_HASH
            }
        ))
    }

    // Genesis init must index account and storage history at block 0 for every alloc entry.
    #[test]
    fn init_genesis_history() {
        let address_with_balance = Address::with_last_byte(1);
        let address_with_storage = Address::with_last_byte(2);
        let storage_key = B256::with_last_byte(1);
        let chain_spec = Arc::new(ChainSpec {
            chain: Chain::from_id(1),
            genesis: Genesis {
                alloc: BTreeMap::from([
                    (
                        address_with_balance,
                        GenesisAccount { balance: U256::from(1), ..Default::default() },
                    ),
                    (
                        address_with_storage,
                        GenesisAccount {
                            storage: Some(BTreeMap::from([(storage_key, B256::random())])),
                            ..Default::default()
                        },
                    ),
                ]),
                ..Default::default()
            },
            hardforks: Default::default(),
            paris_block_and_final_difficulty: None,
            deposit_contract: None,
            ..Default::default()
        });

        let factory = create_test_provider_factory_with_chain_spec(chain_spec);
        init_genesis(&factory).unwrap();

        let provider = factory.provider().unwrap();

        let tx = provider.tx_ref();

        assert_eq!(
            collect_table_entries::<Arc<DatabaseEnv>, tables::AccountsHistory>(tx)
                .expect("failed to collect"),
            vec![
                (ShardedKey::new(address_with_balance, u64::MAX), IntegerList::new([0]).unwrap()),
                (ShardedKey::new(address_with_storage, u64::MAX), IntegerList::new([0]).unwrap())
            ],
        );

        assert_eq!(
            collect_table_entries::<Arc<DatabaseEnv>, tables::StoragesHistory>(tx)
                .expect("failed to collect"),
            vec![(
                StorageShardedKey::new(address_with_storage, storage_key, u64::MAX),
                IntegerList::new([0]).unwrap()
            )],
        );
    }
}