reth_db_common/init.rs

1//! Reth genesis initialization utility functions.
2
3use alloy_consensus::BlockHeader;
4use alloy_genesis::GenesisAccount;
5use alloy_primitives::{map::HashMap, Address, B256, U256};
6use reth_chainspec::EthChainSpec;
7use reth_codecs::Compact;
8use reth_config::config::EtlConfig;
9use reth_db_api::{tables, transaction::DbTxMut, DatabaseError};
10use reth_etl::Collector;
11use reth_execution_errors::StateRootError;
12use reth_primitives_traits::{Account, Bytecode, GotExpected, NodePrimitives, StorageEntry};
13use reth_provider::{
14    errors::provider::ProviderResult, providers::StaticFileWriter, writer::UnifiedStorageWriter,
15    BlockHashReader, BlockNumReader, BundleStateInit, ChainSpecProvider, DBProvider,
16    DatabaseProviderFactory, ExecutionOutcome, HashingWriter, HeaderProvider, HistoryWriter,
17    OriginalValuesKnown, ProviderError, RevertsInit, StageCheckpointReader, StageCheckpointWriter,
18    StateWriter, StaticFileProviderFactory, StorageLocation, TrieWriter,
19};
20use reth_stages_types::{StageCheckpoint, StageId};
21use reth_static_file_types::StaticFileSegment;
22use reth_trie::{IntermediateStateRootState, StateRoot as StateRootComputer, StateRootProgress};
23use reth_trie_db::DatabaseStateRoot;
24use serde::{Deserialize, Serialize};
25use std::io::BufRead;
26use tracing::{debug, error, info, trace};
27
/// Default soft limit for number of bytes to read from state dump file, before inserting into
/// database.
///
/// Default is 1 GB.
pub const DEFAULT_SOFT_LIMIT_BYTE_LEN_ACCOUNTS_CHUNK: usize = 1_000_000_000;

/// Approximate number of accounts per 1 GB of state dump file. One account is approximately
/// 3.5 KB, which works out to roughly 285,228 accounts per gigabyte.
//
// (14.05 GB OP mainnet state dump at Bedrock block / 4 007 565 accounts in file > 3.5 KB per
// account)
pub const AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP: usize = 285_228;

/// Soft limit for the number of flushed updates after which to log progress summary.
const SOFT_LIMIT_COUNT_FLUSHED_UPDATES: usize = 1_000_000;
44
45/// Storage initialization error type.
46#[derive(Debug, thiserror::Error, Clone)]
47pub enum InitStorageError {
48    /// Genesis header found on static files but the database is empty.
49    #[error(
50        "static files found, but the database is uninitialized. If attempting to re-syncing, delete both."
51    )]
52    UninitializedDatabase,
53    /// An existing genesis block was found in the database, and its hash did not match the hash of
54    /// the chainspec.
55    #[error(
56        "genesis hash in the storage does not match the specified chainspec: chainspec is {chainspec_hash}, database is {storage_hash}"
57    )]
58    GenesisHashMismatch {
59        /// Expected genesis hash.
60        chainspec_hash: B256,
61        /// Actual genesis hash.
62        storage_hash: B256,
63    },
64    /// Provider error.
65    #[error(transparent)]
66    Provider(#[from] ProviderError),
67    /// State root error while computing the state root
68    #[error(transparent)]
69    StateRootError(#[from] StateRootError),
70    /// State root doesn't match the expected one.
71    #[error("state root mismatch: {_0}")]
72    StateRootMismatch(GotExpected<B256>),
73}
74
75impl From<DatabaseError> for InitStorageError {
76    fn from(error: DatabaseError) -> Self {
77        Self::Provider(ProviderError::Database(error))
78    }
79}
80
/// Write the genesis block if it has not already been written.
///
/// Returns the genesis hash, also in the case where genesis was already initialized and nothing
/// had to be written.
///
/// # Errors
///
/// - [`InitStorageError::UninitializedDatabase`] if the genesis header exists in static files but
///   the database has never been written to (e.g. the user deleted only the database directory).
/// - [`InitStorageError::GenesisHashMismatch`] if a genesis block for a different chainspec is
///   already stored.
pub fn init_genesis<PF>(factory: &PF) -> Result<B256, InitStorageError>
where
    PF: DatabaseProviderFactory
        + StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact>>
        + ChainSpecProvider
        + StageCheckpointReader
        + BlockHashReader,
    PF::ProviderRW: StaticFileProviderFactory<Primitives = PF::Primitives>
        + StageCheckpointWriter
        + HistoryWriter
        + HeaderProvider
        + HashingWriter
        + StateWriter
        + TrieWriter
        + AsRef<PF::ProviderRW>,
    PF::ChainSpec: EthChainSpec<Header = <PF::Primitives as NodePrimitives>::BlockHeader>,
{
    let chain = factory.chain_spec();

    let genesis = chain.genesis();
    let hash = chain.genesis_hash();

    // Check if we already have the genesis header or if we have the wrong one.
    match factory.block_hash(0) {
        // Neither database nor static files know block 0: fresh storage, proceed with init.
        Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => {}
        Ok(Some(block_hash)) => {
            if block_hash == hash {
                // Some users will at times attempt to re-sync from scratch by just deleting the
                // database. Since `factory.block_hash` will only query the static files, we need to
                // make sure that our database has been written to, and throw error if it's empty.
                if factory.get_stage_checkpoint(StageId::Headers)?.is_none() {
                    error!(target: "reth::storage", "Genesis header found on static files, but database is uninitialized.");
                    return Err(InitStorageError::UninitializedDatabase)
                }

                debug!("Genesis already written, skipping.");
                return Ok(hash)
            }

            // A genesis block exists but belongs to a different chain specification.
            return Err(InitStorageError::GenesisHashMismatch {
                chainspec_hash: hash,
                storage_hash: block_hash,
            })
        }
        Err(e) => {
            debug!(?e);
            return Err(e.into());
        }
    }

    debug!("Writing genesis block.");

    let alloc = &genesis.alloc;

    // use transaction to insert genesis header
    let provider_rw = factory.database_provider_rw()?;
    insert_genesis_hashes(&provider_rw, alloc.iter())?;
    insert_genesis_history(&provider_rw, alloc.iter())?;

    // Insert header
    insert_genesis_header(&provider_rw, &chain)?;

    insert_genesis_state(&provider_rw, alloc.iter())?;

    // compute state root to populate trie tables
    compute_state_root(&provider_rw)?;

    // insert sync stage checkpoints (all at the default/zero checkpoint)
    for stage in StageId::ALL {
        provider_rw.save_stage_checkpoint(stage, Default::default())?;
    }

    let static_file_provider = provider_rw.static_file_provider();
    // Static file segments start empty, so we need to initialize the genesis block.
    let segment = StaticFileSegment::Receipts;
    static_file_provider.latest_writer(segment)?.increment_block(0)?;

    let segment = StaticFileSegment::Transactions;
    static_file_provider.latest_writer(segment)?.increment_block(0)?;

    // `commit_unwind` will first commit the DB and then the static file provider, which is
    // necessary on `init_genesis`.
    UnifiedStorageWriter::commit_unwind(provider_rw)?;

    Ok(hash)
}
168
169/// Inserts the genesis state into the database.
170pub fn insert_genesis_state<'a, 'b, Provider>(
171    provider: &Provider,
172    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)>,
173) -> ProviderResult<()>
174where
175    Provider: StaticFileProviderFactory
176        + DBProvider<Tx: DbTxMut>
177        + HeaderProvider
178        + StateWriter
179        + AsRef<Provider>,
180{
181    insert_state(provider, alloc, 0)
182}
183
184/// Inserts state at given block into database.
185pub fn insert_state<'a, 'b, Provider>(
186    provider: &Provider,
187    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)>,
188    block: u64,
189) -> ProviderResult<()>
190where
191    Provider: StaticFileProviderFactory
192        + DBProvider<Tx: DbTxMut>
193        + HeaderProvider
194        + StateWriter
195        + AsRef<Provider>,
196{
197    let capacity = alloc.size_hint().1.unwrap_or(0);
198    let mut state_init: BundleStateInit =
199        HashMap::with_capacity_and_hasher(capacity, Default::default());
200    let mut reverts_init = HashMap::with_capacity_and_hasher(capacity, Default::default());
201    let mut contracts: HashMap<B256, Bytecode> =
202        HashMap::with_capacity_and_hasher(capacity, Default::default());
203
204    for (address, account) in alloc {
205        let bytecode_hash = if let Some(code) = &account.code {
206            match Bytecode::new_raw_checked(code.clone()) {
207                Ok(bytecode) => {
208                    let hash = bytecode.hash_slow();
209                    contracts.insert(hash, bytecode);
210                    Some(hash)
211                }
212                Err(err) => {
213                    error!(%address, %err, "Failed to decode genesis bytecode.");
214                    return Err(DatabaseError::Other(err.to_string()).into());
215                }
216            }
217        } else {
218            None
219        };
220
221        // get state
222        let storage = account
223            .storage
224            .as_ref()
225            .map(|m| {
226                m.iter()
227                    .map(|(key, value)| {
228                        let value = U256::from_be_bytes(value.0);
229                        (*key, (U256::ZERO, value))
230                    })
231                    .collect::<HashMap<_, _>>()
232            })
233            .unwrap_or_default();
234
235        reverts_init.insert(
236            *address,
237            (Some(None), storage.keys().map(|k| StorageEntry::new(*k, U256::ZERO)).collect()),
238        );
239
240        state_init.insert(
241            *address,
242            (
243                None,
244                Some(Account {
245                    nonce: account.nonce.unwrap_or_default(),
246                    balance: account.balance,
247                    bytecode_hash,
248                }),
249                storage,
250            ),
251        );
252    }
253    let all_reverts_init: RevertsInit = HashMap::from_iter([(block, reverts_init)]);
254
255    let execution_outcome = ExecutionOutcome::new_init(
256        state_init,
257        all_reverts_init,
258        contracts,
259        Vec::default(),
260        block,
261        Vec::new(),
262    );
263
264    provider.write_state(
265        &execution_outcome,
266        OriginalValuesKnown::Yes,
267        StorageLocation::Database,
268    )?;
269
270    trace!(target: "reth::cli", "Inserted state");
271
272    Ok(())
273}
274
275/// Inserts hashes for the genesis state.
276pub fn insert_genesis_hashes<'a, 'b, Provider>(
277    provider: &Provider,
278    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)> + Clone,
279) -> ProviderResult<()>
280where
281    Provider: DBProvider<Tx: DbTxMut> + HashingWriter,
282{
283    // insert and hash accounts to hashing table
284    let alloc_accounts = alloc.clone().map(|(addr, account)| (*addr, Some(Account::from(account))));
285    provider.insert_account_for_hashing(alloc_accounts)?;
286
287    trace!(target: "reth::cli", "Inserted account hashes");
288
289    let alloc_storage = alloc.filter_map(|(addr, account)| {
290        // only return Some if there is storage
291        account.storage.as_ref().map(|storage| {
292            (*addr, storage.iter().map(|(&key, &value)| StorageEntry { key, value: value.into() }))
293        })
294    });
295    provider.insert_storage_for_hashing(alloc_storage)?;
296
297    trace!(target: "reth::cli", "Inserted storage hashes");
298
299    Ok(())
300}
301
302/// Inserts history indices for genesis accounts and storage.
303pub fn insert_genesis_history<'a, 'b, Provider>(
304    provider: &Provider,
305    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)> + Clone,
306) -> ProviderResult<()>
307where
308    Provider: DBProvider<Tx: DbTxMut> + HistoryWriter,
309{
310    insert_history(provider, alloc, 0)
311}
312
313/// Inserts history indices for genesis accounts and storage.
314pub fn insert_history<'a, 'b, Provider>(
315    provider: &Provider,
316    alloc: impl Iterator<Item = (&'a Address, &'b GenesisAccount)> + Clone,
317    block: u64,
318) -> ProviderResult<()>
319where
320    Provider: DBProvider<Tx: DbTxMut> + HistoryWriter,
321{
322    let account_transitions = alloc.clone().map(|(addr, _)| (*addr, [block]));
323    provider.insert_account_history_index(account_transitions)?;
324
325    trace!(target: "reth::cli", "Inserted account history");
326
327    let storage_transitions = alloc
328        .filter_map(|(addr, account)| account.storage.as_ref().map(|storage| (addr, storage)))
329        .flat_map(|(addr, storage)| storage.keys().map(|key| ((*addr, *key), [block])));
330    provider.insert_storage_history_index(storage_transitions)?;
331
332    trace!(target: "reth::cli", "Inserted storage history");
333
334    Ok(())
335}
336
337/// Inserts header for the genesis state.
338pub fn insert_genesis_header<Provider, Spec>(
339    provider: &Provider,
340    chain: &Spec,
341) -> ProviderResult<()>
342where
343    Provider: StaticFileProviderFactory<Primitives: NodePrimitives<BlockHeader: Compact>>
344        + DBProvider<Tx: DbTxMut>,
345    Spec: EthChainSpec<Header = <Provider::Primitives as NodePrimitives>::BlockHeader>,
346{
347    let (header, block_hash) = (chain.genesis_header(), chain.genesis_hash());
348    let static_file_provider = provider.static_file_provider();
349
350    match static_file_provider.block_hash(0) {
351        Ok(None) | Err(ProviderError::MissingStaticFileBlock(StaticFileSegment::Headers, 0)) => {
352            let (difficulty, hash) = (header.difficulty(), block_hash);
353            let mut writer = static_file_provider.latest_writer(StaticFileSegment::Headers)?;
354            writer.append_header(header, difficulty, &hash)?;
355        }
356        Ok(Some(_)) => {}
357        Err(e) => return Err(e),
358    }
359
360    provider.tx_ref().put::<tables::HeaderNumbers>(block_hash, 0)?;
361    provider.tx_ref().put::<tables::BlockBodyIndices>(0, Default::default())?;
362
363    Ok(())
364}
365
/// Reads account state from a [`BufRead`] reader and initializes it at the highest block that can
/// be found on database.
///
/// It's similar to [`init_genesis`] but supports importing state too big to fit in memory, and can
/// be set to the highest block present. One practical usecase is to import OP mainnet state at
/// bedrock transition block.
///
/// The dump format is one JSON object per line: the first line holds the expected state root,
/// every following line holds one account (see [`GenesisAccountWithAddress`]).
///
/// # Errors
///
/// Returns an error if the ETL file size is zero, if the dump's state root does not match the
/// stored header's state root (before or after import), or on any underlying storage failure.
pub fn init_from_state_dump<Provider>(
    mut reader: impl BufRead,
    provider_rw: &Provider,
    etl_config: EtlConfig,
) -> eyre::Result<B256>
where
    Provider: StaticFileProviderFactory
        + DBProvider<Tx: DbTxMut>
        + BlockNumReader
        + BlockHashReader
        + ChainSpecProvider
        + StageCheckpointWriter
        + HistoryWriter
        + HeaderProvider
        + HashingWriter
        + TrieWriter
        + StateWriter
        + AsRef<Provider>,
{
    if etl_config.file_size == 0 {
        return Err(eyre::eyre!("ETL file size cannot be zero"))
    }

    // Anchor the imported state at the highest block already present in storage.
    let block = provider_rw.last_block_number()?;
    let hash = provider_rw
        .block_hash(block)?
        .ok_or_else(|| eyre::eyre!("Block hash not found for block {}", block))?;
    let expected_state_root = provider_rw
        .header_by_number(block)?
        .ok_or_else(|| ProviderError::HeaderNotFound(block.into()))?
        .state_root();

    // first line can be state root
    let dump_state_root = parse_state_root(&mut reader)?;
    if expected_state_root != dump_state_root {
        error!(target: "reth::cli",
            ?dump_state_root,
            ?expected_state_root,
            "State root from state dump does not match state root in current header."
        );
        return Err(InitStorageError::StateRootMismatch(GotExpected {
            got: dump_state_root,
            expected: expected_state_root,
        })
        .into())
    }

    debug!(target: "reth::cli",
        block,
        chain=%provider_rw.chain_spec().chain(),
        "Initializing state at block"
    );

    // remaining lines are accounts
    let collector = parse_accounts(&mut reader, etl_config)?;

    // write state to db
    dump_state(collector, provider_rw, block)?;

    info!(target: "reth::cli", "All accounts written to database, starting state root computation (may take some time)");

    // compute and compare state root. this advances the stage checkpoints.
    let computed_state_root = compute_state_root(provider_rw)?;
    if computed_state_root == expected_state_root {
        info!(target: "reth::cli",
            ?computed_state_root,
            "Computed state root matches state root in state dump"
        );
    } else {
        error!(target: "reth::cli",
            ?computed_state_root,
            ?expected_state_root,
            "Computed state root does not match state root in state dump"
        );

        return Err(InitStorageError::StateRootMismatch(GotExpected {
            got: computed_state_root,
            expected: expected_state_root,
        })
        .into())
    }

    // insert sync stages for stages that require state
    for stage in StageId::STATE_REQUIRED {
        provider_rw.save_stage_checkpoint(stage, StageCheckpoint::new(block))?;
    }

    Ok(hash)
}
461
462/// Parses and returns expected state root.
463fn parse_state_root(reader: &mut impl BufRead) -> eyre::Result<B256> {
464    let mut line = String::new();
465    reader.read_line(&mut line)?;
466
467    let expected_state_root = serde_json::from_str::<StateRoot>(&line)?.root;
468    trace!(target: "reth::cli",
469        root=%expected_state_root,
470        "Read state root from file"
471    );
472    Ok(expected_state_root)
473}
474
475/// Parses accounts and pushes them to a [`Collector`].
476fn parse_accounts(
477    mut reader: impl BufRead,
478    etl_config: EtlConfig,
479) -> Result<Collector<Address, GenesisAccount>, eyre::Error> {
480    let mut line = String::new();
481    let mut collector = Collector::new(etl_config.file_size, etl_config.dir);
482
483    while let Ok(n) = reader.read_line(&mut line) {
484        if n == 0 {
485            break
486        }
487
488        let GenesisAccountWithAddress { genesis_account, address } = serde_json::from_str(&line)?;
489        collector.insert(address, genesis_account)?;
490
491        if !collector.is_empty() &&
492            collector.len().is_multiple_of(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP)
493        {
494            info!(target: "reth::cli",
495                parsed_new_accounts=collector.len(),
496            );
497        }
498
499        line.clear();
500    }
501
502    Ok(collector)
503}
504
/// Takes a [`Collector`] and processes all accounts.
///
/// Accounts are decoded from their compact representation and written to the database in chunks
/// of roughly [`AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP`] entries to bound memory usage.
fn dump_state<Provider>(
    mut collector: Collector<Address, GenesisAccount>,
    provider_rw: &Provider,
    block: u64,
) -> Result<(), eyre::Error>
where
    Provider: StaticFileProviderFactory
        + DBProvider<Tx: DbTxMut>
        + HeaderProvider
        + HashingWriter
        + HistoryWriter
        + StateWriter
        + AsRef<Provider>,
{
    let accounts_len = collector.len();
    let mut accounts = Vec::with_capacity(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP);
    let mut total_inserted_accounts = 0;

    for (index, entry) in collector.iter()?.enumerate() {
        let (address, account) = entry?;
        // Collector entries are compact-encoded; decode them back into their rich types.
        let (address, _) = Address::from_compact(address.as_slice(), address.len());
        let (account, _) = GenesisAccount::from_compact(account.as_slice(), account.len());

        accounts.push((address, account));

        // Flush the buffered accounts on each chunk boundary and after the final entry.
        if (index > 0 && index.is_multiple_of(AVERAGE_COUNT_ACCOUNTS_PER_GB_STATE_DUMP)) ||
            index == accounts_len - 1
        {
            total_inserted_accounts += accounts.len();

            info!(target: "reth::cli",
                total_inserted_accounts,
                "Writing accounts to db"
            );

            // use transaction to insert genesis header
            insert_genesis_hashes(
                provider_rw,
                accounts.iter().map(|(address, account)| (address, account)),
            )?;

            insert_history(
                provider_rw,
                accounts.iter().map(|(address, account)| (address, account)),
                block,
            )?;

            // block is already written to static files
            insert_state(
                provider_rw,
                accounts.iter().map(|(address, account)| (address, account)),
                block,
            )?;

            accounts.clear();
        }
    }
    Ok(())
}
565
/// Computes the state root (from scratch) based on the accounts and storages present in the
/// database.
///
/// The computation runs incrementally: each time the state root computer reports progress, the
/// pending trie updates are flushed to the database and the computation resumes from the saved
/// intermediate state, keeping memory usage bounded.
fn compute_state_root<Provider>(provider: &Provider) -> Result<B256, InitStorageError>
where
    Provider: DBProvider<Tx: DbTxMut> + TrieWriter,
{
    trace!(target: "reth::cli", "Computing state root");

    let tx = provider.tx_ref();
    // Checkpoint to resume from after each flush; `None` starts from scratch.
    let mut intermediate_state: Option<IntermediateStateRootState> = None;
    let mut total_flushed_updates = 0;

    loop {
        match StateRootComputer::from_tx(tx)
            .with_intermediate_state(intermediate_state)
            .root_with_progress()?
        {
            StateRootProgress::Progress(state, _, updates) => {
                // Persist the partial trie updates and remember where to resume.
                let updated_len = provider.write_trie_updates(&updates)?;
                total_flushed_updates += updated_len;

                trace!(target: "reth::cli",
                    last_account_key = %state.account_root_state.last_hashed_key,
                    updated_len,
                    total_flushed_updates,
                    "Flushing trie updates"
                );

                intermediate_state = Some(*state);

                // Periodically surface progress at info level for long-running imports.
                if total_flushed_updates.is_multiple_of(SOFT_LIMIT_COUNT_FLUSHED_UPDATES) {
                    info!(target: "reth::cli",
                        total_flushed_updates,
                        "Flushing trie updates"
                    );
                }
            }
            StateRootProgress::Complete(root, _, updates) => {
                // Final batch of updates, then return the computed root.
                let updated_len = provider.write_trie_updates(&updates)?;
                total_flushed_updates += updated_len;

                trace!(target: "reth::cli",
                    %root,
                    updated_len,
                    total_flushed_updates,
                    "State root has been computed"
                );

                return Ok(root)
            }
        }
    }
}
619
/// Type to deserialize state root from state dump file.
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
struct StateRoot {
    /// The expected state root hash.
    root: B256,
}
625
/// An account as in the state dump file. This contains a [`GenesisAccount`] and the account's
/// address.
#[derive(Debug, Serialize, Deserialize)]
struct GenesisAccountWithAddress {
    /// The account's balance, nonce, code, and storage.
    // `flatten` inlines the account's fields next to `address` in the same JSON object.
    #[serde(flatten)]
    genesis_account: GenesisAccount,
    /// The account's address.
    address: Address,
}
636
#[cfg(test)]
mod tests {
    use super::*;
    use alloy_consensus::constants::{
        HOLESKY_GENESIS_HASH, MAINNET_GENESIS_HASH, SEPOLIA_GENESIS_HASH,
    };
    use alloy_genesis::Genesis;
    use reth_chainspec::{Chain, ChainSpec, HOLESKY, MAINNET, SEPOLIA};
    use reth_db::DatabaseEnv;
    use reth_db_api::{
        cursor::DbCursorRO,
        models::{storage_sharded_key::StorageShardedKey, IntegerList, ShardedKey},
        table::{Table, TableRow},
        transaction::DbTx,
        Database,
    };
    use reth_provider::{
        test_utils::{create_test_provider_factory_with_chain_spec, MockNodeTypesWithDB},
        ProviderFactory,
    };
    use std::{collections::BTreeMap, sync::Arc};

    /// Collects all rows of table `T` into a vector, in key order.
    fn collect_table_entries<DB, T>(
        tx: &<DB as Database>::TX,
    ) -> Result<Vec<TableRow<T>>, InitStorageError>
    where
        DB: Database,
        T: Table,
    {
        Ok(tx.cursor_read::<T>()?.walk_range(..)?.collect::<Result<Vec<_>, _>>()?)
    }

    /// Initializing mainnet genesis returns the well-known mainnet genesis hash.
    #[test]
    fn success_init_genesis_mainnet() {
        let genesis_hash =
            init_genesis(&create_test_provider_factory_with_chain_spec(MAINNET.clone())).unwrap();

        // actual, expected
        assert_eq!(genesis_hash, MAINNET_GENESIS_HASH);
    }

    /// Initializing Sepolia genesis returns the well-known Sepolia genesis hash.
    #[test]
    fn success_init_genesis_sepolia() {
        let genesis_hash =
            init_genesis(&create_test_provider_factory_with_chain_spec(SEPOLIA.clone())).unwrap();

        // actual, expected
        assert_eq!(genesis_hash, SEPOLIA_GENESIS_HASH);
    }

    /// Initializing Holesky genesis returns the well-known Holesky genesis hash.
    #[test]
    fn success_init_genesis_holesky() {
        let genesis_hash =
            init_genesis(&create_test_provider_factory_with_chain_spec(HOLESKY.clone())).unwrap();

        // actual, expected
        assert_eq!(genesis_hash, HOLESKY_GENESIS_HASH);
    }

    /// Re-initializing with a different chainspec must fail with a genesis hash mismatch.
    #[test]
    fn fail_init_inconsistent_db() {
        let factory = create_test_provider_factory_with_chain_spec(SEPOLIA.clone());
        let static_file_provider = factory.static_file_provider();
        init_genesis(&factory).unwrap();

        // Try to init db with a different genesis block
        let genesis_hash = init_genesis(&ProviderFactory::<MockNodeTypesWithDB>::new(
            factory.into_db(),
            MAINNET.clone(),
            static_file_provider,
        ));

        assert!(matches!(
            genesis_hash.unwrap_err(),
            InitStorageError::GenesisHashMismatch {
                chainspec_hash: MAINNET_GENESIS_HASH,
                storage_hash: SEPOLIA_GENESIS_HASH
            }
        ))
    }

    /// Genesis init must populate account and storage history indices at block 0.
    #[test]
    fn init_genesis_history() {
        let address_with_balance = Address::with_last_byte(1);
        let address_with_storage = Address::with_last_byte(2);
        let storage_key = B256::with_last_byte(1);
        let chain_spec = Arc::new(ChainSpec {
            chain: Chain::from_id(1),
            genesis: Genesis {
                alloc: BTreeMap::from([
                    (
                        address_with_balance,
                        GenesisAccount { balance: U256::from(1), ..Default::default() },
                    ),
                    (
                        address_with_storage,
                        GenesisAccount {
                            storage: Some(BTreeMap::from([(storage_key, B256::random())])),
                            ..Default::default()
                        },
                    ),
                ]),
                ..Default::default()
            },
            hardforks: Default::default(),
            paris_block_and_final_difficulty: None,
            deposit_contract: None,
            ..Default::default()
        });

        let factory = create_test_provider_factory_with_chain_spec(chain_spec);
        init_genesis(&factory).unwrap();

        let provider = factory.provider().unwrap();

        let tx = provider.tx_ref();

        // Both accounts must appear in the account history index at block 0.
        assert_eq!(
            collect_table_entries::<Arc<DatabaseEnv>, tables::AccountsHistory>(tx)
                .expect("failed to collect"),
            vec![
                (ShardedKey::new(address_with_balance, u64::MAX), IntegerList::new([0]).unwrap()),
                (ShardedKey::new(address_with_storage, u64::MAX), IntegerList::new([0]).unwrap())
            ],
        );

        // Only the storage-bearing account must appear in the storage history index.
        assert_eq!(
            collect_table_entries::<Arc<DatabaseEnv>, tables::StoragesHistory>(tx)
                .expect("failed to collect"),
            vec![(
                StorageShardedKey::new(address_with_storage, storage_key, u64::MAX),
                IntegerList::new([0]).unwrap()
            )],
        );
    }
}