Skip to main content

reth_config/
config.rs

1//! Configuration files.
2use reth_network_types::{PeersConfig, SessionsConfig};
3use reth_prune_types::PruneModes;
4use reth_stages_types::ExecutionStageThresholds;
5use reth_static_file_types::{StaticFileMap, StaticFileSegment};
6use std::{
7    path::{Path, PathBuf},
8    time::Duration,
9};
10use url::Url;
11
12#[cfg(feature = "serde")]
13const EXTENSION: &str = "toml";
14
15/// The default prune block interval
16pub const DEFAULT_BLOCK_INTERVAL: usize = 5;
17
/// Configuration for the reth node.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct Config {
    /// Configuration for each stage in the pipeline.
    pub stages: StageConfig,
    /// Configuration for pruning.
    // NOTE(review): the struct-level `serde(default)` above already defaults missing
    // fields, so this field-level attribute is redundant (but harmless).
    #[cfg_attr(feature = "serde", serde(default))]
    pub prune: PruneConfig,
    /// Configuration for the discovery service.
    // NOTE(review): the type is `PeersConfig`; presumably this governs peer
    // management rather than discovery alone — confirm and reword if so.
    pub peers: PeersConfig,
    /// Configuration for peer sessions.
    pub sessions: SessionsConfig,
    /// Configuration for static files.
    #[cfg_attr(feature = "serde", serde(default))]
    pub static_files: StaticFilesConfig,
}
36
37impl Config {
38    /// Sets the pruning configuration.
39    pub fn set_prune_config(&mut self, prune_config: PruneConfig) {
40        self.prune = prune_config;
41    }
42}
43
#[cfg(feature = "serde")]
impl Config {
    /// Load a [`Config`] from a specified path.
    ///
    /// A new configuration file is created with default values if none
    /// exists.
    pub fn from_path(path: impl AsRef<Path>) -> eyre::Result<Self> {
        let path = path.as_ref();
        match std::fs::read_to_string(path) {
            // The file exists: parse its contents as TOML.
            Ok(contents) => {
                toml::from_str(&contents).map_err(|e| eyre::eyre!("Failed to parse TOML: {e}"))
            }
            // The file is missing: persist a default configuration and return it.
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
                if let Some(parent) = path.parent() {
                    std::fs::create_dir_all(parent)
                        .map_err(|e| eyre::eyre!("Failed to create directory: {e}"))?;
                }
                let default_config = Self::default();
                let serialized = toml::to_string_pretty(&default_config)
                    .map_err(|e| eyre::eyre!("Failed to serialize to TOML: {e}"))?;
                std::fs::write(path, serialized)
                    .map_err(|e| eyre::eyre!("Failed to write configuration file: {e}"))?;
                Ok(default_config)
            }
            // Any other I/O failure is surfaced with context.
            Err(e) => Err(eyre::eyre!("Failed to load configuration: {e}")),
        }
    }

    /// Returns the [`PeersConfig`] for the node.
    ///
    /// If a peers file is provided, the basic nodes from the file are added to the configuration.
    pub fn peers_config_with_basic_nodes_from_file(
        &self,
        peers_file: Option<&Path>,
    ) -> PeersConfig {
        let enriched = self.peers.clone().with_basic_nodes_from_file(peers_file);
        match enriched {
            Ok(config) => config,
            // Fall back to the unmodified peers configuration on any error.
            Err(_) => self.peers.clone(),
        }
    }

    /// Save the configuration to toml file.
    pub fn save(&self, path: &Path) -> Result<(), std::io::Error> {
        // Refuse to write under any extension other than the expected one.
        let has_expected_extension = path.extension() == Some(std::ffi::OsStr::new(EXTENSION));
        if !has_expected_extension {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                format!("reth config file extension must be '{EXTENSION}'"),
            ));
        }

        let serialized = toml::to_string(self)
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()))?;
        std::fs::write(path, serialized)
    }
}
101
/// Configuration for each stage in the pipeline.
// Every field has its own `Default`, and the struct-level `serde(default)` lets a
// partial TOML file omit any stage section.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct StageConfig {
    /// ERA stage configuration.
    pub era: EraConfig,
    /// Header stage configuration.
    pub headers: HeadersConfig,
    /// Body stage configuration.
    pub bodies: BodiesConfig,
    /// Sender Recovery stage configuration.
    pub sender_recovery: SenderRecoveryConfig,
    /// Execution stage configuration.
    pub execution: ExecutionConfig,
    /// Prune stage configuration.
    pub prune: PruneStageConfig,
    /// Account Hashing stage configuration.
    pub account_hashing: HashingConfig,
    /// Storage Hashing stage configuration (same shape as account hashing).
    pub storage_hashing: HashingConfig,
    /// Merkle stage configuration.
    pub merkle: MerkleConfig,
    /// Transaction Lookup stage configuration.
    pub transaction_lookup: TransactionLookupConfig,
    /// Index Account History stage configuration.
    pub index_account_history: IndexHistoryConfig,
    /// Index Storage History stage configuration.
    pub index_storage_history: IndexHistoryConfig,
    /// Common ETL related configuration.
    pub etl: EtlConfig,
}
134
135impl StageConfig {
136    /// The highest threshold (in number of blocks) for switching between incremental and full
137    /// calculations across `MerkleStage`, `AccountHashingStage` and `StorageHashingStage`. This is
138    /// required to figure out if can prune or not changesets on subsequent pipeline runs during
139    /// `ExecutionStage`
140    pub fn execution_external_clean_threshold(&self) -> u64 {
141        self.merkle
142            .incremental_threshold
143            .max(self.account_hashing.clean_threshold)
144            .max(self.storage_hashing.clean_threshold)
145    }
146}
147
/// ERA stage configuration.
// NOTE(review): the `path`/`url` conflicts are documented but not enforced in this
// file — presumably validated by whatever consumes this config; confirm.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct EraConfig {
    /// Path to a local directory where ERA1 files are located.
    ///
    /// Conflicts with `url`.
    pub path: Option<PathBuf>,
    /// The base URL of an ERA1 file host to download from.
    ///
    /// Conflicts with `path`.
    pub url: Option<Url>,
    /// Path to a directory where files downloaded from `url` will be stored until processed.
    ///
    /// Required for `url`. See [`EraConfig::with_datadir`] for deriving it from the datadir.
    pub folder: Option<PathBuf>,
}
166
167impl EraConfig {
168    /// Sets `folder` for temporary downloads as a directory called "era" inside `dir`.
169    pub fn with_datadir(mut self, dir: impl AsRef<Path>) -> Self {
170        self.folder = Some(dir.as_ref().join("era"));
171        self
172    }
173}
174
/// Header stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HeadersConfig {
    /// The maximum number of requests to send concurrently.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// Maximum amount of responses to buffer internally.
    /// The response contains multiple headers.
    ///
    /// Default: 100
    pub downloader_max_buffered_responses: usize,
    /// The maximum number of headers to request from a peer at a time.
    ///
    /// Default: 1000
    pub downloader_request_limit: u64,
    /// The maximum number of headers to download before committing progress to the database.
    ///
    /// Default: 10000
    pub commit_threshold: u64,
}
196
197impl Default for HeadersConfig {
198    fn default() -> Self {
199        Self {
200            commit_threshold: 10_000,
201            downloader_request_limit: 1_000,
202            downloader_max_concurrent_requests: 100,
203            downloader_min_concurrent_requests: 5,
204            downloader_max_buffered_responses: 100,
205        }
206    }
207}
208
/// Body stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct BodiesConfig {
    /// The batch size of non-empty blocks per one request
    ///
    /// Default: 200
    pub downloader_request_limit: u64,
    /// The maximum number of block bodies returned at once from the stream
    ///
    /// Default: `1_000`
    pub downloader_stream_batch_size: usize,
    /// The size of the internal block buffer in bytes.
    ///
    /// Default: 2GB (`2 * 1024 * 1024 * 1024` bytes)
    pub downloader_max_buffered_blocks_size_bytes: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// The maximum number of requests to send concurrently.
    /// This is equal to the max number of peers.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
}
236
237impl Default for BodiesConfig {
238    fn default() -> Self {
239        Self {
240            downloader_request_limit: 200,
241            downloader_stream_batch_size: 1_000,
242            downloader_max_buffered_blocks_size_bytes: 2 * 1024 * 1024 * 1024, // ~2GB
243            downloader_min_concurrent_requests: 5,
244            downloader_max_concurrent_requests: 100,
245        }
246    }
247}
248
/// Sender recovery stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct SenderRecoveryConfig {
    /// The maximum number of transactions to process before committing progress to the database.
    ///
    /// Default: 5,000,000
    pub commit_threshold: u64,
}
257
258impl Default for SenderRecoveryConfig {
259    fn default() -> Self {
260        Self { commit_threshold: 5_000_000 }
261    }
262}
263
/// Execution stage configuration.
// NOTE(review): `None` presumably disables the corresponding limit — confirm
// against `ExecutionStageThresholds` in `reth_stages_types`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct ExecutionConfig {
    /// The maximum number of blocks to process before the execution stage commits.
    ///
    /// Default: 500,000
    pub max_blocks: Option<u64>,
    /// The maximum number of state changes to keep in memory before the execution stage commits.
    ///
    /// Default: 5,000,000
    pub max_changes: Option<u64>,
    /// The maximum cumulative amount of gas to process before the execution stage commits.
    ///
    /// Default: 30M gas * 50,000 blocks
    pub max_cumulative_gas: Option<u64>,
    /// The maximum time spent on blocks processing before the execution stage commits.
    ///
    /// Default: 10 minutes
    // Serialized in human-readable form; deserialization also accepts the older
    // `{ secs, nanos }` struct form via `deserialize_duration`.
    #[cfg_attr(
        feature = "serde",
        serde(
            serialize_with = "humantime_serde::serialize",
            deserialize_with = "deserialize_duration"
        )
    )]
    pub max_duration: Option<Duration>,
}
285
286impl Default for ExecutionConfig {
287    fn default() -> Self {
288        Self {
289            max_blocks: Some(500_000),
290            max_changes: Some(5_000_000),
291            // 50k full blocks of 30M gas
292            max_cumulative_gas: Some(30_000_000 * 50_000),
293            // 10 minutes
294            max_duration: Some(Duration::from_secs(10 * 60)),
295        }
296    }
297}
298
299impl From<ExecutionConfig> for ExecutionStageThresholds {
300    fn from(config: ExecutionConfig) -> Self {
301        Self {
302            max_blocks: config.max_blocks,
303            max_changes: config.max_changes,
304            max_cumulative_gas: config.max_cumulative_gas,
305            max_duration: config.max_duration,
306        }
307    }
308}
309
/// Prune stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneStageConfig {
    /// The maximum number of entries to prune before committing progress to the database.
    ///
    /// Default: 1,000,000
    pub commit_threshold: usize,
}
318
319impl Default for PruneStageConfig {
320    fn default() -> Self {
321        Self { commit_threshold: 1_000_000 }
322    }
323}
324
/// Hashing stage configuration.
// Shared by both the account hashing and storage hashing stages (see `StageConfig`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HashingConfig {
    /// The threshold (in number of blocks) for switching between
    /// incremental hashing and full hashing.
    ///
    /// Default: 500,000
    pub clean_threshold: u64,
    /// The maximum number of entities to process before committing progress to the database.
    ///
    /// Default: 100,000
    pub commit_threshold: u64,
    /// The maximum number of changeset entries to process before committing progress. The stage
    /// commits after either `commit_threshold` blocks or `commit_entries` entries, whichever
    /// comes first. This bounds memory usage when blocks contain many state changes.
    ///
    /// Default: 30,000,000
    pub commit_entries: u64,
}
340
341impl Default for HashingConfig {
342    fn default() -> Self {
343        Self { clean_threshold: 500_000, commit_threshold: 100_000, commit_entries: 30_000_000 }
344    }
345}
346
/// Merkle stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct MerkleConfig {
    /// The number of blocks we will run the incremental root method for when we are catching up on
    /// the merkle stage for a large number of blocks.
    ///
    /// When we are catching up for a large number of blocks, we can only run the incremental root
    /// for a limited number of blocks, otherwise the incremental root method may cause the node to
    /// OOM. This number determines how many blocks in a row we will run the incremental root
    /// method for.
    ///
    /// Default: 7,000
    pub incremental_threshold: u64,
    /// The threshold (in number of blocks) for switching from incremental trie building of changes
    /// to whole rebuild.
    ///
    /// Default: 100,000
    pub rebuild_threshold: u64,
}
364
365impl Default for MerkleConfig {
366    fn default() -> Self {
367        Self { incremental_threshold: 7_000, rebuild_threshold: 100_000 }
368    }
369}
370
/// Transaction Lookup stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct TransactionLookupConfig {
    /// The maximum number of transactions to process before writing to disk.
    ///
    /// Default: 5,000,000
    pub chunk_size: u64,
}
379
380impl Default for TransactionLookupConfig {
381    fn default() -> Self {
382        Self { chunk_size: 5_000_000 }
383    }
384}
385
/// Common ETL related configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct EtlConfig {
    /// Data directory where temporary files are created.
    ///
    /// `None` by default; see [`EtlConfig::from_datadir`] for deriving one from the datadir.
    pub dir: Option<PathBuf>,
    /// The maximum size in bytes of data held in memory before being flushed to disk as a file.
    ///
    /// Default: 500 MB (see [`EtlConfig::default_file_size`]).
    pub file_size: usize,
}
396
397impl Default for EtlConfig {
398    fn default() -> Self {
399        Self { dir: None, file_size: Self::default_file_size() }
400    }
401}
402
403impl EtlConfig {
404    /// Creates an ETL configuration
405    pub const fn new(dir: Option<PathBuf>, file_size: usize) -> Self {
406        Self { dir, file_size }
407    }
408
409    /// Return default ETL directory from datadir path.
410    pub fn from_datadir(path: &Path) -> PathBuf {
411        path.join("etl-tmp")
412    }
413
414    /// Default size in bytes of data held in memory before being flushed to disk as a file.
415    pub const fn default_file_size() -> usize {
416        // 500 MB
417        500 * (1024 * 1024)
418    }
419}
420
/// Static files configuration.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct StaticFilesConfig {
    /// Number of blocks per file for each segment.
    ///
    /// Unset segments are simply omitted from the resulting map (see
    /// [`Self::as_blocks_per_file_map`]); validate with [`Self::validate`].
    pub blocks_per_file: BlocksPerFileConfig,
}
429
/// Configuration for the number of blocks per file for each segment.
// All fields are optional overrides; `Some(0)` is rejected by `StaticFilesConfig::validate`.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct BlocksPerFileConfig {
    /// Number of blocks per file for the headers segment.
    pub headers: Option<u64>,
    /// Number of blocks per file for the transactions segment.
    pub transactions: Option<u64>,
    /// Number of blocks per file for the receipts segment.
    pub receipts: Option<u64>,
    /// Number of blocks per file for the transaction senders segment.
    pub transaction_senders: Option<u64>,
    /// Number of blocks per file for the account changesets segment.
    pub account_change_sets: Option<u64>,
    /// Number of blocks per file for the storage changesets segment.
    pub storage_change_sets: Option<u64>,
}
448
449impl StaticFilesConfig {
450    /// Validates the static files configuration.
451    ///
452    /// Returns an error if any blocks per file value is zero.
453    pub fn validate(&self) -> eyre::Result<()> {
454        let BlocksPerFileConfig {
455            headers,
456            transactions,
457            receipts,
458            transaction_senders,
459            account_change_sets,
460            storage_change_sets,
461        } = self.blocks_per_file;
462        eyre::ensure!(headers != Some(0), "Headers segment blocks per file must be greater than 0");
463        eyre::ensure!(
464            transactions != Some(0),
465            "Transactions segment blocks per file must be greater than 0"
466        );
467        eyre::ensure!(
468            receipts != Some(0),
469            "Receipts segment blocks per file must be greater than 0"
470        );
471        eyre::ensure!(
472            transaction_senders != Some(0),
473            "Transaction senders segment blocks per file must be greater than 0"
474        );
475        eyre::ensure!(
476            account_change_sets != Some(0),
477            "Account changesets segment blocks per file must be greater than 0"
478        );
479        eyre::ensure!(
480            storage_change_sets != Some(0),
481            "Storage changesets segment blocks per file must be greater than 0"
482        );
483        Ok(())
484    }
485
486    /// Converts the blocks per file configuration into a [`StaticFileMap`].
487    pub fn as_blocks_per_file_map(&self) -> StaticFileMap<u64> {
488        let BlocksPerFileConfig {
489            headers,
490            transactions,
491            receipts,
492            transaction_senders,
493            account_change_sets,
494            storage_change_sets,
495        } = self.blocks_per_file;
496
497        let mut map = StaticFileMap::default();
498        // Iterating over all possible segments allows us to do an exhaustive match here,
499        // to not forget to configure new segments in the future.
500        for segment in StaticFileSegment::iter() {
501            let blocks_per_file = match segment {
502                StaticFileSegment::Headers => headers,
503                StaticFileSegment::Transactions => transactions,
504                StaticFileSegment::Receipts => receipts,
505                StaticFileSegment::TransactionSenders => transaction_senders,
506                StaticFileSegment::AccountChangeSets => account_change_sets,
507                StaticFileSegment::StorageChangeSets => storage_change_sets,
508            };
509
510            if let Some(blocks_per_file) = blocks_per_file {
511                map.insert(segment, blocks_per_file);
512            }
513        }
514        map
515    }
516}
517
/// History stage configuration.
// Used for both the account history and storage history index stages (see `StageConfig`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct IndexHistoryConfig {
    /// The maximum number of blocks to process before committing progress to the database.
    ///
    /// Default: 100,000
    pub commit_threshold: u64,
}
526
527impl Default for IndexHistoryConfig {
528    fn default() -> Self {
529        Self { commit_threshold: 100_000 }
530    }
531}
532
/// Pruning configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneConfig {
    /// Minimum pruning interval measured in blocks.
    ///
    /// Default: [`DEFAULT_BLOCK_INTERVAL`] (5).
    pub block_interval: usize,
    /// Pruning configuration for every part of the data that can be pruned.
    // The `parts` alias keeps older config files (which used `[prune.parts]`) deserializable.
    #[cfg_attr(feature = "serde", serde(alias = "parts"))]
    pub segments: PruneModes,
}
544
545impl Default for PruneConfig {
546    fn default() -> Self {
547        Self { block_interval: DEFAULT_BLOCK_INTERVAL, segments: PruneModes::default() }
548    }
549}
550
551impl PruneConfig {
552    /// Returns whether this configuration is the default one.
553    pub fn is_default(&self) -> bool {
554        self == &Self::default()
555    }
556
557    /// Returns whether there is any kind of receipt pruning configuration.
558    pub fn has_receipts_pruning(&self) -> bool {
559        self.segments.has_receipts_pruning()
560    }
561
562    /// Merges values from `other` into `self`.
563    /// - `Option<PruneMode>` fields: set from `other` only if `self` is `None`.
564    /// - `block_interval`: set from `other` only if `self.block_interval ==
565    ///   DEFAULT_BLOCK_INTERVAL`.
566    /// - `receipts_log_filter`: set from `other` only if `self` is empty and `other` is non-empty.
567    pub fn merge(&mut self, other: Self) {
568        let Self {
569            block_interval,
570            segments:
571                PruneModes {
572                    sender_recovery,
573                    transaction_lookup,
574                    receipts,
575                    account_history,
576                    storage_history,
577                    bodies_history,
578                    receipts_log_filter,
579                },
580        } = other;
581
582        // Merge block_interval, only update if it's the default interval
583        if self.block_interval == DEFAULT_BLOCK_INTERVAL {
584            self.block_interval = block_interval;
585        }
586
587        // Merge the various segment prune modes
588        self.segments.sender_recovery = self.segments.sender_recovery.or(sender_recovery);
589        self.segments.transaction_lookup = self.segments.transaction_lookup.or(transaction_lookup);
590        self.segments.receipts = self.segments.receipts.or(receipts);
591        self.segments.account_history = self.segments.account_history.or(account_history);
592        self.segments.storage_history = self.segments.storage_history.or(storage_history);
593        self.segments.bodies_history = self.segments.bodies_history.or(bodies_history);
594
595        if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() {
596            self.segments.receipts_log_filter = receipts_log_filter;
597        }
598    }
599}
600
/// Helper to support older versions of Duration deserialization.
#[cfg(feature = "serde")]
fn deserialize_duration<'de, D>(deserializer: D) -> Result<Option<Duration>, D::Error>
where
    D: serde::de::Deserializer<'de>,
{
    // Untagged enum: serde tries variants in declaration order, so the
    // human-readable form (e.g. "10m") is attempted first, falling back to
    // the plain `Duration` representation (e.g. `{ secs, nanos }`) used by
    // older config files. The variant order is therefore load-bearing.
    #[derive(serde::Deserialize)]
    #[serde(untagged)]
    enum AnyDuration {
        #[serde(deserialize_with = "humantime_serde::deserialize")]
        Human(Option<Duration>),
        Duration(Option<Duration>),
    }

    // Both variants carry the same payload; collapse to the inner value.
    <AnyDuration as serde::Deserialize>::deserialize(deserializer).map(|d| match d {
        AnyDuration::Human(duration) | AnyDuration::Duration(duration) => duration,
    })
}
619
620#[cfg(all(test, feature = "serde"))]
621mod tests {
622    use super::{Config, EXTENSION};
623    use crate::PruneConfig;
624    use alloy_primitives::Address;
625    use reth_network_peers::TrustedPeer;
626    use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig};
627    use std::{collections::BTreeMap, path::Path, str::FromStr, time::Duration};
628
629    fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) {
630        let temp_dir = tempfile::tempdir().unwrap();
631        let config_path = temp_dir.path().join(filename).with_extension(EXTENSION);
632
633        proc(&config_path);
634
635        temp_dir.close().unwrap()
636    }
637
638    /// Run a test function with a temporary config path as fixture.
639    fn with_config_path(test_fn: fn(&Path)) {
640        // Create a temporary directory for the config file
641        let config_dir = tempfile::tempdir().expect("creating test fixture failed");
642        // Create the config file path
643        let config_path =
644            config_dir.path().join("example-app").join("example-config").with_extension("toml");
645        // Run the test function with the config path
646        test_fn(&config_path);
647        config_dir.close().expect("removing test fixture failed");
648    }
649
650    #[test]
651    fn test_load_path_works() {
652        with_config_path(|path| {
653            let config = Config::from_path(path).expect("load_path failed");
654            assert_eq!(config, Config::default());
655        })
656    }
657
658    #[test]
659    fn test_load_path_reads_existing_config() {
660        with_config_path(|path| {
661            let config = Config::default();
662
663            // Create the parent directory if it doesn't exist
664            if let Some(parent) = path.parent() {
665                std::fs::create_dir_all(parent).expect("Failed to create directories");
666            }
667
668            // Write the config to the file
669            std::fs::write(path, toml::to_string(&config).unwrap())
670                .expect("Failed to write config");
671
672            // Load the config from the file and compare it
673            let loaded = Config::from_path(path).expect("load_path failed");
674            assert_eq!(config, loaded);
675        })
676    }
677
678    #[test]
679    fn test_load_path_fails_on_invalid_toml() {
680        with_config_path(|path| {
681            let invalid_toml = "invalid toml data";
682
683            // Create the parent directory if it doesn't exist
684            if let Some(parent) = path.parent() {
685                std::fs::create_dir_all(parent).expect("Failed to create directories");
686            }
687
688            // Write invalid TOML data to the file
689            std::fs::write(path, invalid_toml).expect("Failed to write invalid TOML");
690
691            // Attempt to load the config should fail
692            let result = Config::from_path(path);
693            assert!(result.is_err());
694        })
695    }
696
697    #[test]
698    fn test_load_path_creates_directory_if_not_exists() {
699        with_config_path(|path| {
700            // Ensure the directory does not exist
701            let parent = path.parent().unwrap();
702            assert!(!parent.exists());
703
704            // Load the configuration, which should create the directory and a default config file
705            let config = Config::from_path(path).expect("load_path failed");
706            assert_eq!(config, Config::default());
707
708            // The directory and file should now exist
709            assert!(parent.exists());
710            assert!(path.exists());
711        });
712    }
713
714    #[test]
715    fn test_store_config() {
716        with_tempdir("config-store-test", |config_path| {
717            let config = Config::default();
718            std::fs::write(
719                config_path,
720                toml::to_string(&config).expect("Failed to serialize config"),
721            )
722            .expect("Failed to write config file");
723        })
724    }
725
726    #[test]
727    fn test_store_config_method() {
728        with_tempdir("config-store-test-method", |config_path| {
729            let config = Config::default();
730            config.save(config_path).expect("Failed to store config");
731        })
732    }
733
734    #[test]
735    fn test_load_config() {
736        with_tempdir("config-load-test", |config_path| {
737            let config = Config::default();
738
739            // Write the config to a file
740            std::fs::write(
741                config_path,
742                toml::to_string(&config).expect("Failed to serialize config"),
743            )
744            .expect("Failed to write config file");
745
746            // Load the config from the file
747            let loaded_config = Config::from_path(config_path).unwrap();
748
749            // Compare the loaded config with the original config
750            assert_eq!(config, loaded_config);
751        })
752    }
753
754    #[test]
755    fn test_load_execution_stage() {
756        with_tempdir("config-load-test", |config_path| {
757            let mut config = Config::default();
758            config.stages.execution.max_duration = Some(Duration::from_secs(10 * 60));
759
760            // Write the config to a file
761            std::fs::write(
762                config_path,
763                toml::to_string(&config).expect("Failed to serialize config"),
764            )
765            .expect("Failed to write config file");
766
767            // Load the config from the file
768            let loaded_config = Config::from_path(config_path).unwrap();
769
770            // Compare the loaded config with the original config
771            assert_eq!(config, loaded_config);
772        })
773    }
774
775    // ensures config deserialization is backwards compatible
776    #[test]
777    fn test_backwards_compatibility() {
778        let alpha_0_0_8 = r"#
779[stages.headers]
780downloader_max_concurrent_requests = 100
781downloader_min_concurrent_requests = 5
782downloader_max_buffered_responses = 100
783downloader_request_limit = 1000
784commit_threshold = 10000
785
786[stages.bodies]
787downloader_request_limit = 200
788downloader_stream_batch_size = 1000
789downloader_max_buffered_blocks_size_bytes = 2147483648
790downloader_min_concurrent_requests = 5
791downloader_max_concurrent_requests = 100
792
793[stages.sender_recovery]
794commit_threshold = 5000000
795
796[stages.execution]
797max_blocks = 500000
798max_changes = 5000000
799
800[stages.account_hashing]
801clean_threshold = 500000
802commit_threshold = 100000
803
804[stages.storage_hashing]
805clean_threshold = 500000
806commit_threshold = 100000
807
808[stages.merkle]
809clean_threshold = 50000
810
811[stages.transaction_lookup]
812chunk_size = 5000000
813
814[stages.index_account_history]
815commit_threshold = 100000
816
817[stages.index_storage_history]
818commit_threshold = 100000
819
820[peers]
821refill_slots_interval = '1s'
822trusted_nodes = []
823connect_trusted_nodes_only = false
824max_backoff_count = 5
825ban_duration = '12h'
826
827[peers.connection_info]
828max_outbound = 100
829max_inbound = 30
830
831[peers.reputation_weights]
832bad_message = -16384
833bad_block = -16384
834bad_transactions = -16384
835already_seen_transactions = 0
836timeout = -4096
837bad_protocol = -2147483648
838failed_to_connect = -25600
839dropped = -4096
840
841[peers.backoff_durations]
842low = '30s'
843medium = '3m'
844high = '15m'
845max = '1h'
846
847[sessions]
848session_command_buffer = 32
849session_event_buffer = 260
850
851[sessions.limits]
852
853[sessions.initial_internal_request_timeout]
854secs = 20
855nanos = 0
856
857[sessions.protocol_breach_request_timeout]
858secs = 120
859nanos = 0
860
861[prune]
862block_interval = 5
863
864[prune.parts]
865sender_recovery = { distance = 16384 }
866transaction_lookup = 'full'
867receipts = { before = 1920000 }
868account_history = { distance = 16384 }
869storage_history = { distance = 16384 }
870[prune.parts.receipts_log_filter]
871'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
872'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
873#";
874        let _conf: Config = toml::from_str(alpha_0_0_8).unwrap();
875
876        let alpha_0_0_11 = r"#
877[prune.segments]
878sender_recovery = { distance = 16384 }
879transaction_lookup = 'full'
880receipts = { before = 1920000 }
881account_history = { distance = 16384 }
882storage_history = { distance = 16384 }
883[prune.segments.receipts_log_filter]
884'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
885'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
886#";
887        let _conf: Config = toml::from_str(alpha_0_0_11).unwrap();
888
889        let alpha_0_0_18 = r"#
890[stages.headers]
891downloader_max_concurrent_requests = 100
892downloader_min_concurrent_requests = 5
893downloader_max_buffered_responses = 100
894downloader_request_limit = 1000
895commit_threshold = 10000
896
897[stages.total_difficulty]
898commit_threshold = 100000
899
900[stages.bodies]
901downloader_request_limit = 200
902downloader_stream_batch_size = 1000
903downloader_max_buffered_blocks_size_bytes = 2147483648
904downloader_min_concurrent_requests = 5
905downloader_max_concurrent_requests = 100
906
907[stages.sender_recovery]
908commit_threshold = 5000000
909
910[stages.execution]
911max_blocks = 500000
912max_changes = 5000000
913max_cumulative_gas = 1500000000000
914[stages.execution.max_duration]
915secs = 600
916nanos = 0
917
918[stages.account_hashing]
919clean_threshold = 500000
920commit_threshold = 100000
921
922[stages.storage_hashing]
923clean_threshold = 500000
924commit_threshold = 100000
925
926[stages.merkle]
927clean_threshold = 50000
928
929[stages.transaction_lookup]
930commit_threshold = 5000000
931
932[stages.index_account_history]
933commit_threshold = 100000
934
935[stages.index_storage_history]
936commit_threshold = 100000
937
938[peers]
939refill_slots_interval = '5s'
940trusted_nodes = []
941connect_trusted_nodes_only = false
942max_backoff_count = 5
943ban_duration = '12h'
944
945[peers.connection_info]
946max_outbound = 100
947max_inbound = 30
948max_concurrent_outbound_dials = 10
949
950[peers.reputation_weights]
951bad_message = -16384
952bad_block = -16384
953bad_transactions = -16384
954already_seen_transactions = 0
955timeout = -4096
956bad_protocol = -2147483648
957failed_to_connect = -25600
958dropped = -4096
959bad_announcement = -1024
960
961[peers.backoff_durations]
962low = '30s'
963medium = '3m'
964high = '15m'
965max = '1h'
966
967[sessions]
968session_command_buffer = 32
969session_event_buffer = 260
970
971[sessions.limits]
972
973[sessions.initial_internal_request_timeout]
974secs = 20
975nanos = 0
976
977[sessions.protocol_breach_request_timeout]
978secs = 120
979nanos = 0
980#";
981        let conf: Config = toml::from_str(alpha_0_0_18).unwrap();
982        assert_eq!(conf.stages.execution.max_duration, Some(Duration::from_secs(10 * 60)));
983
984        let alpha_0_0_19 = r"#
985[stages.headers]
986downloader_max_concurrent_requests = 100
987downloader_min_concurrent_requests = 5
988downloader_max_buffered_responses = 100
989downloader_request_limit = 1000
990commit_threshold = 10000
991
992[stages.total_difficulty]
993commit_threshold = 100000
994
995[stages.bodies]
996downloader_request_limit = 200
997downloader_stream_batch_size = 1000
998downloader_max_buffered_blocks_size_bytes = 2147483648
999downloader_min_concurrent_requests = 5
1000downloader_max_concurrent_requests = 100
1001
1002[stages.sender_recovery]
1003commit_threshold = 5000000
1004
1005[stages.execution]
1006max_blocks = 500000
1007max_changes = 5000000
1008max_cumulative_gas = 1500000000000
1009max_duration = '10m'
1010
1011[stages.account_hashing]
1012clean_threshold = 500000
1013commit_threshold = 100000
1014
1015[stages.storage_hashing]
1016clean_threshold = 500000
1017commit_threshold = 100000
1018
1019[stages.merkle]
1020clean_threshold = 50000
1021
1022[stages.transaction_lookup]
1023commit_threshold = 5000000
1024
1025[stages.index_account_history]
1026commit_threshold = 100000
1027
1028[stages.index_storage_history]
1029commit_threshold = 100000
1030
1031[peers]
1032refill_slots_interval = '5s'
1033trusted_nodes = []
1034connect_trusted_nodes_only = false
1035max_backoff_count = 5
1036ban_duration = '12h'
1037
1038[peers.connection_info]
1039max_outbound = 100
1040max_inbound = 30
1041max_concurrent_outbound_dials = 10
1042
1043[peers.reputation_weights]
1044bad_message = -16384
1045bad_block = -16384
1046bad_transactions = -16384
1047already_seen_transactions = 0
1048timeout = -4096
1049bad_protocol = -2147483648
1050failed_to_connect = -25600
1051dropped = -4096
1052bad_announcement = -1024
1053
1054[peers.backoff_durations]
1055low = '30s'
1056medium = '3m'
1057high = '15m'
1058max = '1h'
1059
1060[sessions]
1061session_command_buffer = 32
1062session_event_buffer = 260
1063
1064[sessions.limits]
1065
1066[sessions.initial_internal_request_timeout]
1067secs = 20
1068nanos = 0
1069
1070[sessions.protocol_breach_request_timeout]
1071secs = 120
1072nanos = 0
1073#";
1074        let _conf: Config = toml::from_str(alpha_0_0_19).unwrap();
1075    }
1076
1077    // ensures prune config deserialization is backwards compatible
1078    #[test]
1079    fn test_backwards_compatibility_prune_full() {
1080        let s = r"#
1081[prune]
1082block_interval = 5
1083
1084[prune.segments]
1085sender_recovery = { distance = 16384 }
1086transaction_lookup = 'full'
1087receipts = { distance = 16384 }
1088#";
1089        let _conf: Config = toml::from_str(s).unwrap();
1090    }
1091
1092    #[test]
1093    fn test_prune_config_merge() {
1094        let mut config1 = PruneConfig {
1095            block_interval: 5,
1096            segments: PruneModes {
1097                sender_recovery: Some(PruneMode::Full),
1098                transaction_lookup: None,
1099                receipts: Some(PruneMode::Distance(1000)),
1100                account_history: None,
1101                storage_history: Some(PruneMode::Before(5000)),
1102                bodies_history: None,
1103                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([(
1104                    Address::random(),
1105                    PruneMode::Full,
1106                )])),
1107            },
1108        };
1109
1110        let config2 = PruneConfig {
1111            block_interval: 10,
1112            segments: PruneModes {
1113                sender_recovery: Some(PruneMode::Distance(500)),
1114                transaction_lookup: Some(PruneMode::Full),
1115                receipts: Some(PruneMode::Full),
1116                account_history: Some(PruneMode::Distance(2000)),
1117                storage_history: Some(PruneMode::Distance(3000)),
1118                bodies_history: None,
1119                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([
1120                    (Address::random(), PruneMode::Distance(1000)),
1121                    (Address::random(), PruneMode::Before(2000)),
1122                ])),
1123            },
1124        };
1125
1126        let original_filter = config1.segments.receipts_log_filter.clone();
1127        config1.merge(config2);
1128
1129        // Check that the configuration has been merged. Any configuration present in config1
1130        // should not be overwritten by config2
1131        assert_eq!(config1.block_interval, 10);
1132        assert_eq!(config1.segments.sender_recovery, Some(PruneMode::Full));
1133        assert_eq!(config1.segments.transaction_lookup, Some(PruneMode::Full));
1134        assert_eq!(config1.segments.receipts, Some(PruneMode::Distance(1000)));
1135        assert_eq!(config1.segments.account_history, Some(PruneMode::Distance(2000)));
1136        assert_eq!(config1.segments.storage_history, Some(PruneMode::Before(5000)));
1137        assert_eq!(config1.segments.receipts_log_filter, original_filter);
1138    }
1139
1140    #[test]
1141    fn test_conf_trust_nodes_only() {
1142        let trusted_nodes_only = r"#
1143[peers]
1144trusted_nodes_only = true
1145#";
1146        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
1147        assert!(conf.peers.trusted_nodes_only);
1148
1149        let trusted_nodes_only = r"#
1150[peers]
1151connect_trusted_nodes_only = true
1152#";
1153        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
1154        assert!(conf.peers.trusted_nodes_only);
1155    }
1156
1157    #[test]
1158    fn test_can_support_dns_in_trusted_nodes() {
1159        let reth_toml = r#"
1160    [peers]
1161    trusted_nodes = [
1162        "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
1163        "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303"
1164    ]
1165    "#;
1166
1167        let conf: Config = toml::from_str(reth_toml).unwrap();
1168        assert_eq!(conf.peers.trusted_nodes.len(), 2);
1169
1170        let expected_enodes = vec![
1171            "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
1172            "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303",
1173        ];
1174
1175        for enode in expected_enodes {
1176            let node = TrustedPeer::from_str(enode).unwrap();
1177            assert!(conf.peers.trusted_nodes.contains(&node));
1178        }
1179    }
1180}