//! Configuration files.
use reth_network_types::{PeersConfig, SessionsConfig};
use reth_prune_types::PruneModes;
use reth_stages_types::ExecutionStageThresholds;
use reth_static_file_types::StaticFileSegment;
use std::{
    collections::HashMap,
    path::{Path, PathBuf},
    time::Duration,
};
use url::Url;

#[cfg(feature = "serde")]
const EXTENSION: &str = "toml";

/// The default prune block interval
pub const DEFAULT_BLOCK_INTERVAL: usize = 5;

/// Configuration for the reth node.
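///
/// On disk this is a TOML file (see [`Config::from_path`]); each field below maps to a
/// top-level table. A minimal sketch of such a file, with illustrative values (omitted
/// fields fall back to their defaults):
///
/// ```toml
/// [prune]
/// block_interval = 5
///
/// [stages.execution]
/// max_blocks = 500000
/// max_duration = '10m'
/// ```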
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct Config {
    /// Configuration for each stage in the pipeline.
    // TODO(onbjerg): Can we make this easier to maintain when we add/remove stages?
    pub stages: StageConfig,
    /// Configuration for pruning.
    #[cfg_attr(feature = "serde", serde(default))]
    pub prune: PruneConfig,
    /// Configuration for the peer manager.
    pub peers: PeersConfig,
    /// Configuration for peer sessions.
    pub sessions: SessionsConfig,
    /// Configuration for static files.
    #[cfg_attr(feature = "serde", serde(default))]
    pub static_files: StaticFilesConfig,
}

impl Config {
    /// Sets the pruning configuration.
    pub fn set_prune_config(&mut self, prune_config: PruneConfig) {
        self.prune = prune_config;
    }
}

#[cfg(feature = "serde")]
impl Config {
    /// Load a [`Config`] from a specified path.
    ///
    /// A new configuration file is created with default values if none
    /// exists.
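    ///
    /// # Example
    ///
    /// A minimal sketch (requires the `serde` feature; the path is illustrative):
    ///
    /// ```ignore
    /// // Creates `datadir/reth.toml` with defaults on first use, loads it afterwards.
    /// let config = Config::from_path("datadir/reth.toml")?;
    /// assert_eq!(config.prune.block_interval, 5);
    /// ```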
    pub fn from_path(path: impl AsRef<Path>) -> eyre::Result<Self> {
        let path = path.as_ref();
        match std::fs::read_to_string(path) {
            Ok(cfg_string) => {
                toml::from_str(&cfg_string).map_err(|e| eyre::eyre!("Failed to parse TOML: {e}"))
            }
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
                if let Some(parent) = path.parent() {
                    std::fs::create_dir_all(parent)
                        .map_err(|e| eyre::eyre!("Failed to create directory: {e}"))?;
                }
                let cfg = Self::default();
                let s = toml::to_string_pretty(&cfg)
                    .map_err(|e| eyre::eyre!("Failed to serialize to TOML: {e}"))?;
                std::fs::write(path, s)
                    .map_err(|e| eyre::eyre!("Failed to write configuration file: {e}"))?;
                Ok(cfg)
            }
            Err(e) => Err(eyre::eyre!("Failed to load configuration: {e}")),
        }
    }

    /// Returns the [`PeersConfig`] for the node.
    ///
    /// If a peers file is provided, the basic nodes from the file are added to the configuration.
    /// If reading the file fails, the configured peers are returned unchanged.
    pub fn peers_config_with_basic_nodes_from_file(
        &self,
        peers_file: Option<&Path>,
    ) -> PeersConfig {
        self.peers
            .clone()
            .with_basic_nodes_from_file(peers_file)
            .unwrap_or_else(|_| self.peers.clone())
    }

    /// Saves the configuration as a TOML file at the given path.
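    ///
    /// # Example
    ///
    /// A sketch of the extension check (the paths are illustrative):
    ///
    /// ```ignore
    /// let config = Config::default();
    /// // The file must have a `.toml` extension, otherwise saving fails.
    /// assert!(config.save(Path::new("reth.json")).is_err());
    /// config.save(Path::new("reth.toml"))?;
    /// ```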
    pub fn save(&self, path: &Path) -> Result<(), std::io::Error> {
        if path.extension() != Some(std::ffi::OsStr::new(EXTENSION)) {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                format!("reth config file extension must be '{EXTENSION}'"),
            ));
        }

        std::fs::write(
            path,
            toml::to_string(self)
                .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()))?,
        )
    }
}

/// Configuration for each stage in the pipeline.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct StageConfig {
    /// ERA stage configuration.
    pub era: EraConfig,
    /// Header stage configuration.
    pub headers: HeadersConfig,
    /// Body stage configuration.
    pub bodies: BodiesConfig,
    /// Sender Recovery stage configuration.
    pub sender_recovery: SenderRecoveryConfig,
    /// Execution stage configuration.
    pub execution: ExecutionConfig,
    /// Prune stage configuration.
    pub prune: PruneStageConfig,
    /// Account Hashing stage configuration.
    pub account_hashing: HashingConfig,
    /// Storage Hashing stage configuration.
    pub storage_hashing: HashingConfig,
    /// Merkle stage configuration.
    pub merkle: MerkleConfig,
    /// Transaction Lookup stage configuration.
    pub transaction_lookup: TransactionLookupConfig,
    /// Index Account History stage configuration.
    pub index_account_history: IndexHistoryConfig,
    /// Index Storage History stage configuration.
    pub index_storage_history: IndexHistoryConfig,
    /// Common ETL related configuration.
    pub etl: EtlConfig,
}

impl StageConfig {
    /// The highest threshold (in number of blocks) for switching between incremental and full
    /// calculations across `MerkleStage`, `AccountHashingStage` and `StorageHashingStage`.
    ///
    /// This is required to determine whether changesets can be pruned on subsequent pipeline
    /// runs during the `ExecutionStage`.
    pub fn execution_external_clean_threshold(&self) -> u64 {
        self.merkle
            .incremental_threshold
            .max(self.account_hashing.clean_threshold)
            .max(self.storage_hashing.clean_threshold)
    }
}

/// ERA stage configuration.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct EraConfig {
    /// Path to a local directory where ERA1 files are located.
    ///
    /// Conflicts with `url`.
    pub path: Option<PathBuf>,
    /// The base URL of an ERA1 file host to download from.
    ///
    /// Conflicts with `path`.
    pub url: Option<Url>,
    /// Path to a directory where files downloaded from `url` will be stored until processed.
    ///
    /// Required for `url`.
    pub folder: Option<PathBuf>,
}

impl EraConfig {
    /// Sets `folder` for temporary downloads as a directory called "era" inside `dir`.
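    ///
    /// A small sketch (the datadir path is illustrative):
    ///
    /// ```ignore
    /// let era = EraConfig::default().with_datadir("datadir");
    /// assert_eq!(era.folder, Some(Path::new("datadir").join("era")));
    /// ```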
    pub fn with_datadir(mut self, dir: impl AsRef<Path>) -> Self {
        self.folder = Some(dir.as_ref().join("era"));
        self
    }
}

/// Header stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HeadersConfig {
    /// The maximum number of requests to send concurrently.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// The maximum number of responses to buffer internally.
    /// Each response contains multiple headers.
    pub downloader_max_buffered_responses: usize,
    /// The maximum number of headers to request from a peer at a time.
    pub downloader_request_limit: u64,
    /// The maximum number of headers to download before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for HeadersConfig {
    fn default() -> Self {
        Self {
            commit_threshold: 10_000,
            downloader_request_limit: 1_000,
            downloader_max_concurrent_requests: 100,
            downloader_min_concurrent_requests: 5,
            downloader_max_buffered_responses: 100,
        }
    }
}

/// Body stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct BodiesConfig {
    /// The number of non-empty blocks to request in a single batch.
    ///
    /// Default: 200
    pub downloader_request_limit: u64,
    /// The maximum number of block bodies returned at once from the stream.
    ///
    /// Default: 1_000
    pub downloader_stream_batch_size: usize,
    /// The size of the internal block buffer in bytes.
    ///
    /// Default: 2GB
    pub downloader_max_buffered_blocks_size_bytes: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// The maximum number of requests to send concurrently.
    /// This is equal to the maximum number of peers.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
}

impl Default for BodiesConfig {
    fn default() -> Self {
        Self {
            downloader_request_limit: 200,
            downloader_stream_batch_size: 1_000,
            downloader_max_buffered_blocks_size_bytes: 2 * 1024 * 1024 * 1024, // ~2GB
            downloader_min_concurrent_requests: 5,
            downloader_max_concurrent_requests: 100,
        }
    }
}

/// Sender recovery stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct SenderRecoveryConfig {
    /// The maximum number of transactions to process before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for SenderRecoveryConfig {
    fn default() -> Self {
        Self { commit_threshold: 5_000_000 }
    }
}

/// Execution stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct ExecutionConfig {
    /// The maximum number of blocks to process before the execution stage commits.
    pub max_blocks: Option<u64>,
    /// The maximum number of state changes to keep in memory before the execution stage commits.
    pub max_changes: Option<u64>,
    /// The maximum cumulative amount of gas to process before the execution stage commits.
    pub max_cumulative_gas: Option<u64>,
    /// The maximum time spent processing blocks before the execution stage commits.
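    ///
    /// Serialized as a humantime string (e.g. `'10m'`); deserialization also accepts the
    /// older `{ secs, nanos }` table form for backwards compatibility (see
    /// [`deserialize_duration`]).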
    #[cfg_attr(
        feature = "serde",
        serde(
            serialize_with = "humantime_serde::serialize",
            deserialize_with = "deserialize_duration"
        )
    )]
    pub max_duration: Option<Duration>,
}

impl Default for ExecutionConfig {
    fn default() -> Self {
        Self {
            max_blocks: Some(500_000),
            max_changes: Some(5_000_000),
            // 50k full blocks of 30M gas
            max_cumulative_gas: Some(30_000_000 * 50_000),
            // 10 minutes
            max_duration: Some(Duration::from_secs(10 * 60)),
        }
    }
}

impl From<ExecutionConfig> for ExecutionStageThresholds {
    fn from(config: ExecutionConfig) -> Self {
        Self {
            max_blocks: config.max_blocks,
            max_changes: config.max_changes,
            max_cumulative_gas: config.max_cumulative_gas,
            max_duration: config.max_duration,
        }
    }
}

/// Prune stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneStageConfig {
    /// The maximum number of entries to prune before committing progress to the database.
    pub commit_threshold: usize,
}

impl Default for PruneStageConfig {
    fn default() -> Self {
        Self { commit_threshold: 1_000_000 }
    }
}

/// Hashing stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HashingConfig {
    /// The threshold (in number of blocks) for switching between
    /// incremental hashing and full hashing.
    pub clean_threshold: u64,
    /// The maximum number of entities to process before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for HashingConfig {
    fn default() -> Self {
        Self { clean_threshold: 500_000, commit_threshold: 100_000 }
    }
}

/// Merkle stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct MerkleConfig {
    /// The number of blocks to run the incremental root method for when catching up on the
    /// merkle stage over a large range of blocks.
    ///
    /// Running the incremental root over too many blocks at once may cause the node to OOM,
    /// so this determines how many blocks in a row the incremental root method is run for.
    pub incremental_threshold: u64,
    /// The threshold (in number of blocks) for switching from incremental trie building of
    /// changes to a whole rebuild.
    pub rebuild_threshold: u64,
}

impl Default for MerkleConfig {
    fn default() -> Self {
        Self { incremental_threshold: 7_000, rebuild_threshold: 100_000 }
    }
}

/// Transaction Lookup stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct TransactionLookupConfig {
    /// The maximum number of transactions to process before writing to disk.
    pub chunk_size: u64,
}

impl Default for TransactionLookupConfig {
    fn default() -> Self {
        Self { chunk_size: 5_000_000 }
    }
}

/// Common ETL related configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct EtlConfig {
    /// Data directory where temporary files are created.
    pub dir: Option<PathBuf>,
    /// The maximum size in bytes of data held in memory before being flushed to disk as a file.
    pub file_size: usize,
}

impl Default for EtlConfig {
    fn default() -> Self {
        Self { dir: None, file_size: Self::default_file_size() }
    }
}

impl EtlConfig {
    /// Creates an ETL configuration.
    pub const fn new(dir: Option<PathBuf>, file_size: usize) -> Self {
        Self { dir, file_size }
    }

    /// Returns the default ETL directory inside the given datadir path.
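    ///
    /// A small sketch of wiring this into an [`EtlConfig`] (the datadir path is illustrative):
    ///
    /// ```ignore
    /// let etl = EtlConfig::new(
    ///     Some(EtlConfig::from_datadir(Path::new("datadir"))),
    ///     EtlConfig::default_file_size(),
    /// );
    /// ```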
    pub fn from_datadir(path: &Path) -> PathBuf {
        path.join("etl-tmp")
    }

    /// Default size in bytes of data held in memory before being flushed to disk as a file.
    pub const fn default_file_size() -> usize {
        // 500 MB
        500 * (1024 * 1024)
    }
}

/// Static files configuration.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct StaticFilesConfig {
    /// Number of blocks per file for each segment.
    pub blocks_per_file: BlocksPerFileConfig,
}

/// Configuration for the number of blocks per file for each segment.
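///
/// Segments left unset keep their built-in defaults. A sketch of the corresponding TOML,
/// with illustrative values:
///
/// ```toml
/// [static_files.blocks_per_file]
/// headers = 500000
/// transactions = 500000
/// ```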
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct BlocksPerFileConfig {
    /// Number of blocks per file for the headers segment.
    pub headers: Option<u64>,
    /// Number of blocks per file for the transactions segment.
    pub transactions: Option<u64>,
    /// Number of blocks per file for the receipts segment.
    pub receipts: Option<u64>,
    /// Number of blocks per file for the transaction senders segment.
    pub transaction_senders: Option<u64>,
}

impl StaticFilesConfig {
    /// Validates the static files configuration.
    ///
    /// Returns an error if any blocks per file value is zero.
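    ///
    /// A small sketch:
    ///
    /// ```ignore
    /// let mut config = StaticFilesConfig::default();
    /// assert!(config.validate().is_ok());
    /// config.blocks_per_file.headers = Some(0);
    /// assert!(config.validate().is_err());
    /// ```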
    pub fn validate(&self) -> eyre::Result<()> {
        let BlocksPerFileConfig { headers, transactions, receipts, transaction_senders } =
            self.blocks_per_file;
        eyre::ensure!(headers != Some(0), "Headers segment blocks per file must be greater than 0");
        eyre::ensure!(
            transactions != Some(0),
            "Transactions segment blocks per file must be greater than 0"
        );
        eyre::ensure!(
            receipts != Some(0),
            "Receipts segment blocks per file must be greater than 0"
        );
        eyre::ensure!(
            transaction_senders != Some(0),
            "Transaction senders segment blocks per file must be greater than 0"
        );
        Ok(())
    }

    /// Converts the blocks per file configuration into a [`HashMap`] keyed by segment.
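    ///
    /// Segments left unset are omitted from the map. A small sketch:
    ///
    /// ```ignore
    /// let mut config = StaticFilesConfig::default();
    /// config.blocks_per_file.headers = Some(500_000);
    /// let map = config.as_blocks_per_file_map();
    /// assert_eq!(map.get(&StaticFileSegment::Headers), Some(&500_000));
    /// assert!(!map.contains_key(&StaticFileSegment::Receipts));
    /// ```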
    pub fn as_blocks_per_file_map(&self) -> HashMap<StaticFileSegment, u64> {
        let BlocksPerFileConfig { headers, transactions, receipts, transaction_senders } =
            self.blocks_per_file;

        let mut map = HashMap::new();
        // Iterating over all possible segments lets us match exhaustively here, so that
        // newly added segments cannot be forgotten in the future.
        for segment in StaticFileSegment::iter() {
            let blocks_per_file = match segment {
                StaticFileSegment::Headers => headers,
                StaticFileSegment::Transactions => transactions,
                StaticFileSegment::Receipts => receipts,
                StaticFileSegment::TransactionSenders => transaction_senders,
            };

            if let Some(blocks_per_file) = blocks_per_file {
                map.insert(segment, blocks_per_file);
            }
        }
        map
    }
}

/// History stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct IndexHistoryConfig {
    /// The maximum number of blocks to process before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for IndexHistoryConfig {
    fn default() -> Self {
        Self { commit_threshold: 100_000 }
    }
}

/// Pruning configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneConfig {
    /// Minimum pruning interval measured in blocks.
    pub block_interval: usize,
    /// Pruning configuration for every part of the data that can be pruned.
    #[cfg_attr(feature = "serde", serde(alias = "parts"))]
    pub segments: PruneModes,
}

impl Default for PruneConfig {
    fn default() -> Self {
        Self { block_interval: DEFAULT_BLOCK_INTERVAL, segments: PruneModes::default() }
    }
}

impl PruneConfig {
    /// Returns whether this configuration is the default one.
    pub fn is_default(&self) -> bool {
        self == &Self::default()
    }

    /// Returns whether there is any kind of receipt pruning configuration.
    pub fn has_receipts_pruning(&self) -> bool {
        self.segments.receipts.is_some() || !self.segments.receipts_log_filter.is_empty()
    }

    /// Merges another `PruneConfig` into this one, taking values from the other config if and only
    /// if the corresponding value in this config is not set.
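    ///
    /// Note that `merkle_changesets` is not optional and is always taken from the other
    /// config. A sketch of the semantics (using `PruneMode` from `reth_prune_types`):
    ///
    /// ```ignore
    /// let mut ours = PruneConfig::default();
    /// ours.segments.receipts = Some(PruneMode::Distance(1000));
    ///
    /// let mut theirs = PruneConfig::default();
    /// theirs.segments.receipts = Some(PruneMode::Full);
    /// theirs.segments.account_history = Some(PruneMode::Distance(2000));
    ///
    /// ours.merge(theirs);
    /// // Our existing value wins; unset values are filled in from the other config.
    /// assert_eq!(ours.segments.receipts, Some(PruneMode::Distance(1000)));
    /// assert_eq!(ours.segments.account_history, Some(PruneMode::Distance(2000)));
    /// ```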
    pub fn merge(&mut self, other: Self) {
        let Self {
            block_interval,
            segments:
                PruneModes {
                    sender_recovery,
                    transaction_lookup,
                    receipts,
                    account_history,
                    storage_history,
                    bodies_history,
                    merkle_changesets,
                    receipts_log_filter,
                },
        } = other;

        // Merge `block_interval`: only update ours if it is still the default interval.
        if self.block_interval == DEFAULT_BLOCK_INTERVAL {
            self.block_interval = block_interval;
        }

        // Merge the various segment prune modes, preferring our own values when set.
        self.segments.sender_recovery = self.segments.sender_recovery.or(sender_recovery);
        self.segments.transaction_lookup = self.segments.transaction_lookup.or(transaction_lookup);
        self.segments.receipts = self.segments.receipts.or(receipts);
        self.segments.account_history = self.segments.account_history.or(account_history);
        self.segments.storage_history = self.segments.storage_history.or(storage_history);
        self.segments.bodies_history = self.segments.bodies_history.or(bodies_history);
        // `merkle_changesets` is not optional, so the incoming value always replaces ours.
        self.segments.merkle_changesets = merkle_changesets;

        if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() {
            self.segments.receipts_log_filter = receipts_log_filter;
        }
    }
}

/// Helper to deserialize a [`Duration`] from either a humantime string or the older
/// `{ secs, nanos }` table representation.
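///
/// Both TOML forms below deserialize to the same value (the backwards compatibility tests
/// in this file exercise both):
///
/// ```toml
/// # humantime string
/// max_duration = '10m'
/// ```
///
/// ```toml
/// # older { secs, nanos } table
/// [stages.execution.max_duration]
/// secs = 600
/// nanos = 0
/// ```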
#[cfg(feature = "serde")]
fn deserialize_duration<'de, D>(deserializer: D) -> Result<Option<Duration>, D::Error>
where
    D: serde::de::Deserializer<'de>,
{
    #[derive(serde::Deserialize)]
    #[serde(untagged)]
    enum AnyDuration {
        #[serde(deserialize_with = "humantime_serde::deserialize")]
        Human(Option<Duration>),
        Duration(Option<Duration>),
    }

    <AnyDuration as serde::Deserialize>::deserialize(deserializer).map(|d| match d {
        AnyDuration::Human(duration) | AnyDuration::Duration(duration) => duration,
    })
}

#[cfg(all(test, feature = "serde"))]
mod tests {
    use super::{Config, EXTENSION};
    use crate::PruneConfig;
    use alloy_primitives::Address;
    use reth_network_peers::TrustedPeer;
    use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig};
    use std::{collections::BTreeMap, path::Path, str::FromStr, time::Duration};

    fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) {
        let temp_dir = tempfile::tempdir().unwrap();
        let config_path = temp_dir.path().join(filename).with_extension(EXTENSION);

        proc(&config_path);

        temp_dir.close().unwrap()
    }

    /// Run a test function with a temporary config path as fixture.
    fn with_config_path(test_fn: fn(&Path)) {
        // Create a temporary directory for the config file
        let config_dir = tempfile::tempdir().expect("creating test fixture failed");
        // Create the config file path
        let config_path =
            config_dir.path().join("example-app").join("example-config").with_extension("toml");
        // Run the test function with the config path
        test_fn(&config_path);
        config_dir.close().expect("removing test fixture failed");
    }

    #[test]
    fn test_load_path_works() {
        with_config_path(|path| {
            let config = Config::from_path(path).expect("load_path failed");
            assert_eq!(config, Config::default());
        })
    }

    #[test]
    fn test_load_path_reads_existing_config() {
        with_config_path(|path| {
            let config = Config::default();

            // Create the parent directory if it doesn't exist
            if let Some(parent) = path.parent() {
                std::fs::create_dir_all(parent).expect("Failed to create directories");
            }

            // Write the config to the file
            std::fs::write(path, toml::to_string(&config).unwrap())
                .expect("Failed to write config");

            // Load the config from the file and compare it
            let loaded = Config::from_path(path).expect("load_path failed");
            assert_eq!(config, loaded);
        })
    }

    #[test]
    fn test_load_path_fails_on_invalid_toml() {
        with_config_path(|path| {
            let invalid_toml = "invalid toml data";

            // Create the parent directory if it doesn't exist
            if let Some(parent) = path.parent() {
                std::fs::create_dir_all(parent).expect("Failed to create directories");
            }

            // Write invalid TOML data to the file
            std::fs::write(path, invalid_toml).expect("Failed to write invalid TOML");

            // Attempting to load the config should fail
            let result = Config::from_path(path);
            assert!(result.is_err());
        })
    }

    #[test]
    fn test_load_path_creates_directory_if_not_exists() {
        with_config_path(|path| {
            // Ensure the directory does not exist
            let parent = path.parent().unwrap();
            assert!(!parent.exists());

            // Load the configuration, which should create the directory and a default config file
            let config = Config::from_path(path).expect("load_path failed");
            assert_eq!(config, Config::default());

            // The directory and file should now exist
            assert!(parent.exists());
            assert!(path.exists());
        });
    }

    #[test]
    fn test_store_config() {
        with_tempdir("config-store-test", |config_path| {
            let config = Config::default();
            std::fs::write(
                config_path,
                toml::to_string(&config).expect("Failed to serialize config"),
            )
            .expect("Failed to write config file");
        })
    }

    #[test]
    fn test_store_config_method() {
        with_tempdir("config-store-test-method", |config_path| {
            let config = Config::default();
            config.save(config_path).expect("Failed to store config");
        })
    }

    #[test]
    fn test_load_config() {
        with_tempdir("config-load-test", |config_path| {
            let config = Config::default();

            // Write the config to a file
            std::fs::write(
                config_path,
                toml::to_string(&config).expect("Failed to serialize config"),
            )
            .expect("Failed to write config file");

            // Load the config from the file
            let loaded_config = Config::from_path(config_path).unwrap();

            // Compare the loaded config with the original config
            assert_eq!(config, loaded_config);
        })
    }

    #[test]
    fn test_load_execution_stage() {
        with_tempdir("config-load-test", |config_path| {
            let mut config = Config::default();
            config.stages.execution.max_duration = Some(Duration::from_secs(10 * 60));

            // Write the config to a file
            std::fs::write(
                config_path,
                toml::to_string(&config).expect("Failed to serialize config"),
            )
            .expect("Failed to write config file");

            // Load the config from the file
            let loaded_config = Config::from_path(config_path).unwrap();

            // Compare the loaded config with the original config
            assert_eq!(config, loaded_config);
        })
    }

    // ensures config deserialization is backwards compatible
    #[test]
    fn test_backwards_compatibility() {
        let alpha_0_0_8 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
chunk_size = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '1s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0

[prune]
block_interval = 5

[prune.parts]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { before = 1920000 }
account_history = { distance = 16384 }
storage_history = { distance = 16384 }
[prune.parts.receipts_log_filter]
'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
#";
        let _conf: Config = toml::from_str(alpha_0_0_8).unwrap();

        let alpha_0_0_11 = r"#
[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { before = 1920000 }
account_history = { distance = 16384 }
storage_history = { distance = 16384 }
[prune.segments.receipts_log_filter]
'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
#";
        let _conf: Config = toml::from_str(alpha_0_0_11).unwrap();

        let alpha_0_0_18 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.total_difficulty]
commit_threshold = 100000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000
max_cumulative_gas = 1500000000000
[stages.execution.max_duration]
secs = 600
nanos = 0

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
commit_threshold = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '5s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30
max_concurrent_outbound_dials = 10

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096
bad_announcement = -1024

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0
#";
        let conf: Config = toml::from_str(alpha_0_0_18).unwrap();
        assert_eq!(conf.stages.execution.max_duration, Some(Duration::from_secs(10 * 60)));

        let alpha_0_0_19 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.total_difficulty]
commit_threshold = 100000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000
max_cumulative_gas = 1500000000000
max_duration = '10m'

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
commit_threshold = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '5s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30
max_concurrent_outbound_dials = 10

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096
bad_announcement = -1024

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0
#";
        let _conf: Config = toml::from_str(alpha_0_0_19).unwrap();
    }

    // ensures prune config deserialization is backwards compatible
    #[test]
    fn test_backwards_compatibility_prune_full() {
        let s = r"#
[prune]
block_interval = 5

[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { distance = 16384 }
#";
        let _conf: Config = toml::from_str(s).unwrap();

        let s = r"#
[prune]
block_interval = 5

[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = 'full'
#";
        let err = toml::from_str::<Config>(s).unwrap_err().to_string();
        assert!(err.contains("invalid value: string \"full\""), "{}", err);
    }

    #[test]
    fn test_prune_config_merge() {
        let mut config1 = PruneConfig {
            block_interval: 5,
            segments: PruneModes {
                sender_recovery: Some(PruneMode::Full),
                transaction_lookup: None,
                receipts: Some(PruneMode::Distance(1000)),
                account_history: None,
                storage_history: Some(PruneMode::Before(5000)),
                bodies_history: None,
                merkle_changesets: PruneMode::Before(0),
                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([(
                    Address::random(),
                    PruneMode::Full,
                )])),
            },
        };

        let config2 = PruneConfig {
            block_interval: 10,
            segments: PruneModes {
                sender_recovery: Some(PruneMode::Distance(500)),
                transaction_lookup: Some(PruneMode::Full),
                receipts: Some(PruneMode::Full),
                account_history: Some(PruneMode::Distance(2000)),
                storage_history: Some(PruneMode::Distance(3000)),
                bodies_history: None,
                merkle_changesets: PruneMode::Distance(10000),
                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([
                    (Address::random(), PruneMode::Distance(1000)),
                    (Address::random(), PruneMode::Before(2000)),
                ])),
            },
        };

        let original_filter = config1.segments.receipts_log_filter.clone();
        config1.merge(config2);

        // Check that the configuration has been merged. Any configuration present in config1
        // should not be overwritten by config2
        assert_eq!(config1.block_interval, 10);
        assert_eq!(config1.segments.sender_recovery, Some(PruneMode::Full));
        assert_eq!(config1.segments.transaction_lookup, Some(PruneMode::Full));
        assert_eq!(config1.segments.receipts, Some(PruneMode::Distance(1000)));
        assert_eq!(config1.segments.account_history, Some(PruneMode::Distance(2000)));
        assert_eq!(config1.segments.storage_history, Some(PruneMode::Before(5000)));
        assert_eq!(config1.segments.merkle_changesets, PruneMode::Distance(10000));
        assert_eq!(config1.segments.receipts_log_filter, original_filter);
    }

    #[test]
    fn test_conf_trust_nodes_only() {
        let trusted_nodes_only = r"#
[peers]
trusted_nodes_only = true
#";
        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
        assert!(conf.peers.trusted_nodes_only);

        let trusted_nodes_only = r"#
[peers]
connect_trusted_nodes_only = true
#";
        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
        assert!(conf.peers.trusted_nodes_only);
    }

    #[test]
    fn test_can_support_dns_in_trusted_nodes() {
        let reth_toml = r#"
    [peers]
    trusted_nodes = [
        "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
        "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303"
    ]
    "#;

        let conf: Config = toml::from_str(reth_toml).unwrap();
        assert_eq!(conf.peers.trusted_nodes.len(), 2);

        let expected_enodes = vec![
            "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
            "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303",
        ];

        for enode in expected_enodes {
            let node = TrustedPeer::from_str(enode).unwrap();
            assert!(conf.peers.trusted_nodes.contains(&node));
        }
    }
}