reth_config/
config.rs

1//! Configuration files.
2use reth_network_types::{PeersConfig, SessionsConfig};
3use reth_prune_types::PruneModes;
4use reth_stages_types::ExecutionStageThresholds;
5use reth_static_file_types::StaticFileSegment;
6use std::{
7    collections::HashMap,
8    path::{Path, PathBuf},
9    time::Duration,
10};
11use url::Url;
12
/// File extension required for reth configuration files (see [`Config::save`]).
#[cfg(feature = "serde")]
const EXTENSION: &str = "toml";

/// The default prune block interval (minimum pruning interval measured in blocks)
pub const DEFAULT_BLOCK_INTERVAL: usize = 5;
18
/// Configuration for the reth node.
///
/// All fields fall back to their defaults when missing from the configuration
/// file (container-level `serde(default)`).
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct Config {
    /// Configuration for each stage in the pipeline.
    pub stages: StageConfig,
    /// Configuration for pruning.
    #[cfg_attr(feature = "serde", serde(default))]
    pub prune: PruneConfig,
    /// Configuration for the discovery service.
    // NOTE(review): this field holds a `PeersConfig`; the "discovery service"
    // wording looks stale — confirm against the network crate.
    pub peers: PeersConfig,
    /// Configuration for peer sessions.
    pub sessions: SessionsConfig,
    /// Configuration for static files.
    #[cfg_attr(feature = "serde", serde(default))]
    pub static_files: StaticFilesConfig,
}
37
impl Config {
    /// Sets the pruning configuration.
    ///
    /// Replaces the entire [`PruneConfig`], including the block interval and
    /// all per-segment prune modes.
    pub fn set_prune_config(&mut self, prune_config: PruneConfig) {
        self.prune = prune_config;
    }
}
44
#[cfg(feature = "serde")]
impl Config {
    /// Loads a [`Config`] from the given path.
    ///
    /// If no file exists at `path`, a fresh configuration file populated with
    /// default values is written there (creating parent directories as needed)
    /// and the defaults are returned.
    pub fn from_path(path: impl AsRef<Path>) -> eyre::Result<Self> {
        let path = path.as_ref();
        match std::fs::read_to_string(path) {
            Ok(contents) => {
                toml::from_str(&contents).map_err(|e| eyre::eyre!("Failed to parse TOML: {e}"))
            }
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
                // Missing file: materialize the defaults on disk so subsequent
                // runs read the same configuration.
                if let Some(dir) = path.parent() {
                    std::fs::create_dir_all(dir)
                        .map_err(|e| eyre::eyre!("Failed to create directory: {e}"))?;
                }
                let default_config = Self::default();
                let serialized = toml::to_string_pretty(&default_config)
                    .map_err(|e| eyre::eyre!("Failed to serialize to TOML: {e}"))?;
                std::fs::write(path, serialized)
                    .map_err(|e| eyre::eyre!("Failed to write configuration file: {e}"))?;
                Ok(default_config)
            }
            Err(err) => Err(eyre::eyre!("Failed to load configuration: {err}")),
        }
    }

    /// Returns the [`PeersConfig`] for the node.
    ///
    /// If a peers file is provided, the basic nodes from the file are added to the configuration.
    /// Falls back to the plain peers configuration if the file cannot be used.
    pub fn peers_config_with_basic_nodes_from_file(
        &self,
        peers_file: Option<&Path>,
    ) -> PeersConfig {
        match self.peers.clone().with_basic_nodes_from_file(peers_file) {
            Ok(config) => config,
            Err(_) => self.peers.clone(),
        }
    }

    /// Saves the configuration as a TOML file at `path`.
    ///
    /// Returns an `InvalidInput` error unless the path carries the required
    /// `toml` extension.
    pub fn save(&self, path: &Path) -> Result<(), std::io::Error> {
        let has_required_extension = path.extension() == Some(std::ffi::OsStr::new(EXTENSION));
        if !has_required_extension {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                format!("reth config file extension must be '{EXTENSION}'"),
            ));
        }

        let serialized = toml::to_string(self)
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()))?;
        std::fs::write(path, serialized)
    }
}
102
/// Configuration for each stage in the pipeline.
///
/// Each field configures one pipeline stage; missing fields fall back to their
/// stage-specific defaults.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct StageConfig {
    /// ERA stage configuration.
    pub era: EraConfig,
    /// Header stage configuration.
    pub headers: HeadersConfig,
    /// Body stage configuration.
    pub bodies: BodiesConfig,
    /// Sender Recovery stage configuration.
    pub sender_recovery: SenderRecoveryConfig,
    /// Execution stage configuration.
    pub execution: ExecutionConfig,
    /// Prune stage configuration.
    pub prune: PruneStageConfig,
    /// Account Hashing stage configuration.
    pub account_hashing: HashingConfig,
    /// Storage Hashing stage configuration.
    pub storage_hashing: HashingConfig,
    /// Merkle stage configuration.
    pub merkle: MerkleConfig,
    /// Transaction Lookup stage configuration.
    pub transaction_lookup: TransactionLookupConfig,
    /// Index Account History stage configuration.
    pub index_account_history: IndexHistoryConfig,
    /// Index Storage History stage configuration.
    pub index_storage_history: IndexHistoryConfig,
    /// Common ETL related configuration.
    pub etl: EtlConfig,
}
135
136impl StageConfig {
137    /// The highest threshold (in number of blocks) for switching between incremental and full
138    /// calculations across `MerkleStage`, `AccountHashingStage` and `StorageHashingStage`. This is
139    /// required to figure out if can prune or not changesets on subsequent pipeline runs during
140    /// `ExecutionStage`
141    pub fn execution_external_clean_threshold(&self) -> u64 {
142        self.merkle
143            .incremental_threshold
144            .max(self.account_hashing.clean_threshold)
145            .max(self.storage_hashing.clean_threshold)
146    }
147}
148
/// ERA stage configuration.
///
/// At most one of `path` and `url` should be set; `folder` is required when
/// `url` is used.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct EraConfig {
    /// Path to a local directory where ERA1 files are located.
    ///
    /// Conflicts with `url`.
    pub path: Option<PathBuf>,
    /// The base URL of an ERA1 file host to download from.
    ///
    /// Conflicts with `path`.
    pub url: Option<Url>,
    /// Path to a directory where files downloaded from `url` will be stored until processed.
    ///
    /// Required for `url`.
    pub folder: Option<PathBuf>,
}
167
168impl EraConfig {
169    /// Sets `folder` for temporary downloads as a directory called "era" inside `dir`.
170    pub fn with_datadir(mut self, dir: impl AsRef<Path>) -> Self {
171        self.folder = Some(dir.as_ref().join("era"));
172        self
173    }
174}
175
/// Header stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HeadersConfig {
    /// The maximum number of requests to send concurrently.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// Maximum amount of responses to buffer internally.
    /// The response contains multiple headers.
    ///
    /// Default: 100
    pub downloader_max_buffered_responses: usize,
    /// The maximum number of headers to request from a peer at a time.
    ///
    /// Default: 1,000
    pub downloader_request_limit: u64,
    /// The maximum number of headers to download before committing progress to the database.
    ///
    /// Default: 10,000
    pub commit_threshold: u64,
}
197
198impl Default for HeadersConfig {
199    fn default() -> Self {
200        Self {
201            commit_threshold: 10_000,
202            downloader_request_limit: 1_000,
203            downloader_max_concurrent_requests: 100,
204            downloader_min_concurrent_requests: 5,
205            downloader_max_buffered_responses: 100,
206        }
207    }
208}
209
/// Body stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct BodiesConfig {
    /// The batch size of non-empty blocks per one request.
    ///
    /// Default: 200
    pub downloader_request_limit: u64,
    /// The maximum number of block bodies returned at once from the stream.
    ///
    /// Default: `1_000`
    pub downloader_stream_batch_size: usize,
    /// The size of the internal block buffer in bytes.
    ///
    /// Default: 2GB
    pub downloader_max_buffered_blocks_size_bytes: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// The maximum number of requests to send concurrently.
    /// This is equal to the max number of peers.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
}
237
238impl Default for BodiesConfig {
239    fn default() -> Self {
240        Self {
241            downloader_request_limit: 200,
242            downloader_stream_batch_size: 1_000,
243            downloader_max_buffered_blocks_size_bytes: 2 * 1024 * 1024 * 1024, // ~2GB
244            downloader_min_concurrent_requests: 5,
245            downloader_max_concurrent_requests: 100,
246        }
247    }
248}
249
/// Sender recovery stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct SenderRecoveryConfig {
    /// The maximum number of transactions to process before committing progress to the database.
    ///
    /// Default: 5,000,000
    pub commit_threshold: u64,
}
258
259impl Default for SenderRecoveryConfig {
260    fn default() -> Self {
261        Self { commit_threshold: 5_000_000 }
262    }
263}
264
/// Execution stage configuration.
///
/// The stage commits when the first of these limits is reached; each limit can
/// be disabled by setting it to `None`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct ExecutionConfig {
    /// The maximum number of blocks to process before the execution stage commits.
    ///
    /// Default: 500,000
    pub max_blocks: Option<u64>,
    /// The maximum number of state changes to keep in memory before the execution stage commits.
    ///
    /// Default: 5,000,000
    pub max_changes: Option<u64>,
    /// The maximum cumulative amount of gas to process before the execution stage commits.
    ///
    /// Default: 1,500,000,000,000 (50k full blocks of 30M gas)
    pub max_cumulative_gas: Option<u64>,
    /// The maximum time spent on blocks processing before the execution stage commits.
    ///
    /// Default: 10 minutes
    // Serialized in humantime format; the custom deserializer also accepts the
    // older `{ secs, nanos }` struct form for backwards compatibility.
    #[cfg_attr(
        feature = "serde",
        serde(
            serialize_with = "humantime_serde::serialize",
            deserialize_with = "deserialize_duration"
        )
    )]
    pub max_duration: Option<Duration>,
}
286
287impl Default for ExecutionConfig {
288    fn default() -> Self {
289        Self {
290            max_blocks: Some(500_000),
291            max_changes: Some(5_000_000),
292            // 50k full blocks of 30M gas
293            max_cumulative_gas: Some(30_000_000 * 50_000),
294            // 10 minutes
295            max_duration: Some(Duration::from_secs(10 * 60)),
296        }
297    }
298}
299
300impl From<ExecutionConfig> for ExecutionStageThresholds {
301    fn from(config: ExecutionConfig) -> Self {
302        Self {
303            max_blocks: config.max_blocks,
304            max_changes: config.max_changes,
305            max_cumulative_gas: config.max_cumulative_gas,
306            max_duration: config.max_duration,
307        }
308    }
309}
310
/// Prune stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneStageConfig {
    /// The maximum number of entries to prune before committing progress to the database.
    ///
    /// Default: 1,000,000
    pub commit_threshold: usize,
}
319
320impl Default for PruneStageConfig {
321    fn default() -> Self {
322        Self { commit_threshold: 1_000_000 }
323    }
324}
325
/// Hashing stage configuration.
///
/// Shared by the account hashing and storage hashing stages.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HashingConfig {
    /// The threshold (in number of blocks) for switching between
    /// incremental hashing and full hashing.
    ///
    /// Default: 500,000
    pub clean_threshold: u64,
    /// The maximum number of entities to process before committing progress to the database.
    ///
    /// Default: 100,000
    pub commit_threshold: u64,
}
337
338impl Default for HashingConfig {
339    fn default() -> Self {
340        Self { clean_threshold: 500_000, commit_threshold: 100_000 }
341    }
342}
343
/// Merkle stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct MerkleConfig {
    /// The number of blocks we will run the incremental root method for when we are catching up on
    /// the merkle stage for a large number of blocks.
    ///
    /// When we are catching up for a large number of blocks, we can only run the incremental root
    /// for a limited number of blocks, otherwise the incremental root method may cause the node to
    /// OOM. This number determines how many blocks in a row we will run the incremental root
    /// method for.
    ///
    /// Default: 7,000
    pub incremental_threshold: u64,
    /// The threshold (in number of blocks) for switching from incremental trie building of changes
    /// to whole rebuild.
    ///
    /// Default: 100,000
    pub rebuild_threshold: u64,
}
361
362impl Default for MerkleConfig {
363    fn default() -> Self {
364        Self { incremental_threshold: 7_000, rebuild_threshold: 100_000 }
365    }
366}
367
/// Transaction Lookup stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct TransactionLookupConfig {
    /// The maximum number of transactions to process before writing to disk.
    ///
    /// Default: 5,000,000
    pub chunk_size: u64,
}
376
377impl Default for TransactionLookupConfig {
378    fn default() -> Self {
379        Self { chunk_size: 5_000_000 }
380    }
381}
382
/// Common ETL related configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct EtlConfig {
    /// Data directory where temporary files are created.
    ///
    /// `None` means no directory has been configured.
    pub dir: Option<PathBuf>,
    /// The maximum size in bytes of data held in memory before being flushed to disk as a file.
    ///
    /// Default: 500 MB (see [`Self::default_file_size`])
    pub file_size: usize,
}
393
394impl Default for EtlConfig {
395    fn default() -> Self {
396        Self { dir: None, file_size: Self::default_file_size() }
397    }
398}
399
400impl EtlConfig {
401    /// Creates an ETL configuration
402    pub const fn new(dir: Option<PathBuf>, file_size: usize) -> Self {
403        Self { dir, file_size }
404    }
405
406    /// Return default ETL directory from datadir path.
407    pub fn from_datadir(path: &Path) -> PathBuf {
408        path.join("etl-tmp")
409    }
410
411    /// Default size in bytes of data held in memory before being flushed to disk as a file.
412    pub const fn default_file_size() -> usize {
413        // 500 MB
414        500 * (1024 * 1024)
415    }
416}
417
/// Static files configuration.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct StaticFilesConfig {
    /// Number of blocks per file for each segment.
    pub blocks_per_file: BlocksPerFileConfig,
}
426
/// Configuration for the number of blocks per file for each segment.
///
/// A `None` value leaves the segment unconfigured: it is omitted from
/// [`StaticFilesConfig::as_blocks_per_file_map`].
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct BlocksPerFileConfig {
    /// Number of blocks per file for the headers segment.
    pub headers: Option<u64>,
    /// Number of blocks per file for the transactions segment.
    pub transactions: Option<u64>,
    /// Number of blocks per file for the receipts segment.
    pub receipts: Option<u64>,
    /// Number of blocks per file for the transaction senders segment.
    pub transaction_senders: Option<u64>,
}
441
442impl StaticFilesConfig {
443    /// Validates the static files configuration.
444    ///
445    /// Returns an error if any blocks per file value is zero.
446    pub fn validate(&self) -> eyre::Result<()> {
447        let BlocksPerFileConfig { headers, transactions, receipts, transaction_senders } =
448            self.blocks_per_file;
449        eyre::ensure!(headers != Some(0), "Headers segment blocks per file must be greater than 0");
450        eyre::ensure!(
451            transactions != Some(0),
452            "Transactions segment blocks per file must be greater than 0"
453        );
454        eyre::ensure!(
455            receipts != Some(0),
456            "Receipts segment blocks per file must be greater than 0"
457        );
458        eyre::ensure!(
459            transaction_senders != Some(0),
460            "Transaction senders segment blocks per file must be greater than 0"
461        );
462        Ok(())
463    }
464
465    /// Converts the blocks per file configuration into a [`HashMap`] per segment.
466    pub fn as_blocks_per_file_map(&self) -> HashMap<StaticFileSegment, u64> {
467        let BlocksPerFileConfig { headers, transactions, receipts, transaction_senders } =
468            self.blocks_per_file;
469
470        let mut map = HashMap::new();
471        // Iterating over all possible segments allows us to do an exhaustive match here,
472        // to not forget to configure new segments in the future.
473        for segment in StaticFileSegment::iter() {
474            let blocks_per_file = match segment {
475                StaticFileSegment::Headers => headers,
476                StaticFileSegment::Transactions => transactions,
477                StaticFileSegment::Receipts => receipts,
478                StaticFileSegment::TransactionSenders => transaction_senders,
479            };
480
481            if let Some(blocks_per_file) = blocks_per_file {
482                map.insert(segment, blocks_per_file);
483            }
484        }
485        map
486    }
487}
488
/// History stage configuration.
///
/// Shared by the account-history and storage-history indexing stages.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct IndexHistoryConfig {
    /// The maximum number of blocks to process before committing progress to the database.
    ///
    /// Default: 100,000
    pub commit_threshold: u64,
}
497
498impl Default for IndexHistoryConfig {
499    fn default() -> Self {
500        Self { commit_threshold: 100_000 }
501    }
502}
503
/// Pruning configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneConfig {
    /// Minimum pruning interval measured in blocks.
    ///
    /// Default: [`DEFAULT_BLOCK_INTERVAL`]
    pub block_interval: usize,
    /// Pruning configuration for every part of the data that can be pruned.
    // The `parts` alias keeps configuration files written before the
    // segments rename deserializable (exercised by the backwards-compat test).
    #[cfg_attr(feature = "serde", serde(alias = "parts"))]
    pub segments: PruneModes,
}
515
516impl Default for PruneConfig {
517    fn default() -> Self {
518        Self { block_interval: DEFAULT_BLOCK_INTERVAL, segments: PruneModes::default() }
519    }
520}
521
522impl PruneConfig {
523    /// Returns whether this configuration is the default one.
524    pub fn is_default(&self) -> bool {
525        self == &Self::default()
526    }
527
528    /// Returns whether there is any kind of receipt pruning configuration.
529    pub fn has_receipts_pruning(&self) -> bool {
530        self.segments.receipts.is_some() || !self.segments.receipts_log_filter.is_empty()
531    }
532
533    /// Merges values from `other` into `self`.
534    /// - `Option<PruneMode>` fields: set from `other` only if `self` is `None`.
535    /// - `block_interval`: set from `other` only if `self.block_interval ==
536    ///   DEFAULT_BLOCK_INTERVAL`.
537    /// - `merkle_changesets`: always set from `other`.
538    /// - `receipts_log_filter`: set from `other` only if `self` is empty and `other` is non-empty.
539    pub fn merge(&mut self, other: Self) {
540        let Self {
541            block_interval,
542            segments:
543                PruneModes {
544                    sender_recovery,
545                    transaction_lookup,
546                    receipts,
547                    account_history,
548                    storage_history,
549                    bodies_history,
550                    merkle_changesets,
551                    receipts_log_filter,
552                },
553        } = other;
554
555        // Merge block_interval, only update if it's the default interval
556        if self.block_interval == DEFAULT_BLOCK_INTERVAL {
557            self.block_interval = block_interval;
558        }
559
560        // Merge the various segment prune modes
561        self.segments.sender_recovery = self.segments.sender_recovery.or(sender_recovery);
562        self.segments.transaction_lookup = self.segments.transaction_lookup.or(transaction_lookup);
563        self.segments.receipts = self.segments.receipts.or(receipts);
564        self.segments.account_history = self.segments.account_history.or(account_history);
565        self.segments.storage_history = self.segments.storage_history.or(storage_history);
566        self.segments.bodies_history = self.segments.bodies_history.or(bodies_history);
567        // Merkle changesets is not optional; always take the value from `other`
568        self.segments.merkle_changesets = merkle_changesets;
569
570        if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() {
571            self.segments.receipts_log_filter = receipts_log_filter;
572        }
573    }
574}
575
/// Helper to support older versions of Duration deserialization.
///
/// Accepts both the humantime string form (e.g. `"10m"`) and the plain
/// `Duration` form (`{ secs, nanos }`) written by older releases.
#[cfg(feature = "serde")]
fn deserialize_duration<'de, D>(deserializer: D) -> Result<Option<Duration>, D::Error>
where
    D: serde::de::Deserializer<'de>,
{
    // `untagged`: serde tries the variants in declaration order, so the
    // humantime form is attempted first, then the plain `Duration` fallback.
    #[derive(serde::Deserialize)]
    #[serde(untagged)]
    enum AnyDuration {
        #[serde(deserialize_with = "humantime_serde::deserialize")]
        Human(Option<Duration>),
        Duration(Option<Duration>),
    }

    // Collapse both variants into the inner `Option<Duration>`.
    <AnyDuration as serde::Deserialize>::deserialize(deserializer).map(|d| match d {
        AnyDuration::Human(duration) | AnyDuration::Duration(duration) => duration,
    })
}
594
595#[cfg(all(test, feature = "serde"))]
596mod tests {
597    use super::{Config, EXTENSION};
598    use crate::PruneConfig;
599    use alloy_primitives::Address;
600    use reth_network_peers::TrustedPeer;
601    use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig};
602    use std::{collections::BTreeMap, path::Path, str::FromStr, time::Duration};
603
604    fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) {
605        let temp_dir = tempfile::tempdir().unwrap();
606        let config_path = temp_dir.path().join(filename).with_extension(EXTENSION);
607
608        proc(&config_path);
609
610        temp_dir.close().unwrap()
611    }
612
613    /// Run a test function with a temporary config path as fixture.
614    fn with_config_path(test_fn: fn(&Path)) {
615        // Create a temporary directory for the config file
616        let config_dir = tempfile::tempdir().expect("creating test fixture failed");
617        // Create the config file path
618        let config_path =
619            config_dir.path().join("example-app").join("example-config").with_extension("toml");
620        // Run the test function with the config path
621        test_fn(&config_path);
622        config_dir.close().expect("removing test fixture failed");
623    }
624
625    #[test]
626    fn test_load_path_works() {
627        with_config_path(|path| {
628            let config = Config::from_path(path).expect("load_path failed");
629            assert_eq!(config, Config::default());
630        })
631    }
632
633    #[test]
634    fn test_load_path_reads_existing_config() {
635        with_config_path(|path| {
636            let config = Config::default();
637
638            // Create the parent directory if it doesn't exist
639            if let Some(parent) = path.parent() {
640                std::fs::create_dir_all(parent).expect("Failed to create directories");
641            }
642
643            // Write the config to the file
644            std::fs::write(path, toml::to_string(&config).unwrap())
645                .expect("Failed to write config");
646
647            // Load the config from the file and compare it
648            let loaded = Config::from_path(path).expect("load_path failed");
649            assert_eq!(config, loaded);
650        })
651    }
652
653    #[test]
654    fn test_load_path_fails_on_invalid_toml() {
655        with_config_path(|path| {
656            let invalid_toml = "invalid toml data";
657
658            // Create the parent directory if it doesn't exist
659            if let Some(parent) = path.parent() {
660                std::fs::create_dir_all(parent).expect("Failed to create directories");
661            }
662
663            // Write invalid TOML data to the file
664            std::fs::write(path, invalid_toml).expect("Failed to write invalid TOML");
665
666            // Attempt to load the config should fail
667            let result = Config::from_path(path);
668            assert!(result.is_err());
669        })
670    }
671
672    #[test]
673    fn test_load_path_creates_directory_if_not_exists() {
674        with_config_path(|path| {
675            // Ensure the directory does not exist
676            let parent = path.parent().unwrap();
677            assert!(!parent.exists());
678
679            // Load the configuration, which should create the directory and a default config file
680            let config = Config::from_path(path).expect("load_path failed");
681            assert_eq!(config, Config::default());
682
683            // The directory and file should now exist
684            assert!(parent.exists());
685            assert!(path.exists());
686        });
687    }
688
689    #[test]
690    fn test_store_config() {
691        with_tempdir("config-store-test", |config_path| {
692            let config = Config::default();
693            std::fs::write(
694                config_path,
695                toml::to_string(&config).expect("Failed to serialize config"),
696            )
697            .expect("Failed to write config file");
698        })
699    }
700
701    #[test]
702    fn test_store_config_method() {
703        with_tempdir("config-store-test-method", |config_path| {
704            let config = Config::default();
705            config.save(config_path).expect("Failed to store config");
706        })
707    }
708
709    #[test]
710    fn test_load_config() {
711        with_tempdir("config-load-test", |config_path| {
712            let config = Config::default();
713
714            // Write the config to a file
715            std::fs::write(
716                config_path,
717                toml::to_string(&config).expect("Failed to serialize config"),
718            )
719            .expect("Failed to write config file");
720
721            // Load the config from the file
722            let loaded_config = Config::from_path(config_path).unwrap();
723
724            // Compare the loaded config with the original config
725            assert_eq!(config, loaded_config);
726        })
727    }
728
729    #[test]
730    fn test_load_execution_stage() {
731        with_tempdir("config-load-test", |config_path| {
732            let mut config = Config::default();
733            config.stages.execution.max_duration = Some(Duration::from_secs(10 * 60));
734
735            // Write the config to a file
736            std::fs::write(
737                config_path,
738                toml::to_string(&config).expect("Failed to serialize config"),
739            )
740            .expect("Failed to write config file");
741
742            // Load the config from the file
743            let loaded_config = Config::from_path(config_path).unwrap();
744
745            // Compare the loaded config with the original config
746            assert_eq!(config, loaded_config);
747        })
748    }
749
750    // ensures config deserialization is backwards compatible
751    #[test]
752    fn test_backwards_compatibility() {
753        let alpha_0_0_8 = r"#
754[stages.headers]
755downloader_max_concurrent_requests = 100
756downloader_min_concurrent_requests = 5
757downloader_max_buffered_responses = 100
758downloader_request_limit = 1000
759commit_threshold = 10000
760
761[stages.bodies]
762downloader_request_limit = 200
763downloader_stream_batch_size = 1000
764downloader_max_buffered_blocks_size_bytes = 2147483648
765downloader_min_concurrent_requests = 5
766downloader_max_concurrent_requests = 100
767
768[stages.sender_recovery]
769commit_threshold = 5000000
770
771[stages.execution]
772max_blocks = 500000
773max_changes = 5000000
774
775[stages.account_hashing]
776clean_threshold = 500000
777commit_threshold = 100000
778
779[stages.storage_hashing]
780clean_threshold = 500000
781commit_threshold = 100000
782
783[stages.merkle]
784clean_threshold = 50000
785
786[stages.transaction_lookup]
787chunk_size = 5000000
788
789[stages.index_account_history]
790commit_threshold = 100000
791
792[stages.index_storage_history]
793commit_threshold = 100000
794
795[peers]
796refill_slots_interval = '1s'
797trusted_nodes = []
798connect_trusted_nodes_only = false
799max_backoff_count = 5
800ban_duration = '12h'
801
802[peers.connection_info]
803max_outbound = 100
804max_inbound = 30
805
806[peers.reputation_weights]
807bad_message = -16384
808bad_block = -16384
809bad_transactions = -16384
810already_seen_transactions = 0
811timeout = -4096
812bad_protocol = -2147483648
813failed_to_connect = -25600
814dropped = -4096
815
816[peers.backoff_durations]
817low = '30s'
818medium = '3m'
819high = '15m'
820max = '1h'
821
822[sessions]
823session_command_buffer = 32
824session_event_buffer = 260
825
826[sessions.limits]
827
828[sessions.initial_internal_request_timeout]
829secs = 20
830nanos = 0
831
832[sessions.protocol_breach_request_timeout]
833secs = 120
834nanos = 0
835
836[prune]
837block_interval = 5
838
839[prune.parts]
840sender_recovery = { distance = 16384 }
841transaction_lookup = 'full'
842receipts = { before = 1920000 }
843account_history = { distance = 16384 }
844storage_history = { distance = 16384 }
845[prune.parts.receipts_log_filter]
846'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
847'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
848#";
849        let _conf: Config = toml::from_str(alpha_0_0_8).unwrap();
850
851        let alpha_0_0_11 = r"#
852[prune.segments]
853sender_recovery = { distance = 16384 }
854transaction_lookup = 'full'
855receipts = { before = 1920000 }
856account_history = { distance = 16384 }
857storage_history = { distance = 16384 }
858[prune.segments.receipts_log_filter]
859'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
860'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
861#";
862        let _conf: Config = toml::from_str(alpha_0_0_11).unwrap();
863
864        let alpha_0_0_18 = r"#
865[stages.headers]
866downloader_max_concurrent_requests = 100
867downloader_min_concurrent_requests = 5
868downloader_max_buffered_responses = 100
869downloader_request_limit = 1000
870commit_threshold = 10000
871
872[stages.total_difficulty]
873commit_threshold = 100000
874
875[stages.bodies]
876downloader_request_limit = 200
877downloader_stream_batch_size = 1000
878downloader_max_buffered_blocks_size_bytes = 2147483648
879downloader_min_concurrent_requests = 5
880downloader_max_concurrent_requests = 100
881
882[stages.sender_recovery]
883commit_threshold = 5000000
884
885[stages.execution]
886max_blocks = 500000
887max_changes = 5000000
888max_cumulative_gas = 1500000000000
889[stages.execution.max_duration]
890secs = 600
891nanos = 0
892
893[stages.account_hashing]
894clean_threshold = 500000
895commit_threshold = 100000
896
897[stages.storage_hashing]
898clean_threshold = 500000
899commit_threshold = 100000
900
901[stages.merkle]
902clean_threshold = 50000
903
904[stages.transaction_lookup]
905commit_threshold = 5000000
906
907[stages.index_account_history]
908commit_threshold = 100000
909
910[stages.index_storage_history]
911commit_threshold = 100000
912
913[peers]
914refill_slots_interval = '5s'
915trusted_nodes = []
916connect_trusted_nodes_only = false
917max_backoff_count = 5
918ban_duration = '12h'
919
920[peers.connection_info]
921max_outbound = 100
922max_inbound = 30
923max_concurrent_outbound_dials = 10
924
925[peers.reputation_weights]
926bad_message = -16384
927bad_block = -16384
928bad_transactions = -16384
929already_seen_transactions = 0
930timeout = -4096
931bad_protocol = -2147483648
932failed_to_connect = -25600
933dropped = -4096
934bad_announcement = -1024
935
936[peers.backoff_durations]
937low = '30s'
938medium = '3m'
939high = '15m'
940max = '1h'
941
942[sessions]
943session_command_buffer = 32
944session_event_buffer = 260
945
946[sessions.limits]
947
948[sessions.initial_internal_request_timeout]
949secs = 20
950nanos = 0
951
952[sessions.protocol_breach_request_timeout]
953secs = 120
954nanos = 0
955#";
956        let conf: Config = toml::from_str(alpha_0_0_18).unwrap();
957        assert_eq!(conf.stages.execution.max_duration, Some(Duration::from_secs(10 * 60)));
958
959        let alpha_0_0_19 = r"#
960[stages.headers]
961downloader_max_concurrent_requests = 100
962downloader_min_concurrent_requests = 5
963downloader_max_buffered_responses = 100
964downloader_request_limit = 1000
965commit_threshold = 10000
966
967[stages.total_difficulty]
968commit_threshold = 100000
969
970[stages.bodies]
971downloader_request_limit = 200
972downloader_stream_batch_size = 1000
973downloader_max_buffered_blocks_size_bytes = 2147483648
974downloader_min_concurrent_requests = 5
975downloader_max_concurrent_requests = 100
976
977[stages.sender_recovery]
978commit_threshold = 5000000
979
980[stages.execution]
981max_blocks = 500000
982max_changes = 5000000
983max_cumulative_gas = 1500000000000
984max_duration = '10m'
985
986[stages.account_hashing]
987clean_threshold = 500000
988commit_threshold = 100000
989
990[stages.storage_hashing]
991clean_threshold = 500000
992commit_threshold = 100000
993
994[stages.merkle]
995clean_threshold = 50000
996
997[stages.transaction_lookup]
998commit_threshold = 5000000
999
1000[stages.index_account_history]
1001commit_threshold = 100000
1002
1003[stages.index_storage_history]
1004commit_threshold = 100000
1005
1006[peers]
1007refill_slots_interval = '5s'
1008trusted_nodes = []
1009connect_trusted_nodes_only = false
1010max_backoff_count = 5
1011ban_duration = '12h'
1012
1013[peers.connection_info]
1014max_outbound = 100
1015max_inbound = 30
1016max_concurrent_outbound_dials = 10
1017
1018[peers.reputation_weights]
1019bad_message = -16384
1020bad_block = -16384
1021bad_transactions = -16384
1022already_seen_transactions = 0
1023timeout = -4096
1024bad_protocol = -2147483648
1025failed_to_connect = -25600
1026dropped = -4096
1027bad_announcement = -1024
1028
1029[peers.backoff_durations]
1030low = '30s'
1031medium = '3m'
1032high = '15m'
1033max = '1h'
1034
1035[sessions]
1036session_command_buffer = 32
1037session_event_buffer = 260
1038
1039[sessions.limits]
1040
1041[sessions.initial_internal_request_timeout]
1042secs = 20
1043nanos = 0
1044
1045[sessions.protocol_breach_request_timeout]
1046secs = 120
1047nanos = 0
1048#";
1049        let _conf: Config = toml::from_str(alpha_0_0_19).unwrap();
1050    }
1051
1052    // ensures prune config deserialization is backwards compatible
1053    #[test]
1054    fn test_backwards_compatibility_prune_full() {
1055        let s = r"#
1056[prune]
1057block_interval = 5
1058
1059[prune.segments]
1060sender_recovery = { distance = 16384 }
1061transaction_lookup = 'full'
1062receipts = { distance = 16384 }
1063#";
1064        let _conf: Config = toml::from_str(s).unwrap();
1065
1066        let s = r"#
1067[prune]
1068block_interval = 5
1069
1070[prune.segments]
1071sender_recovery = { distance = 16384 }
1072transaction_lookup = 'full'
1073receipts = 'full'
1074#";
1075        let err = toml::from_str::<Config>(s).unwrap_err().to_string();
1076        assert!(err.contains("invalid value: string \"full\""), "{}", err);
1077    }
1078
1079    #[test]
1080    fn test_prune_config_merge() {
1081        let mut config1 = PruneConfig {
1082            block_interval: 5,
1083            segments: PruneModes {
1084                sender_recovery: Some(PruneMode::Full),
1085                transaction_lookup: None,
1086                receipts: Some(PruneMode::Distance(1000)),
1087                account_history: None,
1088                storage_history: Some(PruneMode::Before(5000)),
1089                bodies_history: None,
1090                merkle_changesets: PruneMode::Before(0),
1091                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([(
1092                    Address::random(),
1093                    PruneMode::Full,
1094                )])),
1095            },
1096        };
1097
1098        let config2 = PruneConfig {
1099            block_interval: 10,
1100            segments: PruneModes {
1101                sender_recovery: Some(PruneMode::Distance(500)),
1102                transaction_lookup: Some(PruneMode::Full),
1103                receipts: Some(PruneMode::Full),
1104                account_history: Some(PruneMode::Distance(2000)),
1105                storage_history: Some(PruneMode::Distance(3000)),
1106                bodies_history: None,
1107                merkle_changesets: PruneMode::Distance(10000),
1108                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([
1109                    (Address::random(), PruneMode::Distance(1000)),
1110                    (Address::random(), PruneMode::Before(2000)),
1111                ])),
1112            },
1113        };
1114
1115        let original_filter = config1.segments.receipts_log_filter.clone();
1116        config1.merge(config2);
1117
1118        // Check that the configuration has been merged. Any configuration present in config1
1119        // should not be overwritten by config2
1120        assert_eq!(config1.block_interval, 10);
1121        assert_eq!(config1.segments.sender_recovery, Some(PruneMode::Full));
1122        assert_eq!(config1.segments.transaction_lookup, Some(PruneMode::Full));
1123        assert_eq!(config1.segments.receipts, Some(PruneMode::Distance(1000)));
1124        assert_eq!(config1.segments.account_history, Some(PruneMode::Distance(2000)));
1125        assert_eq!(config1.segments.storage_history, Some(PruneMode::Before(5000)));
1126        assert_eq!(config1.segments.merkle_changesets, PruneMode::Distance(10000));
1127        assert_eq!(config1.segments.receipts_log_filter, original_filter);
1128    }
1129
1130    #[test]
1131    fn test_conf_trust_nodes_only() {
1132        let trusted_nodes_only = r"#
1133[peers]
1134trusted_nodes_only = true
1135#";
1136        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
1137        assert!(conf.peers.trusted_nodes_only);
1138
1139        let trusted_nodes_only = r"#
1140[peers]
1141connect_trusted_nodes_only = true
1142#";
1143        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
1144        assert!(conf.peers.trusted_nodes_only);
1145    }
1146
1147    #[test]
1148    fn test_can_support_dns_in_trusted_nodes() {
1149        let reth_toml = r#"
1150    [peers]
1151    trusted_nodes = [
1152        "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
1153        "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303"
1154    ]
1155    "#;
1156
1157        let conf: Config = toml::from_str(reth_toml).unwrap();
1158        assert_eq!(conf.peers.trusted_nodes.len(), 2);
1159
1160        let expected_enodes = vec![
1161            "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
1162            "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303",
1163        ];
1164
1165        for enode in expected_enodes {
1166            let node = TrustedPeer::from_str(enode).unwrap();
1167            assert!(conf.peers.trusted_nodes.contains(&node));
1168        }
1169    }
1170}