reth_config/config.rs

//! Configuration files.
use reth_network_types::{PeersConfig, SessionsConfig};
use reth_prune_types::PruneModes;
use reth_stages_types::ExecutionStageThresholds;
use reth_static_file_types::StaticFileSegment;
use std::{
    collections::HashMap,
    path::{Path, PathBuf},
    time::Duration,
};
use url::Url;

#[cfg(feature = "serde")]
const EXTENSION: &str = "toml";

/// The default prune block interval
pub const DEFAULT_BLOCK_INTERVAL: usize = 5;

/// Configuration for the reth node.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct Config {
    /// Configuration for each stage in the pipeline.
    // TODO(onbjerg): Can we make this easier to maintain when we add/remove stages?
    pub stages: StageConfig,
    /// Configuration for pruning.
    #[cfg_attr(feature = "serde", serde(default))]
    pub prune: PruneConfig,
    /// Configuration for peers, including connection limits, reputation weights and trusted
    /// nodes.
    pub peers: PeersConfig,
    /// Configuration for peer sessions.
    pub sessions: SessionsConfig,
    /// Configuration for static files.
    #[cfg_attr(feature = "serde", serde(default))]
    pub static_files: StaticFilesConfig,
}

impl Config {
    /// Sets the pruning configuration.
    pub fn set_prune_config(&mut self, prune_config: PruneConfig) {
        self.prune = prune_config;
    }
}

#[cfg(feature = "serde")]
impl Config {
    /// Load a [`Config`] from a specified path.
    ///
    /// A new configuration file is created with default values if none
    /// exists.
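    ///
    /// # Example
    ///
    /// A minimal sketch of loading (or creating) a config file; the path below is
    /// hypothetical:
    ///
    /// ```no_run
    /// use reth_config::Config;
    ///
    /// // Creates `datadir/reth.toml` with default values if it does not exist yet.
    /// let config = Config::from_path("datadir/reth.toml").expect("failed to load config");
    /// ```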
    pub fn from_path(path: impl AsRef<Path>) -> eyre::Result<Self> {
        let path = path.as_ref();
        match std::fs::read_to_string(path) {
            Ok(cfg_string) => {
                toml::from_str(&cfg_string).map_err(|e| eyre::eyre!("Failed to parse TOML: {e}"))
            }
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
                if let Some(parent) = path.parent() {
                    std::fs::create_dir_all(parent)
                        .map_err(|e| eyre::eyre!("Failed to create directory: {e}"))?;
                }
                let cfg = Self::default();
                let s = toml::to_string_pretty(&cfg)
                    .map_err(|e| eyre::eyre!("Failed to serialize to TOML: {e}"))?;
                std::fs::write(path, s)
                    .map_err(|e| eyre::eyre!("Failed to write configuration file: {e}"))?;
                Ok(cfg)
            }
            Err(e) => Err(eyre::eyre!("Failed to load configuration: {e}")),
        }
    }

    /// Returns the [`PeersConfig`] for the node.
    ///
    /// If a peers file is provided, the basic nodes from the file are added to the configuration.
    pub fn peers_config_with_basic_nodes_from_file(
        &self,
        peers_file: Option<&Path>,
    ) -> PeersConfig {
        self.peers
            .clone()
            .with_basic_nodes_from_file(peers_file)
            .unwrap_or_else(|_| self.peers.clone())
    }

    /// Saves the configuration to a TOML file.
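    ///
    /// # Example
    ///
    /// A minimal sketch; `save` rejects any path without a `.toml` extension:
    ///
    /// ```no_run
    /// use reth_config::Config;
    ///
    /// let config = Config::default();
    /// config.save("reth.toml".as_ref()).expect("failed to save config");
    /// ```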
    pub fn save(&self, path: &Path) -> Result<(), std::io::Error> {
        if path.extension() != Some(std::ffi::OsStr::new(EXTENSION)) {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                format!("reth config file extension must be '{EXTENSION}'"),
            ));
        }

        std::fs::write(
            path,
            toml::to_string(self)
                .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()))?,
        )
    }
}

/// Configuration for each stage in the pipeline.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct StageConfig {
    /// ERA stage configuration.
    pub era: EraConfig,
    /// Header stage configuration.
    pub headers: HeadersConfig,
    /// Body stage configuration.
    pub bodies: BodiesConfig,
    /// Sender Recovery stage configuration.
    pub sender_recovery: SenderRecoveryConfig,
    /// Execution stage configuration.
    pub execution: ExecutionConfig,
    /// Prune stage configuration.
    pub prune: PruneStageConfig,
    /// Account Hashing stage configuration.
    pub account_hashing: HashingConfig,
    /// Storage Hashing stage configuration.
    pub storage_hashing: HashingConfig,
    /// Merkle stage configuration.
    pub merkle: MerkleConfig,
    /// Transaction Lookup stage configuration.
    pub transaction_lookup: TransactionLookupConfig,
    /// Index Account History stage configuration.
    pub index_account_history: IndexHistoryConfig,
    /// Index Storage History stage configuration.
    pub index_storage_history: IndexHistoryConfig,
    /// Common ETL related configuration.
    pub etl: EtlConfig,
}

impl StageConfig {
    /// The highest threshold (in number of blocks) for switching between incremental and full
    /// calculations across `MerkleStage`, `AccountHashingStage` and `StorageHashingStage`. This is
    /// required to determine whether changesets can be pruned on subsequent pipeline runs during
    /// `ExecutionStage`.
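    ///
    /// # Example
    ///
    /// A small sketch with the default configuration (path assumes the public `config`
    /// module), where the hashing clean thresholds (500,000) exceed the merkle incremental
    /// threshold (7,000):
    ///
    /// ```
    /// let stages = reth_config::config::StageConfig::default();
    /// assert_eq!(stages.execution_external_clean_threshold(), 500_000);
    /// ```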
    pub fn execution_external_clean_threshold(&self) -> u64 {
        self.merkle
            .incremental_threshold
            .max(self.account_hashing.clean_threshold)
            .max(self.storage_hashing.clean_threshold)
    }
}

/// ERA stage configuration.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct EraConfig {
    /// Path to a local directory where ERA1 files are located.
    ///
    /// Conflicts with `url`.
    pub path: Option<PathBuf>,
    /// The base URL of an ERA1 file host to download from.
    ///
    /// Conflicts with `path`.
    pub url: Option<Url>,
    /// Path to a directory where files downloaded from `url` will be stored until processed.
    ///
    /// Required for `url`.
    pub folder: Option<PathBuf>,
}

impl EraConfig {
    /// Sets `folder` for temporary downloads as a directory called "era" inside `dir`.
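    ///
    /// # Example
    ///
    /// A minimal sketch (path assumes the public `config` module):
    ///
    /// ```
    /// use reth_config::config::EraConfig;
    /// use std::path::PathBuf;
    ///
    /// let datadir = PathBuf::from("/tmp/datadir");
    /// let era = EraConfig::default().with_datadir(&datadir);
    /// assert_eq!(era.folder, Some(datadir.join("era")));
    /// ```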
    pub fn with_datadir(mut self, dir: impl AsRef<Path>) -> Self {
        self.folder = Some(dir.as_ref().join("era"));
        self
    }
}

/// Header stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HeadersConfig {
    /// The maximum number of requests to send concurrently.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// Maximum number of responses to buffer internally.
    /// Each response contains multiple headers.
    pub downloader_max_buffered_responses: usize,
    /// The maximum number of headers to request from a peer at a time.
    pub downloader_request_limit: u64,
    /// The maximum number of headers to download before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for HeadersConfig {
    fn default() -> Self {
        Self {
            commit_threshold: 10_000,
            downloader_request_limit: 1_000,
            downloader_max_concurrent_requests: 100,
            downloader_min_concurrent_requests: 5,
            downloader_max_buffered_responses: 100,
        }
    }
}

/// Body stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct BodiesConfig {
    /// The number of non-empty blocks to request in a single batch.
    ///
    /// Default: 200
    pub downloader_request_limit: u64,
    /// The maximum number of block bodies returned at once from the stream.
    ///
    /// Default: `1_000`
    pub downloader_stream_batch_size: usize,
    /// The size of the internal block buffer in bytes.
    ///
    /// Default: 2 GiB
    pub downloader_max_buffered_blocks_size_bytes: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// The maximum number of requests to send concurrently.
    /// This is equal to the max number of peers.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
}

impl Default for BodiesConfig {
    fn default() -> Self {
        Self {
            downloader_request_limit: 200,
            downloader_stream_batch_size: 1_000,
            downloader_max_buffered_blocks_size_bytes: 2 * 1024 * 1024 * 1024, // 2 GiB
            downloader_min_concurrent_requests: 5,
            downloader_max_concurrent_requests: 100,
        }
    }
}

/// Sender recovery stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct SenderRecoveryConfig {
    /// The maximum number of transactions to process before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for SenderRecoveryConfig {
    fn default() -> Self {
        Self { commit_threshold: 5_000_000 }
    }
}

/// Execution stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct ExecutionConfig {
    /// The maximum number of blocks to process before the execution stage commits.
    pub max_blocks: Option<u64>,
    /// The maximum number of state changes to keep in memory before the execution stage commits.
    pub max_changes: Option<u64>,
    /// The maximum cumulative amount of gas to process before the execution stage commits.
    pub max_cumulative_gas: Option<u64>,
    /// The maximum time spent processing blocks before the execution stage commits.
    #[cfg_attr(
        feature = "serde",
        serde(
            serialize_with = "humantime_serde::serialize",
            deserialize_with = "deserialize_duration"
        )
    )]
    pub max_duration: Option<Duration>,
}

impl Default for ExecutionConfig {
    fn default() -> Self {
        Self {
            max_blocks: Some(500_000),
            max_changes: Some(5_000_000),
            // 50k full blocks of 30M gas
            max_cumulative_gas: Some(30_000_000 * 50_000),
            // 10 minutes
            max_duration: Some(Duration::from_secs(10 * 60)),
        }
    }
}

impl From<ExecutionConfig> for ExecutionStageThresholds {
    fn from(config: ExecutionConfig) -> Self {
        Self {
            max_blocks: config.max_blocks,
            max_changes: config.max_changes,
            max_cumulative_gas: config.max_cumulative_gas,
            max_duration: config.max_duration,
        }
    }
}

/// Prune stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneStageConfig {
    /// The maximum number of entries to prune before committing progress to the database.
    pub commit_threshold: usize,
}

impl Default for PruneStageConfig {
    fn default() -> Self {
        Self { commit_threshold: 1_000_000 }
    }
}

/// Hashing stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HashingConfig {
    /// The threshold (in number of blocks) for switching between
    /// incremental hashing and full hashing.
    pub clean_threshold: u64,
    /// The maximum number of entities to process before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for HashingConfig {
    fn default() -> Self {
        Self { clean_threshold: 500_000, commit_threshold: 100_000 }
    }
}

/// Merkle stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct MerkleConfig {
    /// The number of blocks we will run the incremental root method for when we are catching up on
    /// the merkle stage for a large number of blocks.
    ///
    /// When we are catching up for a large number of blocks, we can only run the incremental root
    /// for a limited number of blocks, otherwise the incremental root method may cause the node to
    /// OOM. This number determines how many blocks in a row we will run the incremental root
    /// method for.
    pub incremental_threshold: u64,
    /// The threshold (in number of blocks) for switching from incremental trie building of changes
    /// to whole rebuild.
    pub rebuild_threshold: u64,
}

impl Default for MerkleConfig {
    fn default() -> Self {
        Self { incremental_threshold: 7_000, rebuild_threshold: 100_000 }
    }
}

/// Transaction Lookup stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct TransactionLookupConfig {
    /// The maximum number of transactions to process before writing to disk.
    pub chunk_size: u64,
}

impl Default for TransactionLookupConfig {
    fn default() -> Self {
        Self { chunk_size: 5_000_000 }
    }
}

/// Common ETL related configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct EtlConfig {
    /// Data directory where temporary files are created.
    pub dir: Option<PathBuf>,
    /// The maximum size in bytes of data held in memory before being flushed to disk as a file.
    pub file_size: usize,
}

impl Default for EtlConfig {
    fn default() -> Self {
        Self { dir: None, file_size: Self::default_file_size() }
    }
}

impl EtlConfig {
    /// Creates an ETL configuration.
    pub const fn new(dir: Option<PathBuf>, file_size: usize) -> Self {
        Self { dir, file_size }
    }

    /// Returns the default ETL directory given the datadir path.
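    ///
    /// # Example
    ///
    /// A minimal sketch (path assumes the public `config` module):
    ///
    /// ```
    /// use reth_config::config::EtlConfig;
    /// use std::path::Path;
    ///
    /// let dir = EtlConfig::from_datadir(Path::new("/tmp/datadir"));
    /// assert_eq!(dir, Path::new("/tmp/datadir").join("etl-tmp"));
    /// ```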
    pub fn from_datadir(path: &Path) -> PathBuf {
        path.join("etl-tmp")
    }

    /// Default size in bytes of data held in memory before being flushed to disk as a file.
    pub const fn default_file_size() -> usize {
        // 500 MiB
        500 * (1024 * 1024)
    }
}

/// Static files configuration.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct StaticFilesConfig {
    /// Number of blocks per file for each segment.
    pub blocks_per_file: BlocksPerFileConfig,
}

/// Configuration for the number of blocks per file for each segment.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct BlocksPerFileConfig {
    /// Number of blocks per file for the headers segment.
    pub headers: Option<u64>,
    /// Number of blocks per file for the transactions segment.
    pub transactions: Option<u64>,
    /// Number of blocks per file for the receipts segment.
    pub receipts: Option<u64>,
    /// Number of blocks per file for the transaction senders segment.
    pub transaction_senders: Option<u64>,
}

impl StaticFilesConfig {
    /// Validates the static files configuration.
    ///
    /// Returns an error if any blocks per file value is zero.
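    ///
    /// # Example
    ///
    /// A minimal sketch (paths assume the public `config` module):
    ///
    /// ```
    /// use reth_config::config::{BlocksPerFileConfig, StaticFilesConfig};
    ///
    /// let config = StaticFilesConfig {
    ///     blocks_per_file: BlocksPerFileConfig { headers: Some(0), ..Default::default() },
    /// };
    /// // A zero blocks-per-file value is rejected.
    /// assert!(config.validate().is_err());
    /// ```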
    pub fn validate(&self) -> eyre::Result<()> {
        let BlocksPerFileConfig { headers, transactions, receipts, transaction_senders } =
            self.blocks_per_file;
        eyre::ensure!(headers != Some(0), "Headers segment blocks per file must be greater than 0");
        eyre::ensure!(
            transactions != Some(0),
            "Transactions segment blocks per file must be greater than 0"
        );
        eyre::ensure!(
            receipts != Some(0),
            "Receipts segment blocks per file must be greater than 0"
        );
        eyre::ensure!(
            transaction_senders != Some(0),
            "Transaction senders segment blocks per file must be greater than 0"
        );
        Ok(())
    }

    /// Converts the blocks per file configuration into a [`HashMap`] per segment.
    pub fn as_blocks_per_file_map(&self) -> HashMap<StaticFileSegment, u64> {
        let BlocksPerFileConfig { headers, transactions, receipts, transaction_senders } =
            self.blocks_per_file;

        let mut map = HashMap::new();
        // Iterating over all possible segments allows us to do an exhaustive match here,
        // so we don't forget to configure new segments in the future.
        for segment in StaticFileSegment::iter() {
            let blocks_per_file = match segment {
                StaticFileSegment::Headers => headers,
                StaticFileSegment::Transactions => transactions,
                StaticFileSegment::Receipts => receipts,
                StaticFileSegment::TransactionSenders => transaction_senders,
            };

            if let Some(blocks_per_file) = blocks_per_file {
                map.insert(segment, blocks_per_file);
            }
        }
        map
    }
}

/// History stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct IndexHistoryConfig {
    /// The maximum number of blocks to process before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for IndexHistoryConfig {
    fn default() -> Self {
        Self { commit_threshold: 100_000 }
    }
}
/// Pruning configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneConfig {
    /// Minimum pruning interval measured in blocks.
    pub block_interval: usize,
    /// Pruning configuration for every part of the data that can be pruned.
    #[cfg_attr(feature = "serde", serde(alias = "parts"))]
    pub segments: PruneModes,
}

impl Default for PruneConfig {
    fn default() -> Self {
        Self { block_interval: DEFAULT_BLOCK_INTERVAL, segments: PruneModes::default() }
    }
}

impl PruneConfig {
    /// Returns whether this configuration is the default one.
    pub fn is_default(&self) -> bool {
        self == &Self::default()
    }

    /// Returns whether there is any kind of receipt pruning configuration.
    pub fn has_receipts_pruning(&self) -> bool {
        self.segments.receipts.is_some() || !self.segments.receipts_log_filter.is_empty()
    }

    /// Merges values from `other` into `self`.
    /// - `Option<PruneMode>` fields: set from `other` only if `self` is `None`.
    /// - `block_interval`: set from `other` only if `self.block_interval ==
    ///   DEFAULT_BLOCK_INTERVAL`.
    /// - `merkle_changesets`: always set from `other`.
    /// - `receipts_log_filter`: set from `other` only if `self` is empty and `other` is non-empty.
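    ///
    /// # Example
    ///
    /// A minimal sketch of the interval semantics (assuming `PruneConfig` is re-exported
    /// at the crate root, as in this crate's tests):
    ///
    /// ```
    /// use reth_config::PruneConfig;
    ///
    /// let mut local = PruneConfig::default();
    /// let other = PruneConfig { block_interval: 10, ..Default::default() };
    /// local.merge(other);
    /// // `local` was still at the default interval, so the value from `other` wins.
    /// assert_eq!(local.block_interval, 10);
    /// ```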
    pub fn merge(&mut self, other: Self) {
        let Self {
            block_interval,
            segments:
                PruneModes {
                    sender_recovery,
                    transaction_lookup,
                    receipts,
                    account_history,
                    storage_history,
                    bodies_history,
                    merkle_changesets,
                    receipts_log_filter,
                },
        } = other;

        // Merge `block_interval`, only updating it if it is still the default interval
        if self.block_interval == DEFAULT_BLOCK_INTERVAL {
            self.block_interval = block_interval;
        }

        // Merge the various segment prune modes
        self.segments.sender_recovery = self.segments.sender_recovery.or(sender_recovery);
        self.segments.transaction_lookup = self.segments.transaction_lookup.or(transaction_lookup);
        self.segments.receipts = self.segments.receipts.or(receipts);
        self.segments.account_history = self.segments.account_history.or(account_history);
        self.segments.storage_history = self.segments.storage_history.or(storage_history);
        self.segments.bodies_history = self.segments.bodies_history.or(bodies_history);
        // Merkle changesets is not optional; always take the value from `other`
        self.segments.merkle_changesets = merkle_changesets;

        if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() {
            self.segments.receipts_log_filter = receipts_log_filter;
        }
    }
}

/// Helper to support deserializing a [`Duration`] from both the humantime string format
/// (e.g. `'10m'`) and the older struct format (e.g. `{ secs = 600, nanos = 0 }`).
#[cfg(feature = "serde")]
fn deserialize_duration<'de, D>(deserializer: D) -> Result<Option<Duration>, D::Error>
where
    D: serde::de::Deserializer<'de>,
{
    #[derive(serde::Deserialize)]
    #[serde(untagged)]
    enum AnyDuration {
        #[serde(deserialize_with = "humantime_serde::deserialize")]
        Human(Option<Duration>),
        Duration(Option<Duration>),
    }

    <AnyDuration as serde::Deserialize>::deserialize(deserializer).map(|d| match d {
        AnyDuration::Human(duration) | AnyDuration::Duration(duration) => duration,
    })
}

#[cfg(all(test, feature = "serde"))]
mod tests {
    use super::{Config, EXTENSION};
    use crate::PruneConfig;
    use alloy_primitives::Address;
    use reth_network_peers::TrustedPeer;
    use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig};
    use std::{collections::BTreeMap, path::Path, str::FromStr, time::Duration};

    fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) {
        let temp_dir = tempfile::tempdir().unwrap();
        let config_path = temp_dir.path().join(filename).with_extension(EXTENSION);

        proc(&config_path);

        temp_dir.close().unwrap()
    }

    /// Run a test function with a temporary config path as fixture.
    fn with_config_path(test_fn: fn(&Path)) {
        // Create a temporary directory for the config file
        let config_dir = tempfile::tempdir().expect("creating test fixture failed");
        // Create the config file path
        let config_path =
            config_dir.path().join("example-app").join("example-config").with_extension("toml");
        // Run the test function with the config path
        test_fn(&config_path);
        config_dir.close().expect("removing test fixture failed");
    }

    #[test]
    fn test_load_path_works() {
        with_config_path(|path| {
            let config = Config::from_path(path).expect("load_path failed");
            assert_eq!(config, Config::default());
        })
    }

    #[test]
    fn test_load_path_reads_existing_config() {
        with_config_path(|path| {
            let config = Config::default();

            // Create the parent directory if it doesn't exist
            if let Some(parent) = path.parent() {
                std::fs::create_dir_all(parent).expect("Failed to create directories");
            }

            // Write the config to the file
            std::fs::write(path, toml::to_string(&config).unwrap())
                .expect("Failed to write config");

            // Load the config from the file and compare it
            let loaded = Config::from_path(path).expect("load_path failed");
            assert_eq!(config, loaded);
        })
    }

    #[test]
    fn test_load_path_fails_on_invalid_toml() {
        with_config_path(|path| {
            let invalid_toml = "invalid toml data";

            // Create the parent directory if it doesn't exist
            if let Some(parent) = path.parent() {
                std::fs::create_dir_all(parent).expect("Failed to create directories");
            }

            // Write invalid TOML data to the file
            std::fs::write(path, invalid_toml).expect("Failed to write invalid TOML");

            // Attempting to load the config should fail
            let result = Config::from_path(path);
            assert!(result.is_err());
        })
    }

    #[test]
    fn test_load_path_creates_directory_if_not_exists() {
        with_config_path(|path| {
            // Ensure the directory does not exist
            let parent = path.parent().unwrap();
            assert!(!parent.exists());

            // Load the configuration, which should create the directory and a default config file
            let config = Config::from_path(path).expect("load_path failed");
            assert_eq!(config, Config::default());

            // The directory and file should now exist
            assert!(parent.exists());
            assert!(path.exists());
        });
    }

    #[test]
    fn test_store_config() {
        with_tempdir("config-store-test", |config_path| {
            let config = Config::default();
            std::fs::write(
                config_path,
                toml::to_string(&config).expect("Failed to serialize config"),
            )
            .expect("Failed to write config file");
        })
    }

    #[test]
    fn test_store_config_method() {
        with_tempdir("config-store-test-method", |config_path| {
            let config = Config::default();
            config.save(config_path).expect("Failed to store config");
        })
    }

    #[test]
    fn test_load_config() {
        with_tempdir("config-load-test", |config_path| {
            let config = Config::default();

            // Write the config to a file
            std::fs::write(
                config_path,
                toml::to_string(&config).expect("Failed to serialize config"),
            )
            .expect("Failed to write config file");

            // Load the config from the file
            let loaded_config = Config::from_path(config_path).unwrap();

            // Compare the loaded config with the original config
            assert_eq!(config, loaded_config);
        })
    }

    #[test]
    fn test_load_execution_stage() {
        with_tempdir("config-load-test", |config_path| {
            let mut config = Config::default();
            config.stages.execution.max_duration = Some(Duration::from_secs(10 * 60));

            // Write the config to a file
            std::fs::write(
                config_path,
                toml::to_string(&config).expect("Failed to serialize config"),
            )
            .expect("Failed to write config file");

            // Load the config from the file
            let loaded_config = Config::from_path(config_path).unwrap();

            // Compare the loaded config with the original config
            assert_eq!(config, loaded_config);
        })
    }

    // ensures config deserialization is backwards compatible
    #[test]
    fn test_backwards_compatibility() {
        let alpha_0_0_8 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
chunk_size = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '1s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0

[prune]
block_interval = 5

[prune.parts]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { before = 1920000 }
account_history = { distance = 16384 }
storage_history = { distance = 16384 }
[prune.parts.receipts_log_filter]
'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
#";
        let _conf: Config = toml::from_str(alpha_0_0_8).unwrap();

        let alpha_0_0_11 = r"#
[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { before = 1920000 }
account_history = { distance = 16384 }
storage_history = { distance = 16384 }
[prune.segments.receipts_log_filter]
'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
#";
        let _conf: Config = toml::from_str(alpha_0_0_11).unwrap();

        let alpha_0_0_18 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.total_difficulty]
commit_threshold = 100000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000
max_cumulative_gas = 1500000000000
[stages.execution.max_duration]
secs = 600
nanos = 0

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
commit_threshold = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '5s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30
max_concurrent_outbound_dials = 10

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096
bad_announcement = -1024

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0
#";
        let conf: Config = toml::from_str(alpha_0_0_18).unwrap();
        assert_eq!(conf.stages.execution.max_duration, Some(Duration::from_secs(10 * 60)));

        let alpha_0_0_19 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.total_difficulty]
commit_threshold = 100000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000
max_cumulative_gas = 1500000000000
max_duration = '10m'

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
commit_threshold = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '5s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30
max_concurrent_outbound_dials = 10

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096
bad_announcement = -1024

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0
#";
        let _conf: Config = toml::from_str(alpha_0_0_19).unwrap();
    }

    // ensures prune config deserialization is backwards compatible
    #[test]
    fn test_backwards_compatibility_prune_full() {
        let s = r"#
[prune]
block_interval = 5

[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { distance = 16384 }
#";
        let _conf: Config = toml::from_str(s).unwrap();

        let s = r"#
[prune]
block_interval = 5

[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = 'full'
#";
        let err = toml::from_str::<Config>(s).unwrap_err().to_string();
        assert!(err.contains("invalid value: string \"full\""), "{}", err);
    }

    #[test]
    fn test_prune_config_merge() {
        let mut config1 = PruneConfig {
            block_interval: 5,
            segments: PruneModes {
                sender_recovery: Some(PruneMode::Full),
                transaction_lookup: None,
                receipts: Some(PruneMode::Distance(1000)),
                account_history: None,
                storage_history: Some(PruneMode::Before(5000)),
                bodies_history: None,
                merkle_changesets: PruneMode::Before(0),
                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([(
                    Address::random(),
                    PruneMode::Full,
                )])),
            },
        };

        let config2 = PruneConfig {
            block_interval: 10,
            segments: PruneModes {
                sender_recovery: Some(PruneMode::Distance(500)),
                transaction_lookup: Some(PruneMode::Full),
                receipts: Some(PruneMode::Full),
                account_history: Some(PruneMode::Distance(2000)),
                storage_history: Some(PruneMode::Distance(3000)),
                bodies_history: None,
                merkle_changesets: PruneMode::Distance(10000),
                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([
                    (Address::random(), PruneMode::Distance(1000)),
                    (Address::random(), PruneMode::Before(2000)),
                ])),
            },
        };

        let original_filter = config1.segments.receipts_log_filter.clone();
        config1.merge(config2);

        // Check that the configuration has been merged. Any configuration present in config1
        // should not be overwritten by config2
        assert_eq!(config1.block_interval, 10);
        assert_eq!(config1.segments.sender_recovery, Some(PruneMode::Full));
        assert_eq!(config1.segments.transaction_lookup, Some(PruneMode::Full));
        assert_eq!(config1.segments.receipts, Some(PruneMode::Distance(1000)));
        assert_eq!(config1.segments.account_history, Some(PruneMode::Distance(2000)));
        assert_eq!(config1.segments.storage_history, Some(PruneMode::Before(5000)));
        assert_eq!(config1.segments.merkle_changesets, PruneMode::Distance(10000));
        assert_eq!(config1.segments.receipts_log_filter, original_filter);
    }

    #[test]
    fn test_conf_trust_nodes_only() {
        let trusted_nodes_only = r"#
[peers]
trusted_nodes_only = true
#";
        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
        assert!(conf.peers.trusted_nodes_only);

        let trusted_nodes_only = r"#
[peers]
connect_trusted_nodes_only = true
#";
        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
        assert!(conf.peers.trusted_nodes_only);
    }

    #[test]
    fn test_can_support_dns_in_trusted_nodes() {
        let reth_toml = r#"
    [peers]
    trusted_nodes = [
        "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
        "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303"
    ]
    "#;

        let conf: Config = toml::from_str(reth_toml).unwrap();
        assert_eq!(conf.peers.trusted_nodes.len(), 2);

        let expected_enodes = vec![
            "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
            "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303",
        ];

        for enode in expected_enodes {
            let node = TrustedPeer::from_str(enode).unwrap();
            assert!(conf.peers.trusted_nodes.contains(&node));
        }
    }
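
    // A minimal sketch exercising `StaticFilesConfig::as_blocks_per_file_map`:
    // only segments with a configured value should end up in the resulting map.
    #[test]
    fn test_static_files_blocks_per_file_map() {
        use super::{BlocksPerFileConfig, StaticFilesConfig};
        use reth_static_file_types::StaticFileSegment;

        let config = StaticFilesConfig {
            blocks_per_file: BlocksPerFileConfig { headers: Some(500_000), ..Default::default() },
        };
        let map = config.as_blocks_per_file_map();
        assert_eq!(map.get(&StaticFileSegment::Headers), Some(&500_000));
        assert_eq!(map.len(), 1);
    }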
}