reth_config/config.rs

//! Configuration files.
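//!
//! A hedged sketch of a minimal `reth.toml`, assembled from the defaults and test
//! fixtures in this file (illustrative, not exhaustive):
//!
//! ```toml
//! [stages.headers]
//! commit_threshold = 10000
//!
//! [stages.execution]
//! max_blocks = 500000
//! max_duration = '10m'
//!
//! [prune]
//! block_interval = 5
//!
//! [prune.segments]
//! transaction_lookup = 'full'
//! ```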
use reth_network_types::{PeersConfig, SessionsConfig};
use reth_prune_types::PruneModes;
use reth_stages_types::ExecutionStageThresholds;
use reth_static_file_types::{StaticFileMap, StaticFileSegment};
use std::{
    path::{Path, PathBuf},
    time::Duration,
};
use url::Url;

#[cfg(feature = "serde")]
const EXTENSION: &str = "toml";

/// The default prune block interval
pub const DEFAULT_BLOCK_INTERVAL: usize = 5;

/// Configuration for the reth node.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct Config {
    /// Configuration for each stage in the pipeline.
    pub stages: StageConfig,
    /// Configuration for pruning.
    #[cfg_attr(feature = "serde", serde(default))]
    pub prune: PruneConfig,
    /// Configuration for the discovery service.
    pub peers: PeersConfig,
    /// Configuration for peer sessions.
    pub sessions: SessionsConfig,
    /// Configuration for static files.
    #[cfg_attr(feature = "serde", serde(default))]
    pub static_files: StaticFilesConfig,
}

impl Config {
    /// Sets the pruning configuration.
    pub fn set_prune_config(&mut self, prune_config: PruneConfig) {
        self.prune = prune_config;
    }
}

#[cfg(feature = "serde")]
impl Config {
    /// Load a [`Config`] from a specified path.
    ///
    /// A new configuration file is created with default values if none
    /// exists.
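    ///
    /// # Example
    ///
    /// A minimal usage sketch (requires the `serde` feature; the path is hypothetical):
    ///
    /// ```ignore
    /// // Loads the config, writing a default file to disk on first run.
    /// let config = reth_config::Config::from_path("reth.toml")?;
    /// ```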
    pub fn from_path(path: impl AsRef<Path>) -> eyre::Result<Self> {
        let path = path.as_ref();
        match std::fs::read_to_string(path) {
            Ok(cfg_string) => {
                toml::from_str(&cfg_string).map_err(|e| eyre::eyre!("Failed to parse TOML: {e}"))
            }
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
                if let Some(parent) = path.parent() {
                    std::fs::create_dir_all(parent)
                        .map_err(|e| eyre::eyre!("Failed to create directory: {e}"))?;
                }
                let cfg = Self::default();
                let s = toml::to_string_pretty(&cfg)
                    .map_err(|e| eyre::eyre!("Failed to serialize to TOML: {e}"))?;
                std::fs::write(path, s)
                    .map_err(|e| eyre::eyre!("Failed to write configuration file: {e}"))?;
                Ok(cfg)
            }
            Err(e) => Err(eyre::eyre!("Failed to load configuration: {e}")),
        }
    }

    /// Returns the [`PeersConfig`] for the node.
    ///
    /// If a peers file is provided, the basic nodes from the file are added to the configuration.
    pub fn peers_config_with_basic_nodes_from_file(
        &self,
        peers_file: Option<&Path>,
    ) -> PeersConfig {
        self.peers
            .clone()
            .with_basic_nodes_from_file(peers_file)
            .unwrap_or_else(|_| self.peers.clone())
    }

    /// Saves the configuration to a TOML file.
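    ///
    /// # Example
    ///
    /// An illustrative sketch (requires the `serde` feature); note that `save` rejects any
    /// path whose extension is not `.toml`:
    ///
    /// ```ignore
    /// let config = reth_config::Config::default();
    /// config.save(std::path::Path::new("reth.toml"))?;
    /// ```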
    pub fn save(&self, path: &Path) -> Result<(), std::io::Error> {
        if path.extension() != Some(std::ffi::OsStr::new(EXTENSION)) {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                format!("reth config file extension must be '{EXTENSION}'"),
            ));
        }

        std::fs::write(
            path,
            toml::to_string(self)
                .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()))?,
        )
    }
}

/// Configuration for each stage in the pipeline.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct StageConfig {
    /// ERA stage configuration.
    pub era: EraConfig,
    /// Header stage configuration.
    pub headers: HeadersConfig,
    /// Body stage configuration.
    pub bodies: BodiesConfig,
    /// Sender Recovery stage configuration.
    pub sender_recovery: SenderRecoveryConfig,
    /// Execution stage configuration.
    pub execution: ExecutionConfig,
    /// Prune stage configuration.
    pub prune: PruneStageConfig,
    /// Account Hashing stage configuration.
    pub account_hashing: HashingConfig,
    /// Storage Hashing stage configuration.
    pub storage_hashing: HashingConfig,
    /// Merkle stage configuration.
    pub merkle: MerkleConfig,
    /// Transaction Lookup stage configuration.
    pub transaction_lookup: TransactionLookupConfig,
    /// Index Account History stage configuration.
    pub index_account_history: IndexHistoryConfig,
    /// Index Storage History stage configuration.
    pub index_storage_history: IndexHistoryConfig,
    /// Common ETL related configuration.
    pub etl: EtlConfig,
}

impl StageConfig {
    /// The highest threshold (in number of blocks) for switching between incremental and full
    /// calculations across `MerkleStage`, `AccountHashingStage` and `StorageHashingStage`. This is
    /// required to figure out whether changesets can be pruned on subsequent pipeline runs during
    /// the `ExecutionStage`.
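    ///
    /// For example, with the default configuration this returns
    /// `max(7_000, 500_000, 500_000) = 500_000`. A hedged sketch:
    ///
    /// ```ignore
    /// let stages = reth_config::StageConfig::default();
    /// assert_eq!(stages.execution_external_clean_threshold(), 500_000);
    /// ```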
    pub fn execution_external_clean_threshold(&self) -> u64 {
        self.merkle
            .incremental_threshold
            .max(self.account_hashing.clean_threshold)
            .max(self.storage_hashing.clean_threshold)
    }
}

/// ERA stage configuration.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct EraConfig {
    /// Path to a local directory where ERA1 files are located.
    ///
    /// Conflicts with `url`.
    pub path: Option<PathBuf>,
    /// The base URL of an ERA1 file host to download from.
    ///
    /// Conflicts with `path`.
    pub url: Option<Url>,
    /// Path to a directory where files downloaded from `url` will be stored until processed.
    ///
    /// Required for `url`.
    pub folder: Option<PathBuf>,
}

impl EraConfig {
    /// Sets `folder` for temporary downloads as a directory called "era" inside `dir`.
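    ///
    /// A hedged usage sketch; the datadir path below is hypothetical:
    ///
    /// ```ignore
    /// let era = reth_config::EraConfig::default().with_datadir("/path/to/datadir");
    /// assert_eq!(era.folder, Some("/path/to/datadir/era".into()));
    /// ```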
    pub fn with_datadir(mut self, dir: impl AsRef<Path>) -> Self {
        self.folder = Some(dir.as_ref().join("era"));
        self
    }
}

/// Header stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HeadersConfig {
    /// The maximum number of requests to send concurrently.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// The maximum number of responses to buffer internally.
    /// Each response may contain multiple headers.
    pub downloader_max_buffered_responses: usize,
    /// The maximum number of headers to request from a peer at a time.
    pub downloader_request_limit: u64,
    /// The maximum number of headers to download before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for HeadersConfig {
    fn default() -> Self {
        Self {
            commit_threshold: 10_000,
            downloader_request_limit: 1_000,
            downloader_max_concurrent_requests: 100,
            downloader_min_concurrent_requests: 5,
            downloader_max_buffered_responses: 100,
        }
    }
}

/// Body stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct BodiesConfig {
    /// The number of non-empty blocks to fetch in a single request.
    ///
    /// Default: 200
    pub downloader_request_limit: u64,
    /// The maximum number of block bodies returned at once from the stream
    ///
    /// Default: `1_000`
    pub downloader_stream_batch_size: usize,
    /// The size of the internal block buffer in bytes.
    ///
    /// Default: 2GB
    pub downloader_max_buffered_blocks_size_bytes: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// The maximum number of requests to send concurrently.
    /// This is equal to the max number of peers.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
}

impl Default for BodiesConfig {
    fn default() -> Self {
        Self {
            downloader_request_limit: 200,
            downloader_stream_batch_size: 1_000,
            downloader_max_buffered_blocks_size_bytes: 2 * 1024 * 1024 * 1024, // ~2GB
            downloader_min_concurrent_requests: 5,
            downloader_max_concurrent_requests: 100,
        }
    }
}

/// Sender recovery stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct SenderRecoveryConfig {
    /// The maximum number of transactions to process before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for SenderRecoveryConfig {
    fn default() -> Self {
        Self { commit_threshold: 5_000_000 }
    }
}

/// Execution stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct ExecutionConfig {
    /// The maximum number of blocks to process before the execution stage commits.
    pub max_blocks: Option<u64>,
    /// The maximum number of state changes to keep in memory before the execution stage commits.
    pub max_changes: Option<u64>,
    /// The maximum cumulative amount of gas to process before the execution stage commits.
    pub max_cumulative_gas: Option<u64>,
    /// The maximum time spent processing blocks before the execution stage commits.
    #[cfg_attr(
        feature = "serde",
        serde(
            serialize_with = "humantime_serde::serialize",
            deserialize_with = "deserialize_duration"
        )
    )]
    pub max_duration: Option<Duration>,
}

impl Default for ExecutionConfig {
    fn default() -> Self {
        Self {
            max_blocks: Some(500_000),
            max_changes: Some(5_000_000),
            // 50k full blocks of 30M gas
            max_cumulative_gas: Some(30_000_000 * 50_000),
            // 10 minutes
            max_duration: Some(Duration::from_secs(10 * 60)),
        }
    }
}

impl From<ExecutionConfig> for ExecutionStageThresholds {
    fn from(config: ExecutionConfig) -> Self {
        Self {
            max_blocks: config.max_blocks,
            max_changes: config.max_changes,
            max_cumulative_gas: config.max_cumulative_gas,
            max_duration: config.max_duration,
        }
    }
}

/// Prune stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneStageConfig {
    /// The maximum number of entries to prune before committing progress to the database.
    pub commit_threshold: usize,
}

impl Default for PruneStageConfig {
    fn default() -> Self {
        Self { commit_threshold: 1_000_000 }
    }
}

/// Hashing stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HashingConfig {
    /// The threshold (in number of blocks) for switching between
    /// incremental hashing and full hashing.
    pub clean_threshold: u64,
    /// The maximum number of entities to process before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for HashingConfig {
    fn default() -> Self {
        Self { clean_threshold: 500_000, commit_threshold: 100_000 }
    }
}

/// Merkle stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct MerkleConfig {
    /// The number of blocks we will run the incremental root method for when we are catching up on
    /// the merkle stage for a large number of blocks.
    ///
    /// When we are catching up for a large number of blocks, we can only run the incremental root
    /// for a limited number of blocks, otherwise the incremental root method may cause the node to
    /// OOM. This number determines how many blocks in a row we will run the incremental root
    /// method for.
    pub incremental_threshold: u64,
    /// The threshold (in number of blocks) for switching from incremental trie building of
    /// changes to a full rebuild.
    pub rebuild_threshold: u64,
}

impl Default for MerkleConfig {
    fn default() -> Self {
        Self { incremental_threshold: 7_000, rebuild_threshold: 100_000 }
    }
}

/// Transaction Lookup stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct TransactionLookupConfig {
    /// The maximum number of transactions to process before writing to disk.
    pub chunk_size: u64,
}

impl Default for TransactionLookupConfig {
    fn default() -> Self {
        Self { chunk_size: 5_000_000 }
    }
}

/// Common ETL related configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct EtlConfig {
    /// Data directory where temporary files are created.
    pub dir: Option<PathBuf>,
    /// The maximum size in bytes of data held in memory before being flushed to disk as a file.
    pub file_size: usize,
}

impl Default for EtlConfig {
    fn default() -> Self {
        Self { dir: None, file_size: Self::default_file_size() }
    }
}

impl EtlConfig {
    /// Creates an ETL configuration.
    pub const fn new(dir: Option<PathBuf>, file_size: usize) -> Self {
        Self { dir, file_size }
    }

    /// Returns the default ETL directory for a given datadir path.
    pub fn from_datadir(path: &Path) -> PathBuf {
        path.join("etl-tmp")
    }

    /// Default size in bytes of data held in memory before being flushed to disk as a file.
    pub const fn default_file_size() -> usize {
        // 500 MB
        500 * (1024 * 1024)
    }
}

/// Static files configuration.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct StaticFilesConfig {
    /// Number of blocks per file for each segment.
    pub blocks_per_file: BlocksPerFileConfig,
}

/// Configuration for the number of blocks per file for each segment.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct BlocksPerFileConfig {
    /// Number of blocks per file for the headers segment.
    pub headers: Option<u64>,
    /// Number of blocks per file for the transactions segment.
    pub transactions: Option<u64>,
    /// Number of blocks per file for the receipts segment.
    pub receipts: Option<u64>,
    /// Number of blocks per file for the transaction senders segment.
    pub transaction_senders: Option<u64>,
    /// Number of blocks per file for the account changesets segment.
    pub account_change_sets: Option<u64>,
    /// Number of blocks per file for the storage changesets segment.
    pub storage_change_sets: Option<u64>,
}

impl StaticFilesConfig {
    /// Validates the static files configuration.
    ///
    /// Returns an error if any blocks per file value is zero.
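    ///
    /// A hedged sketch of the failure mode (zero values are rejected):
    ///
    /// ```ignore
    /// let mut config = StaticFilesConfig::default();
    /// config.blocks_per_file.headers = Some(0);
    /// assert!(config.validate().is_err());
    /// ```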
    pub fn validate(&self) -> eyre::Result<()> {
        let BlocksPerFileConfig {
            headers,
            transactions,
            receipts,
            transaction_senders,
            account_change_sets,
            storage_change_sets,
        } = self.blocks_per_file;
        eyre::ensure!(headers != Some(0), "Headers segment blocks per file must be greater than 0");
        eyre::ensure!(
            transactions != Some(0),
            "Transactions segment blocks per file must be greater than 0"
        );
        eyre::ensure!(
            receipts != Some(0),
            "Receipts segment blocks per file must be greater than 0"
        );
        eyre::ensure!(
            transaction_senders != Some(0),
            "Transaction senders segment blocks per file must be greater than 0"
        );
        eyre::ensure!(
            account_change_sets != Some(0),
            "Account changesets segment blocks per file must be greater than 0"
        );
        eyre::ensure!(
            storage_change_sets != Some(0),
            "Storage changesets segment blocks per file must be greater than 0"
        );
        Ok(())
    }

    /// Converts the blocks per file configuration into a [`StaticFileMap`].
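    ///
    /// A hedged usage sketch; only explicitly configured segments are inserted into the map:
    ///
    /// ```ignore
    /// let mut config = StaticFilesConfig::default();
    /// config.blocks_per_file.headers = Some(500_000);
    /// // Contains only the headers segment; all other segments are left unset.
    /// let map = config.as_blocks_per_file_map();
    /// ```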
    pub fn as_blocks_per_file_map(&self) -> StaticFileMap<u64> {
        let BlocksPerFileConfig {
            headers,
            transactions,
            receipts,
            transaction_senders,
            account_change_sets,
            storage_change_sets,
        } = self.blocks_per_file;

        let mut map = StaticFileMap::default();
        // Iterating over all possible segments allows us to do an exhaustive match here,
        // so we don't forget to configure new segments in the future.
        for segment in StaticFileSegment::iter() {
            let blocks_per_file = match segment {
                StaticFileSegment::Headers => headers,
                StaticFileSegment::Transactions => transactions,
                StaticFileSegment::Receipts => receipts,
                StaticFileSegment::TransactionSenders => transaction_senders,
                StaticFileSegment::AccountChangeSets => account_change_sets,
                StaticFileSegment::StorageChangeSets => storage_change_sets,
            };

            if let Some(blocks_per_file) = blocks_per_file {
                map.insert(segment, blocks_per_file);
            }
        }
        map
    }
}

/// History stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct IndexHistoryConfig {
    /// The maximum number of blocks to process before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for IndexHistoryConfig {
    fn default() -> Self {
        Self { commit_threshold: 100_000 }
    }
}

/// Pruning configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneConfig {
    /// Minimum pruning interval measured in blocks.
    pub block_interval: usize,
    /// Pruning configuration for every part of the data that can be pruned.
    #[cfg_attr(feature = "serde", serde(alias = "parts"))]
    pub segments: PruneModes,
}

impl Default for PruneConfig {
    fn default() -> Self {
        Self { block_interval: DEFAULT_BLOCK_INTERVAL, segments: PruneModes::default() }
    }
}

impl PruneConfig {
    /// Returns whether this configuration is the default one.
    pub fn is_default(&self) -> bool {
        self == &Self::default()
    }

    /// Returns whether there is any kind of receipt pruning configuration.
    pub fn has_receipts_pruning(&self) -> bool {
        self.segments.has_receipts_pruning()
    }

    /// Merges values from `other` into `self`.
    /// - `Option<PruneMode>` fields: set from `other` only if `self` is `None`.
    /// - `block_interval`: set from `other` only if `self.block_interval ==
    ///   DEFAULT_BLOCK_INTERVAL`.
    /// - `receipts_log_filter`: set from `other` only if `self` is empty and `other` is non-empty.
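    ///
    /// A hedged sketch of the precedence rules (see also `test_prune_config_merge` below):
    ///
    /// ```ignore
    /// let mut config = PruneConfig::default();
    /// let other = PruneConfig { block_interval: 10, ..Default::default() };
    /// config.merge(other);
    /// // `config.block_interval` was still the default (5), so it takes the value from `other`.
    /// assert_eq!(config.block_interval, 10);
    /// ```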
    pub fn merge(&mut self, other: Self) {
        let Self {
            block_interval,
            segments:
                PruneModes {
                    sender_recovery,
                    transaction_lookup,
                    receipts,
                    account_history,
                    storage_history,
                    bodies_history,
                    receipts_log_filter,
                },
        } = other;

        // Merge block_interval, only update if it's the default interval
        if self.block_interval == DEFAULT_BLOCK_INTERVAL {
            self.block_interval = block_interval;
        }

        // Merge the various segment prune modes
        self.segments.sender_recovery = self.segments.sender_recovery.or(sender_recovery);
        self.segments.transaction_lookup = self.segments.transaction_lookup.or(transaction_lookup);
        self.segments.receipts = self.segments.receipts.or(receipts);
        self.segments.account_history = self.segments.account_history.or(account_history);
        self.segments.storage_history = self.segments.storage_history.or(storage_history);
        self.segments.bodies_history = self.segments.bodies_history.or(bodies_history);

        if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() {
            self.segments.receipts_log_filter = receipts_log_filter;
        }
    }
}

/// Helper function to support deserializing [`Duration`] values written either as humantime
/// strings (e.g. `'10m'`) or in the older `{ secs, nanos }` table format.
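///
/// Either of the following forms deserializes to the same value; the second is the older
/// layout exercised in the backwards-compatibility tests below:
///
/// ```toml
/// max_duration = '10m'
///
/// [stages.execution.max_duration]
/// secs = 600
/// nanos = 0
/// ```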
#[cfg(feature = "serde")]
fn deserialize_duration<'de, D>(deserializer: D) -> Result<Option<Duration>, D::Error>
where
    D: serde::de::Deserializer<'de>,
{
    #[derive(serde::Deserialize)]
    #[serde(untagged)]
    enum AnyDuration {
        #[serde(deserialize_with = "humantime_serde::deserialize")]
        Human(Option<Duration>),
        Duration(Option<Duration>),
    }

    <AnyDuration as serde::Deserialize>::deserialize(deserializer).map(|d| match d {
        AnyDuration::Human(duration) | AnyDuration::Duration(duration) => duration,
    })
}

#[cfg(all(test, feature = "serde"))]
mod tests {
    use super::{Config, EXTENSION};
    use crate::PruneConfig;
    use alloy_primitives::Address;
    use reth_network_peers::TrustedPeer;
    use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig};
    use std::{collections::BTreeMap, path::Path, str::FromStr, time::Duration};

    fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) {
        let temp_dir = tempfile::tempdir().unwrap();
        let config_path = temp_dir.path().join(filename).with_extension(EXTENSION);

        proc(&config_path);

        temp_dir.close().unwrap()
    }

    /// Run a test function with a temporary config path as fixture.
    fn with_config_path(test_fn: fn(&Path)) {
        // Create a temporary directory for the config file
        let config_dir = tempfile::tempdir().expect("creating test fixture failed");
        // Create the config file path
        let config_path =
            config_dir.path().join("example-app").join("example-config").with_extension("toml");
        // Run the test function with the config path
        test_fn(&config_path);
        config_dir.close().expect("removing test fixture failed");
    }

    #[test]
    fn test_load_path_works() {
        with_config_path(|path| {
            let config = Config::from_path(path).expect("load_path failed");
            assert_eq!(config, Config::default());
        })
    }

    #[test]
    fn test_load_path_reads_existing_config() {
        with_config_path(|path| {
            let config = Config::default();

            // Create the parent directory if it doesn't exist
            if let Some(parent) = path.parent() {
                std::fs::create_dir_all(parent).expect("Failed to create directories");
            }

            // Write the config to the file
            std::fs::write(path, toml::to_string(&config).unwrap())
                .expect("Failed to write config");

            // Load the config from the file and compare it
            let loaded = Config::from_path(path).expect("load_path failed");
            assert_eq!(config, loaded);
        })
    }

    #[test]
    fn test_load_path_fails_on_invalid_toml() {
        with_config_path(|path| {
            let invalid_toml = "invalid toml data";

            // Create the parent directory if it doesn't exist
            if let Some(parent) = path.parent() {
                std::fs::create_dir_all(parent).expect("Failed to create directories");
            }

            // Write invalid TOML data to the file
            std::fs::write(path, invalid_toml).expect("Failed to write invalid TOML");

            // Attempting to load the config should fail
            let result = Config::from_path(path);
            assert!(result.is_err());
        })
    }

    #[test]
    fn test_load_path_creates_directory_if_not_exists() {
        with_config_path(|path| {
            // Ensure the directory does not exist
            let parent = path.parent().unwrap();
            assert!(!parent.exists());

            // Load the configuration, which should create the directory and a default config file
            let config = Config::from_path(path).expect("load_path failed");
            assert_eq!(config, Config::default());

            // The directory and file should now exist
            assert!(parent.exists());
            assert!(path.exists());
        });
    }

    #[test]
    fn test_store_config() {
        with_tempdir("config-store-test", |config_path| {
            let config = Config::default();
            std::fs::write(
                config_path,
                toml::to_string(&config).expect("Failed to serialize config"),
            )
            .expect("Failed to write config file");
        })
    }

    #[test]
    fn test_store_config_method() {
        with_tempdir("config-store-test-method", |config_path| {
            let config = Config::default();
            config.save(config_path).expect("Failed to store config");
        })
    }

    #[test]
    fn test_load_config() {
        with_tempdir("config-load-test", |config_path| {
            let config = Config::default();

            // Write the config to a file
            std::fs::write(
                config_path,
                toml::to_string(&config).expect("Failed to serialize config"),
            )
            .expect("Failed to write config file");

            // Load the config from the file
            let loaded_config = Config::from_path(config_path).unwrap();

            // Compare the loaded config with the original config
            assert_eq!(config, loaded_config);
        })
    }

    #[test]
    fn test_load_execution_stage() {
        with_tempdir("config-load-test", |config_path| {
            let mut config = Config::default();
            config.stages.execution.max_duration = Some(Duration::from_secs(10 * 60));

            // Write the config to a file
            std::fs::write(
                config_path,
                toml::to_string(&config).expect("Failed to serialize config"),
            )
            .expect("Failed to write config file");

            // Load the config from the file
            let loaded_config = Config::from_path(config_path).unwrap();

            // Compare the loaded config with the original config
            assert_eq!(config, loaded_config);
        })
    }

    // ensures config deserialization is backwards compatible
    #[test]
    fn test_backwards_compatibility() {
        let alpha_0_0_8 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
chunk_size = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '1s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0

[prune]
block_interval = 5

[prune.parts]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { before = 1920000 }
account_history = { distance = 16384 }
storage_history = { distance = 16384 }
[prune.parts.receipts_log_filter]
'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
#";
        let _conf: Config = toml::from_str(alpha_0_0_8).unwrap();

        let alpha_0_0_11 = r"#
[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { before = 1920000 }
account_history = { distance = 16384 }
storage_history = { distance = 16384 }
[prune.segments.receipts_log_filter]
'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
#";
        let _conf: Config = toml::from_str(alpha_0_0_11).unwrap();

        let alpha_0_0_18 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.total_difficulty]
commit_threshold = 100000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000
max_cumulative_gas = 1500000000000
[stages.execution.max_duration]
secs = 600
nanos = 0

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
commit_threshold = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '5s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30
max_concurrent_outbound_dials = 10

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096
bad_announcement = -1024

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0
#";
        let conf: Config = toml::from_str(alpha_0_0_18).unwrap();
        assert_eq!(conf.stages.execution.max_duration, Some(Duration::from_secs(10 * 60)));

        let alpha_0_0_19 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.total_difficulty]
commit_threshold = 100000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000
max_cumulative_gas = 1500000000000
max_duration = '10m'

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
commit_threshold = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '5s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30
max_concurrent_outbound_dials = 10

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096
bad_announcement = -1024

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0
#";
        let _conf: Config = toml::from_str(alpha_0_0_19).unwrap();
    }

    // ensures prune config deserialization is backwards compatible
    #[test]
    fn test_backwards_compatibility_prune_full() {
        let s = r"#
[prune]
block_interval = 5

[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { distance = 16384 }
#";
        let _conf: Config = toml::from_str(s).unwrap();
    }

    #[test]
    fn test_prune_config_merge() {
        let mut config1 = PruneConfig {
            block_interval: 5,
            segments: PruneModes {
                sender_recovery: Some(PruneMode::Full),
                transaction_lookup: None,
                receipts: Some(PruneMode::Distance(1000)),
                account_history: None,
                storage_history: Some(PruneMode::Before(5000)),
                bodies_history: None,
                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([(
                    Address::random(),
                    PruneMode::Full,
                )])),
            },
        };

        let config2 = PruneConfig {
            block_interval: 10,
            segments: PruneModes {
                sender_recovery: Some(PruneMode::Distance(500)),
                transaction_lookup: Some(PruneMode::Full),
                receipts: Some(PruneMode::Full),
                account_history: Some(PruneMode::Distance(2000)),
                storage_history: Some(PruneMode::Distance(3000)),
                bodies_history: None,
                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([
                    (Address::random(), PruneMode::Distance(1000)),
                    (Address::random(), PruneMode::Before(2000)),
                ])),
            },
        };

        let original_filter = config1.segments.receipts_log_filter.clone();
        config1.merge(config2);

        // Check that the configuration has been merged. Any configuration present in config1
        // should not be overwritten by config2
        assert_eq!(config1.block_interval, 10);
        assert_eq!(config1.segments.sender_recovery, Some(PruneMode::Full));
        assert_eq!(config1.segments.transaction_lookup, Some(PruneMode::Full));
        assert_eq!(config1.segments.receipts, Some(PruneMode::Distance(1000)));
        assert_eq!(config1.segments.account_history, Some(PruneMode::Distance(2000)));
        assert_eq!(config1.segments.storage_history, Some(PruneMode::Before(5000)));
        assert_eq!(config1.segments.receipts_log_filter, original_filter);
    }

    #[test]
    fn test_conf_trust_nodes_only() {
        let trusted_nodes_only = r"#
[peers]
trusted_nodes_only = true
#";
        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
        assert!(conf.peers.trusted_nodes_only);

        let trusted_nodes_only = r"#
[peers]
connect_trusted_nodes_only = true
#";
        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
        assert!(conf.peers.trusted_nodes_only);
    }

    #[test]
    fn test_can_support_dns_in_trusted_nodes() {
        let reth_toml = r#"
    [peers]
    trusted_nodes = [
        "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
        "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303"
    ]
    "#;

        let conf: Config = toml::from_str(reth_toml).unwrap();
        assert_eq!(conf.peers.trusted_nodes.len(), 2);

        let expected_enodes = vec![
            "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
            "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303",
        ];

        for enode in expected_enodes {
            let node = TrustedPeer::from_str(enode).unwrap();
            assert!(conf.peers.trusted_nodes.contains(&node));
        }
    }
}