// reth_config/config.rs
1//! Configuration files.
2use reth_network_types::{PeersConfig, SessionsConfig};
3use reth_prune_types::{PruneModes, MINIMUM_UNWIND_SAFE_DISTANCE};
4use reth_stages_types::ExecutionStageThresholds;
5use reth_static_file_types::{StaticFileMap, StaticFileSegment};
6use std::{
7    path::{Path, PathBuf},
8    time::Duration,
9};
10use url::Url;
11
/// File extension enforced for reth configuration files (TOML).
#[cfg(feature = "serde")]
const EXTENSION: &str = "toml";

/// The default prune block interval (minimum pruning interval, measured in blocks).
pub const DEFAULT_BLOCK_INTERVAL: usize = 5;
17
/// Configuration for the reth node.
///
/// This is the top-level type deserialized from the node's TOML configuration
/// file. With the `serde` feature, every missing field falls back to its
/// `Default` value (`serde(default)` on the container).
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct Config {
    /// Configuration for each stage in the pipeline.
    pub stages: StageConfig,
    /// Configuration for pruning.
    #[cfg_attr(feature = "serde", serde(default))]
    pub prune: PruneConfig,
    /// Configuration for network peers.
    pub peers: PeersConfig,
    /// Configuration for peer sessions.
    pub sessions: SessionsConfig,
    /// Configuration for static files.
    #[cfg_attr(feature = "serde", serde(default))]
    pub static_files: StaticFilesConfig,
}
36
impl Config {
    /// Sets the pruning configuration.
    ///
    /// Replaces the current [`PruneConfig`] wholesale; no merging is performed.
    pub fn set_prune_config(&mut self, prune_config: PruneConfig) {
        self.prune = prune_config;
    }
}
43
#[cfg(feature = "serde")]
impl Config {
    /// Load a [`Config`] from a specified path.
    ///
    /// A new configuration file is created with default values if none
    /// exists.
    ///
    /// # Errors
    /// Returns an error if the file exists but cannot be read or parsed as TOML,
    /// or if a default file cannot be created and written when none exists.
    pub fn from_path(path: impl AsRef<Path>) -> eyre::Result<Self> {
        let path = path.as_ref();
        match std::fs::read_to_string(path) {
            Ok(cfg_string) => {
                toml::from_str(&cfg_string).map_err(|e| eyre::eyre!("Failed to parse TOML: {e}"))
            }
            // File missing: persist a default configuration and return it.
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
                if let Some(parent) = path.parent() {
                    std::fs::create_dir_all(parent)
                        .map_err(|e| eyre::eyre!("Failed to create directory: {e}"))?;
                }
                let cfg = Self::default();
                let s = toml::to_string_pretty(&cfg)
                    .map_err(|e| eyre::eyre!("Failed to serialize to TOML: {e}"))?;
                std::fs::write(path, s)
                    .map_err(|e| eyre::eyre!("Failed to write configuration file: {e}"))?;
                Ok(cfg)
            }
            // Any other I/O error (permissions, etc.) is surfaced to the caller.
            Err(e) => Err(eyre::eyre!("Failed to load configuration: {e}")),
        }
    }

    /// Returns the [`PeersConfig`] for the node.
    ///
    /// If a peers file is provided, the basic nodes from the file are added to the configuration.
    /// If reading the file fails, the unmodified peers configuration is returned instead
    /// (best-effort; the error is discarded).
    pub fn peers_config_with_basic_nodes_from_file(
        &self,
        peers_file: Option<&Path>,
    ) -> PeersConfig {
        self.peers
            .clone()
            .with_basic_nodes_from_file(peers_file)
            .unwrap_or_else(|_| self.peers.clone())
    }

    /// Save the configuration to toml file.
    ///
    /// # Errors
    /// Returns `InvalidInput` if `path` does not have the `toml` extension,
    /// `InvalidData` if serialization fails, or any error from writing the file.
    pub fn save(&self, path: &Path) -> Result<(), std::io::Error> {
        // Guard against accidentally writing the config under a different format.
        if path.extension() != Some(std::ffi::OsStr::new(EXTENSION)) {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                format!("reth config file extension must be '{EXTENSION}'"),
            ));
        }

        std::fs::write(
            path,
            toml::to_string(self)
                .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()))?,
        )
    }
}
101
/// Configuration for each stage in the pipeline.
///
/// Every field falls back to its `Default` value when missing from the
/// configuration file (`serde(default)` on the container).
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct StageConfig {
    /// ERA stage configuration.
    pub era: EraConfig,
    /// Header stage configuration.
    pub headers: HeadersConfig,
    /// Body stage configuration.
    pub bodies: BodiesConfig,
    /// Sender Recovery stage configuration.
    pub sender_recovery: SenderRecoveryConfig,
    /// Execution stage configuration.
    pub execution: ExecutionConfig,
    /// Prune stage configuration.
    pub prune: PruneStageConfig,
    /// Account Hashing stage configuration.
    pub account_hashing: HashingConfig,
    /// Storage Hashing stage configuration.
    pub storage_hashing: HashingConfig,
    /// Merkle stage configuration.
    pub merkle: MerkleConfig,
    /// Transaction Lookup stage configuration.
    pub transaction_lookup: TransactionLookupConfig,
    /// Index Account History stage configuration.
    pub index_account_history: IndexHistoryConfig,
    /// Index Storage History stage configuration.
    pub index_storage_history: IndexHistoryConfig,
    /// Common ETL related configuration.
    pub etl: EtlConfig,
}
134
135impl StageConfig {
136    /// The highest threshold (in number of blocks) for switching between incremental and full
137    /// calculations across `MerkleStage`, `AccountHashingStage` and `StorageHashingStage`. This is
138    /// required to figure out if can prune or not changesets on subsequent pipeline runs during
139    /// `ExecutionStage`
140    pub fn execution_external_clean_threshold(&self) -> u64 {
141        self.merkle
142            .incremental_threshold
143            .max(self.account_hashing.clean_threshold)
144            .max(self.storage_hashing.clean_threshold)
145    }
146}
147
148/// ERA stage configuration.
149#[derive(Debug, Clone, Default, PartialEq, Eq)]
150#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
151#[cfg_attr(feature = "serde", serde(default))]
152pub struct EraConfig {
153    /// Path to a local directory where ERA1 files are located.
154    ///
155    /// Conflicts with `url`.
156    pub path: Option<PathBuf>,
157    /// The base URL of an ERA1 file host to download from.
158    ///
159    /// Conflicts with `path`.
160    pub url: Option<Url>,
161    /// Path to a directory where files downloaded from `url` will be stored until processed.
162    ///
163    /// Required for `url`.
164    pub folder: Option<PathBuf>,
165}
166
167impl EraConfig {
168    /// Sets `folder` for temporary downloads as a directory called "era" inside `dir`.
169    pub fn with_datadir(mut self, dir: impl AsRef<Path>) -> Self {
170        self.folder = Some(dir.as_ref().join("era"));
171        self
172    }
173}
174
/// Header stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HeadersConfig {
    /// The maximum number of requests to send concurrently.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// Maximum amount of responses to buffer internally.
    /// The response contains multiple headers.
    pub downloader_max_buffered_responses: usize,
    /// The maximum number of headers to request from a peer at a time.
    pub downloader_request_limit: u64,
    /// The maximum number of headers to download before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for HeadersConfig {
    /// Stock header-download settings.
    fn default() -> Self {
        Self {
            downloader_max_concurrent_requests: 100,
            downloader_min_concurrent_requests: 5,
            downloader_max_buffered_responses: 100,
            downloader_request_limit: 1_000,
            commit_threshold: 10_000,
        }
    }
}
208
/// Body stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct BodiesConfig {
    /// The batch size of non-empty blocks per one request
    ///
    /// Default: 200
    pub downloader_request_limit: u64,
    /// The maximum number of block bodies returned at once from the stream
    ///
    /// Default: `1_000`
    pub downloader_stream_batch_size: usize,
    /// The size of the internal block buffer in bytes.
    ///
    /// Default: 2GB
    pub downloader_max_buffered_blocks_size_bytes: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// The maximum number of requests to send concurrently.
    /// This is equal to the max number of peers.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
}

impl Default for BodiesConfig {
    /// Stock body-download settings.
    fn default() -> Self {
        // ~2GB of buffered block data.
        let max_buffer_bytes = 2 * 1024 * 1024 * 1024;
        Self {
            downloader_max_buffered_blocks_size_bytes: max_buffer_bytes,
            downloader_max_concurrent_requests: 100,
            downloader_min_concurrent_requests: 5,
            downloader_stream_batch_size: 1_000,
            downloader_request_limit: 200,
        }
    }
}
248
/// Sender recovery stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct SenderRecoveryConfig {
    /// The maximum number of transactions to process before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for SenderRecoveryConfig {
    /// Defaults to committing every 5 million recovered transactions.
    fn default() -> Self {
        let commit_threshold = 5_000_000;
        Self { commit_threshold }
    }
}
263
/// Execution stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct ExecutionConfig {
    /// The maximum number of blocks to process before the execution stage commits.
    pub max_blocks: Option<u64>,
    /// The maximum number of state changes to keep in memory before the execution stage commits.
    pub max_changes: Option<u64>,
    /// The maximum cumulative amount of gas to process before the execution stage commits.
    pub max_cumulative_gas: Option<u64>,
    /// The maximum time spent on blocks processing before the execution stage commits.
    #[cfg_attr(
        feature = "serde",
        serde(
            serialize_with = "humantime_serde::serialize",
            deserialize_with = "deserialize_duration"
        )
    )]
    pub max_duration: Option<Duration>,
}

impl Default for ExecutionConfig {
    /// Stock execution-stage commit thresholds.
    fn default() -> Self {
        // 50k full blocks of 30M gas.
        let gas_cap: u64 = 30_000_000 * 50_000;
        Self {
            max_duration: Some(Duration::from_secs(600)), // 10 minutes
            max_cumulative_gas: Some(gas_cap),
            max_changes: Some(5_000_000),
            max_blocks: Some(500_000),
        }
    }
}
298
/// Maps the user-facing execution config onto the stage's internal thresholds,
/// field for field.
impl From<ExecutionConfig> for ExecutionStageThresholds {
    fn from(config: ExecutionConfig) -> Self {
        Self {
            max_blocks: config.max_blocks,
            max_changes: config.max_changes,
            max_cumulative_gas: config.max_cumulative_gas,
            max_duration: config.max_duration,
        }
    }
}
309
/// Prune stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneStageConfig {
    /// The maximum number of entries to prune before committing progress to the database.
    pub commit_threshold: usize,
}

impl Default for PruneStageConfig {
    /// Defaults to committing after pruning one million entries.
    fn default() -> Self {
        let commit_threshold = 1_000_000;
        Self { commit_threshold }
    }
}
324
/// Hashing stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HashingConfig {
    /// The threshold (in number of blocks) for switching between
    /// incremental hashing and full hashing.
    pub clean_threshold: u64,
    /// The maximum number of entities to process before committing progress to the database.
    pub commit_threshold: u64,
    /// The maximum number of changeset entries to process before committing progress. The stage
    /// commits after either `commit_threshold` blocks or `commit_entries` entries, whichever
    /// comes first. This bounds memory usage when blocks contain many state changes.
    pub commit_entries: u64,
}

impl Default for HashingConfig {
    /// Stock hashing-stage thresholds.
    fn default() -> Self {
        Self {
            commit_entries: 30_000_000,
            commit_threshold: 100_000,
            clean_threshold: 500_000,
        }
    }
}
346
/// Merkle stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct MerkleConfig {
    /// The number of blocks we will run the incremental root method for when we are catching up on
    /// the merkle stage for a large number of blocks.
    ///
    /// When we are catching up for a large number of blocks, we can only run the incremental root
    /// for a limited number of blocks, otherwise the incremental root method may cause the node to
    /// OOM. This number determines how many blocks in a row we will run the incremental root
    /// method for.
    pub incremental_threshold: u64,
    /// The threshold (in number of blocks) for switching from incremental trie building of changes
    /// to whole rebuild.
    pub rebuild_threshold: u64,
}

impl Default for MerkleConfig {
    /// Defaults: incremental batches of 7k blocks; full rebuild past 100k blocks.
    fn default() -> Self {
        let incremental_threshold = 7_000;
        let rebuild_threshold = 100_000;
        Self { incremental_threshold, rebuild_threshold }
    }
}
370
/// Transaction Lookup stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct TransactionLookupConfig {
    /// The maximum number of transactions to process before writing to disk.
    pub chunk_size: u64,
}

impl Default for TransactionLookupConfig {
    /// Defaults to flushing every 5 million transactions.
    fn default() -> Self {
        let chunk_size = 5_000_000;
        Self { chunk_size }
    }
}
385
/// Common ETL related configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct EtlConfig {
    /// Data directory where temporary files are created.
    pub dir: Option<PathBuf>,
    /// The maximum size in bytes of data held in memory before being flushed to disk as a file.
    pub file_size: usize,
}

impl Default for EtlConfig {
    /// No explicit directory; default in-memory buffer size.
    fn default() -> Self {
        Self { file_size: Self::default_file_size(), dir: None }
    }
}

impl EtlConfig {
    /// Default size in bytes of data held in memory before being flushed to disk as a file.
    pub const fn default_file_size() -> usize {
        // 500 MB
        500 * 1024 * 1024
    }

    /// Return default ETL directory from datadir path.
    pub fn from_datadir(path: &Path) -> PathBuf {
        path.join("etl-tmp")
    }

    /// Creates an ETL configuration
    pub const fn new(dir: Option<PathBuf>, file_size: usize) -> Self {
        Self { dir, file_size }
    }
}
420
/// Static files configuration.
///
/// Wraps the per-segment blocks-per-file settings; see [`BlocksPerFileConfig`].
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct StaticFilesConfig {
    /// Number of blocks per file for each segment.
    pub blocks_per_file: BlocksPerFileConfig,
}
429
/// Configuration for the number of blocks per file for each segment.
///
/// A `None` value means the segment keeps its built-in default; `Some(0)` is
/// rejected by [`StaticFilesConfig::validate`].
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct BlocksPerFileConfig {
    /// Number of blocks per file for the headers segment.
    pub headers: Option<u64>,
    /// Number of blocks per file for the transactions segment.
    pub transactions: Option<u64>,
    /// Number of blocks per file for the receipts segment.
    pub receipts: Option<u64>,
    /// Number of blocks per file for the transaction senders segment.
    pub transaction_senders: Option<u64>,
    /// Number of blocks per file for the account changesets segment.
    pub account_change_sets: Option<u64>,
    /// Number of blocks per file for the storage changesets segment.
    pub storage_change_sets: Option<u64>,
}
448
impl StaticFilesConfig {
    /// Validates the static files configuration.
    ///
    /// Returns an error if any blocks per file value is zero.
    /// `None` values are always valid (segment keeps its default).
    pub fn validate(&self) -> eyre::Result<()> {
        // Exhaustive destructuring: adding a field to `BlocksPerFileConfig`
        // forces this function to be updated.
        let BlocksPerFileConfig {
            headers,
            transactions,
            receipts,
            transaction_senders,
            account_change_sets,
            storage_change_sets,
        } = self.blocks_per_file;
        eyre::ensure!(headers != Some(0), "Headers segment blocks per file must be greater than 0");
        eyre::ensure!(
            transactions != Some(0),
            "Transactions segment blocks per file must be greater than 0"
        );
        eyre::ensure!(
            receipts != Some(0),
            "Receipts segment blocks per file must be greater than 0"
        );
        eyre::ensure!(
            transaction_senders != Some(0),
            "Transaction senders segment blocks per file must be greater than 0"
        );
        eyre::ensure!(
            account_change_sets != Some(0),
            "Account changesets segment blocks per file must be greater than 0"
        );
        eyre::ensure!(
            storage_change_sets != Some(0),
            "Storage changesets segment blocks per file must be greater than 0"
        );
        Ok(())
    }

    /// Converts the blocks per file configuration into a [`StaticFileMap`].
    ///
    /// Segments left as `None` are omitted from the returned map.
    pub fn as_blocks_per_file_map(&self) -> StaticFileMap<u64> {
        let BlocksPerFileConfig {
            headers,
            transactions,
            receipts,
            transaction_senders,
            account_change_sets,
            storage_change_sets,
        } = self.blocks_per_file;

        let mut map = StaticFileMap::default();
        // Iterating over all possible segments allows us to do an exhaustive match here,
        // to not forget to configure new segments in the future.
        for segment in StaticFileSegment::iter() {
            let blocks_per_file = match segment {
                StaticFileSegment::Headers => headers,
                StaticFileSegment::Transactions => transactions,
                StaticFileSegment::Receipts => receipts,
                StaticFileSegment::TransactionSenders => transaction_senders,
                StaticFileSegment::AccountChangeSets => account_change_sets,
                StaticFileSegment::StorageChangeSets => storage_change_sets,
            };

            // Only explicitly configured segments are inserted.
            if let Some(blocks_per_file) = blocks_per_file {
                map.insert(segment, blocks_per_file);
            }
        }
        map
    }
}
517
/// History stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct IndexHistoryConfig {
    /// The maximum number of blocks to process before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for IndexHistoryConfig {
    /// Defaults to committing every 100k blocks.
    fn default() -> Self {
        let commit_threshold = 100_000;
        Self { commit_threshold }
    }
}
532
/// Pruning configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneConfig {
    /// Minimum pruning interval measured in blocks.
    pub block_interval: usize,
    /// Pruning configuration for every part of the data that can be pruned.
    // The `parts` alias keeps configs written for older versions deserializable.
    #[cfg_attr(feature = "serde", serde(alias = "parts"))]
    pub segments: PruneModes,
    /// Minimum distance from the tip required for pruning. Controls the safety margin for
    /// reorgs and manual unwinds. Defaults to [`MINIMUM_UNWIND_SAFE_DISTANCE`].
    #[cfg_attr(feature = "serde", serde(default = "default_minimum_pruning_distance"))]
    pub minimum_pruning_distance: u64,
}
548
549/// Returns the default minimum pruning distance.
550const fn default_minimum_pruning_distance() -> u64 {
551    MINIMUM_UNWIND_SAFE_DISTANCE
552}
553
554impl Default for PruneConfig {
555    fn default() -> Self {
556        Self {
557            block_interval: DEFAULT_BLOCK_INTERVAL,
558            segments: PruneModes::default(),
559            minimum_pruning_distance: MINIMUM_UNWIND_SAFE_DISTANCE,
560        }
561    }
562}
563
impl PruneConfig {
    /// Returns whether this configuration is the default one.
    pub fn is_default(&self) -> bool {
        self == &Self::default()
    }

    /// Returns whether there is any kind of receipt pruning configuration.
    pub fn has_receipts_pruning(&self) -> bool {
        self.segments.has_receipts_pruning()
    }

    /// Merges values from `other` into `self`.
    /// - `Option<PruneMode>` fields: set from `other` only if `self` is `None`.
    /// - `block_interval`: set from `other` only if `self.block_interval ==
    ///   DEFAULT_BLOCK_INTERVAL`.
    /// - `receipts_log_filter`: set from `other` only if `self` is empty and `other` is non-empty.
    pub fn merge(&mut self, other: Self) {
        // Exhaustive destructuring of `other`: adding a field to `PruneModes`
        // forces this merge logic to be revisited.
        let Self {
            block_interval,
            segments:
                PruneModes {
                    sender_recovery,
                    transaction_lookup,
                    receipts,
                    account_history,
                    storage_history,
                    bodies_history,
                    receipts_log_filter,
                },
            minimum_pruning_distance,
        } = other;

        // Merge block_interval, only update if it's the default interval
        if self.block_interval == DEFAULT_BLOCK_INTERVAL {
            self.block_interval = block_interval;
        }

        // Merge minimum_pruning_distance, only update if it's the default
        if self.minimum_pruning_distance == MINIMUM_UNWIND_SAFE_DISTANCE {
            self.minimum_pruning_distance = minimum_pruning_distance;
        }

        // Merge the various segment prune modes: `self` always wins when set.
        self.segments.sender_recovery = self.segments.sender_recovery.or(sender_recovery);
        self.segments.transaction_lookup = self.segments.transaction_lookup.or(transaction_lookup);
        self.segments.receipts = self.segments.receipts.or(receipts);
        self.segments.account_history = self.segments.account_history.or(account_history);
        self.segments.storage_history = self.segments.storage_history.or(storage_history);
        self.segments.bodies_history = self.segments.bodies_history.or(bodies_history);

        // The log filter is only taken from `other` when ours is empty.
        if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() {
            self.segments.receipts_log_filter = receipts_log_filter;
        }
    }
}
619
/// Helper type to support older versions of Duration deserialization.
///
/// Accepts either the humantime string form (via `humantime_serde`) or the
/// plain `Duration` struct form that older configuration files serialized.
#[cfg(feature = "serde")]
fn deserialize_duration<'de, D>(deserializer: D) -> Result<Option<Duration>, D::Error>
where
    D: serde::de::Deserializer<'de>,
{
    // Untagged: serde tries each variant in declaration order until one parses.
    #[derive(serde::Deserialize)]
    #[serde(untagged)]
    enum AnyDuration {
        // Human-readable form, parsed by humantime_serde.
        #[serde(deserialize_with = "humantime_serde::deserialize")]
        Human(Option<Duration>),
        // Plain `Duration` struct form (legacy configs).
        Duration(Option<Duration>),
    }

    // Unwrap whichever variant matched down to the inner Option<Duration>.
    <AnyDuration as serde::Deserialize>::deserialize(deserializer).map(|d| match d {
        AnyDuration::Human(duration) | AnyDuration::Duration(duration) => duration,
    })
}
638
639#[cfg(all(test, feature = "serde"))]
640mod tests {
641    use super::{Config, EXTENSION};
642    use crate::PruneConfig;
643    use alloy_primitives::Address;
644    use reth_network_peers::TrustedPeer;
645    use reth_prune_types::{
646        PruneMode, PruneModes, ReceiptsLogPruneConfig, MINIMUM_UNWIND_SAFE_DISTANCE,
647    };
648    use std::{collections::BTreeMap, path::Path, str::FromStr, time::Duration};
649
    /// Runs `proc` with a path (named `filename` + the toml extension) inside a
    /// fresh temporary directory, then removes the directory.
    fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) {
        let temp_dir = tempfile::tempdir().unwrap();
        let config_path = temp_dir.path().join(filename).with_extension(EXTENSION);

        proc(&config_path);

        temp_dir.close().unwrap()
    }
658
    /// Run a test function with a temporary config path as fixture.
    ///
    /// Unlike [`with_tempdir`], the parent directory of the config path is NOT
    /// created up front, so tests can exercise directory creation.
    fn with_config_path(test_fn: fn(&Path)) {
        // Create a temporary directory for the config file
        let config_dir = tempfile::tempdir().expect("creating test fixture failed");
        // Create the config file path
        let config_path =
            config_dir.path().join("example-app").join("example-config").with_extension("toml");
        // Run the test function with the config path
        test_fn(&config_path);
        config_dir.close().expect("removing test fixture failed");
    }
670
    /// Loading a missing file should create it and return the default config.
    #[test]
    fn test_load_path_works() {
        with_config_path(|path| {
            let config = Config::from_path(path).expect("load_path failed");
            assert_eq!(config, Config::default());
        })
    }
678
    /// A config written to disk should round-trip through `from_path` unchanged.
    #[test]
    fn test_load_path_reads_existing_config() {
        with_config_path(|path| {
            let config = Config::default();

            // Create the parent directory if it doesn't exist
            if let Some(parent) = path.parent() {
                std::fs::create_dir_all(parent).expect("Failed to create directories");
            }

            // Write the config to the file
            std::fs::write(path, toml::to_string(&config).unwrap())
                .expect("Failed to write config");

            // Load the config from the file and compare it
            let loaded = Config::from_path(path).expect("load_path failed");
            assert_eq!(config, loaded);
        })
    }
698
    /// Malformed TOML on disk must surface as an error, not a default config.
    #[test]
    fn test_load_path_fails_on_invalid_toml() {
        with_config_path(|path| {
            let invalid_toml = "invalid toml data";

            // Create the parent directory if it doesn't exist
            if let Some(parent) = path.parent() {
                std::fs::create_dir_all(parent).expect("Failed to create directories");
            }

            // Write invalid TOML data to the file
            std::fs::write(path, invalid_toml).expect("Failed to write invalid TOML");

            // Attempt to load the config should fail
            let result = Config::from_path(path);
            assert!(result.is_err());
        })
    }
717
    /// `from_path` must create missing parent directories before writing defaults.
    #[test]
    fn test_load_path_creates_directory_if_not_exists() {
        with_config_path(|path| {
            // Ensure the directory does not exist
            let parent = path.parent().unwrap();
            assert!(!parent.exists());

            // Load the configuration, which should create the directory and a default config file
            let config = Config::from_path(path).expect("load_path failed");
            assert_eq!(config, Config::default());

            // The directory and file should now exist
            assert!(parent.exists());
            assert!(path.exists());
        });
    }
734
    /// A default config must serialize to TOML and be writable to disk.
    #[test]
    fn test_store_config() {
        with_tempdir("config-store-test", |config_path| {
            let config = Config::default();
            std::fs::write(
                config_path,
                toml::to_string(&config).expect("Failed to serialize config"),
            )
            .expect("Failed to write config file");
        })
    }
746
    /// `Config::save` must succeed for a path with the expected toml extension.
    #[test]
    fn test_store_config_method() {
        with_tempdir("config-store-test-method", |config_path| {
            let config = Config::default();
            config.save(config_path).expect("Failed to store config");
        })
    }
754
    /// Serialize-then-load round trip of a default config via `from_path`.
    #[test]
    fn test_load_config() {
        with_tempdir("config-load-test", |config_path| {
            let config = Config::default();

            // Write the config to a file
            std::fs::write(
                config_path,
                toml::to_string(&config).expect("Failed to serialize config"),
            )
            .expect("Failed to write config file");

            // Load the config from the file
            let loaded_config = Config::from_path(config_path).unwrap();

            // Compare the loaded config with the original config
            assert_eq!(config, loaded_config);
        })
    }
774
    /// Round-trips a config with `max_duration` set, exercising the custom
    /// humantime (de)serialization of the execution stage's duration field.
    #[test]
    fn test_load_execution_stage() {
        with_tempdir("config-load-test", |config_path| {
            let mut config = Config::default();
            config.stages.execution.max_duration = Some(Duration::from_secs(10 * 60));

            // Write the config to a file
            std::fs::write(
                config_path,
                toml::to_string(&config).expect("Failed to serialize config"),
            )
            .expect("Failed to write config file");

            // Load the config from the file
            let loaded_config = Config::from_path(config_path).unwrap();

            // Compare the loaded config with the original config
            assert_eq!(config, loaded_config);
        })
    }
795
    // Ensures config deserialization is backwards compatible: TOML files
    // written by older reth releases must still parse into today's `Config`.
    // Each fixture below is a verbatim config from the named alpha release.
    #[test]
    fn test_backwards_compatibility() {
        // alpha.0.0.8: prune sections were named `[prune.parts]` (now
        // `[prune.segments]`), and `transaction_lookup` used `chunk_size`.
        let alpha_0_0_8 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
chunk_size = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '1s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0

[prune]
block_interval = 5

[prune.parts]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { before = 1920000 }
account_history = { distance = 16384 }
storage_history = { distance = 16384 }
[prune.parts.receipts_log_filter]
'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
#";
        // Must parse without error; the resulting value is not inspected.
        let _conf: Config = toml::from_str(alpha_0_0_8).unwrap();

        // alpha.0.0.11: prune sections renamed to `[prune.segments]`.
        let alpha_0_0_11 = r"#
[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { before = 1920000 }
account_history = { distance = 16384 }
storage_history = { distance = 16384 }
[prune.segments.receipts_log_filter]
'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
#";
        let _conf: Config = toml::from_str(alpha_0_0_11).unwrap();

        // alpha.0.0.18: durations were serialized as `{ secs, nanos }`
        // tables (see `[stages.execution.max_duration]` below) rather than
        // human-readable strings; also includes the since-removed
        // `[stages.total_difficulty]` section.
        let alpha_0_0_18 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.total_difficulty]
commit_threshold = 100000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000
max_cumulative_gas = 1500000000000
[stages.execution.max_duration]
secs = 600
nanos = 0

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
commit_threshold = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '5s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30
max_concurrent_outbound_dials = 10

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096
bad_announcement = -1024

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0
#";
        let conf: Config = toml::from_str(alpha_0_0_18).unwrap();
        // The table-form duration (600 s) must deserialize to 10 minutes.
        assert_eq!(conf.stages.execution.max_duration, Some(Duration::from_secs(10 * 60)));

        // alpha.0.0.19: durations switched to human-readable strings
        // (`max_duration = '10m'` below); everything else as in 0.0.18.
        let alpha_0_0_19 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.total_difficulty]
commit_threshold = 100000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000
max_cumulative_gas = 1500000000000
max_duration = '10m'

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
commit_threshold = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '5s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30
max_concurrent_outbound_dials = 10

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096
bad_announcement = -1024

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0
#";
        let _conf: Config = toml::from_str(alpha_0_0_19).unwrap();
    }
1097
1098    // ensures prune config deserialization is backwards compatible
1099    #[test]
1100    fn test_backwards_compatibility_prune_full() {
1101        let s = r"#
1102[prune]
1103block_interval = 5
1104
1105[prune.segments]
1106sender_recovery = { distance = 16384 }
1107transaction_lookup = 'full'
1108receipts = { distance = 16384 }
1109#";
1110        let _conf: Config = toml::from_str(s).unwrap();
1111    }
1112
1113    #[test]
1114    fn test_prune_config_merge() {
1115        let mut config1 = PruneConfig {
1116            block_interval: 5,
1117            minimum_pruning_distance: MINIMUM_UNWIND_SAFE_DISTANCE,
1118            segments: PruneModes {
1119                sender_recovery: Some(PruneMode::Full),
1120                transaction_lookup: None,
1121                receipts: Some(PruneMode::Distance(1000)),
1122                account_history: None,
1123                storage_history: Some(PruneMode::Before(5000)),
1124                bodies_history: None,
1125                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([(
1126                    Address::random(),
1127                    PruneMode::Full,
1128                )])),
1129            },
1130        };
1131
1132        let config2 = PruneConfig {
1133            block_interval: 10,
1134            minimum_pruning_distance: MINIMUM_UNWIND_SAFE_DISTANCE,
1135            segments: PruneModes {
1136                sender_recovery: Some(PruneMode::Distance(500)),
1137                transaction_lookup: Some(PruneMode::Full),
1138                receipts: Some(PruneMode::Full),
1139                account_history: Some(PruneMode::Distance(2000)),
1140                storage_history: Some(PruneMode::Distance(3000)),
1141                bodies_history: None,
1142                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([
1143                    (Address::random(), PruneMode::Distance(1000)),
1144                    (Address::random(), PruneMode::Before(2000)),
1145                ])),
1146            },
1147        };
1148
1149        let original_filter = config1.segments.receipts_log_filter.clone();
1150        config1.merge(config2);
1151
1152        // Check that the configuration has been merged. Any configuration present in config1
1153        // should not be overwritten by config2
1154        assert_eq!(config1.block_interval, 10);
1155        assert_eq!(config1.segments.sender_recovery, Some(PruneMode::Full));
1156        assert_eq!(config1.segments.transaction_lookup, Some(PruneMode::Full));
1157        assert_eq!(config1.segments.receipts, Some(PruneMode::Distance(1000)));
1158        assert_eq!(config1.segments.account_history, Some(PruneMode::Distance(2000)));
1159        assert_eq!(config1.segments.storage_history, Some(PruneMode::Before(5000)));
1160        assert_eq!(config1.segments.receipts_log_filter, original_filter);
1161    }
1162
1163    #[test]
1164    fn test_conf_trust_nodes_only() {
1165        let trusted_nodes_only = r"#
1166[peers]
1167trusted_nodes_only = true
1168#";
1169        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
1170        assert!(conf.peers.trusted_nodes_only);
1171
1172        let trusted_nodes_only = r"#
1173[peers]
1174connect_trusted_nodes_only = true
1175#";
1176        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
1177        assert!(conf.peers.trusted_nodes_only);
1178    }
1179
1180    #[test]
1181    fn test_can_support_dns_in_trusted_nodes() {
1182        let reth_toml = r#"
1183    [peers]
1184    trusted_nodes = [
1185        "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
1186        "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303"
1187    ]
1188    "#;
1189
1190        let conf: Config = toml::from_str(reth_toml).unwrap();
1191        assert_eq!(conf.peers.trusted_nodes.len(), 2);
1192
1193        let expected_enodes = vec![
1194            "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
1195            "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303",
1196        ];
1197
1198        for enode in expected_enodes {
1199            let node = TrustedPeer::from_str(enode).unwrap();
1200            assert!(conf.peers.trusted_nodes.contains(&node));
1201        }
1202    }
1203}